Commit a0907c0
Parent(s): a0bf2f2
Update parquet files (step 19 of 249)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
- spaces/1acneusushi/gradio-2dmoleculeeditor/Eric-Helms-The-Muscle-And-Strength-Pyramid-Nutrition-V101pdf-CRACKED.md +0 -114
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call Of Duty Black Ops English Language Pack Download and Install Guide.md +0 -116
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Encase 6 Download The Ultimate Guide to the Best Digital Forensics Software.md +0 -40
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Azlibnet A guide to the electronic services of Azerbaijani libraries.md +0 -137
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chicken Gun Mod Menu 2.8.06 How to Hack Chicken Gun with Ease and Fun.md +0 -224
- spaces/1phancelerku/anime-remove-background/Download Bible Word Puzzle APK and Play Offline Word Games with Friends.md +0 -84
- spaces/1phancelerku/anime-remove-background/Download Music Playlist with One Click - No Ads No Fees.md +0 -133
- spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_lms_discrete.py +0 -257
- spaces/4f20/text_generator/README.md +0 -12
- spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/utils.py +0 -26
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/__init__.py +0 -8
- spaces/AIZero2Hero4Health/9-Seq2SeqQAGenerator-GR/qasrl_model_pipeline.py +0 -183
- spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/encodec.py +0 -302
- spaces/AchyuthGamer/OpenGPT/g4f/typing.py +0 -22
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/Buttons.js +0 -88
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ChildTransition.js +0 -24
- spaces/Aluxes/anime-remove-background/README.md +0 -14
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/__init__.py +0 -2
- spaces/Amrrs/textsummarizer/app.py +0 -16
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/vq_diffusion.md +0 -35
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py +0 -622
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_vp.py +0 -90
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/karras_ve/test_karras_ve.py +0 -86
- spaces/Andy1621/uniformer_image_detection/mmdet/datasets/__init__.py +0 -24
- spaces/Andy1621/uniformer_image_detection/mmdet/datasets/wider_face.py +0 -51
- spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py +0 -2
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/logits.py +0 -56
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/simple_tokenizer.py +0 -132
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/fid_score.py +0 -246
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/winterm_test.py +0 -131
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_impl.py +0 -330
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/bdist.py +0 -157
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/__init__.py +0 -12
- spaces/BLACKHOST/Date/date.py +0 -9
- spaces/Bart92/RVC_HF/julius/__init__.py +0 -41
- spaces/Benson/text-generation/Examples/Cinco Noches En Freddy 39s 6 Descarga.md +0 -103
- spaces/CVPR/Bamboo_ViT-B16_demo/README.md +0 -13
- spaces/CVPR/GFPGAN-example/setup.py +0 -107
- spaces/CVPR/LIVE/thrust/thrust/detail/allocator_aware_execution_policy.h +0 -101
- spaces/CVPR/LIVE/thrust/thrust/detail/trivial_sequence.h +0 -95
- spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/plugin.js +0 -119
- spaces/CleanML/demo/README.md +0 -11
- spaces/ConvLab/README/README.md +0 -23
- spaces/Cropinky/hana_hanak_houses/README.md +0 -12
- spaces/DAMO-NLP-SG/CLEX-Chat/style.css +0 -16
- spaces/DHEIVER/ThyroidTumorClassificationModel/README.md +0 -12
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_protocol.py +0 -679
- spaces/Dachus/Realfee/Dockerfile +0 -15
- spaces/Dauzy/whisper-webui/src/utils.py +0 -245
spaces/1acneusushi/gradio-2dmoleculeeditor/Eric-Helms-The-Muscle-And-Strength-Pyramid-Nutrition-V101pdf-CRACKED.md
DELETED
@@ -1,114 +0,0 @@
## Eric Helms The Muscle And Strength Pyramid Nutrition V101pdf

![Eric Helms The Muscle And Strength Pyramid Nutrition V101pdf \[CRACKED\]](https://zarrinholeh.com/wp-content/uploads/2018/10/price-towel-01.jpg)

**Download === [https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2txKNe&sa=D&sntz=1&usg=AOvVaw00kewj9WV-3GzZaeyjBmp-](https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2txKNe&sa=D&sntz=1&usg=AOvVaw00kewj9WV-3GzZaeyjBmp-)**

# How to Optimize Your Nutrition for Muscle and Strength

If you are looking for a comprehensive guide on how to set up your nutrition for optimal muscle and strength gains, you might want to check out **The Muscle and Strength Pyramid: Nutrition** by Eric Helms, Andy Morgan and Andrea Valdez. This book is based on the concept of understanding priorities and context, so you can take all the pieces of the puzzle and fit them together into an actionable plan.

In this book, you will learn:

- What are the most important factors for nutrition success and how to rank them in order of importance.
- How to calculate your calorie, protein, carbohydrate and fat needs based on your goals, body type and activity level.
- How to adjust your nutrition for different scenarios, such as bulking, cutting, maintenance, bodybuilding, powerlifting or weight class sports.
- How to balance adherence, consistency and flexibility so you can live your life while progressing toward your goals.
- How to apply evidence-based principles and avoid common myths and misconceptions about nutrition.

The book is written by experts who have both academic and practical experience in the field of nutrition and fitness. Eric Helms is a researcher, coach and natural bodybuilder who has helped hundreds of clients achieve their goals. Andy Morgan is a writer and consultant who specializes in body composition change and has a unique ability to communicate complex topics in a simple way. Andrea Valdez is a lifelong athlete with a Masters in Exercise Physiology and extensive coaching experience.

The book is available in paperback and PDF formats. You can find more information about the book and how to order it on the following websites:

1. [Google Books](https://books.google.com/books/about/The_Muscle_and_Strength_Pyramid_Nutritio.html?id=XMawwwEACAAJ)
2. [Amazon](https://www.amazon.com/Muscle-Strength-Pyramid-Nutrition/dp/1090912188)
3. [Archive](https://archive.org/details/0erichelmsthemuscleandstrengthtrainingpyramidv2.0nutrion02)

If you are serious about improving your nutrition for muscle and strength, this book is a must-read. It will provide you with the knowledge, tools and strategies you need to succeed.

But nutrition is only one part of the equation. If you want to optimize your muscle and strength gains, you also need to train properly. That's why Eric Helms and his co-authors have also written **The Muscle and Strength Pyramid: Training**, a companion book that covers everything you need to know about designing and executing effective training programs.

In this book, you will learn:

- What are the main principles of training for muscle and strength and how to apply them to your own goals.
- How to manipulate volume, intensity, frequency, progression, specificity and variation to optimize your training stimulus.
- How to choose the best exercises, rep ranges, rest periods, tempo and technique for your needs.
- How to manage fatigue, recovery, stress and adaptation to avoid overtraining and injury.
- How to periodize your training for long-term progress and peak performance.

The book is also based on the latest scientific evidence and practical experience of the authors. Eric Helms is not only a researcher and coach, but also a competitive natural bodybuilder and powerlifter who has achieved elite status in both sports. Andy Morgan and Andrea Valdez are also experienced coaches and athletes who have helped hundreds of clients reach their potential.

The book is available in paperback and PDF formats. You can find more information about the book and how to order it on the following websites:

1. [Goodreads](https://www.goodreads.com/book/show/44773627-the-muscle-and-strength-pyramid)
2. [The Muscle and Strength Pyramids](https://muscleandstrengthpyramids.com/)
3. [Archive](https://archive.org/details/0erichelmsthemuscleandstrengthtrainingpyramidv2.0nutrion02)

If you are serious about improving your training for muscle and strength, this book is a must-read. It will provide you with the knowledge, tools and strategies you need to succeed.

dfd1c89656
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call Of Duty Black Ops English Language Pack Download and Install Guide.md
DELETED
@@ -1,116 +0,0 @@
<br>- Overview: What you need to change the language and where to find it<br>- Step-by-step guide: How to download and install the English language pack and change the game settings<br>- Conclusion: Summary of the main points and benefits of changing the language | | H2: Introduction | - What is Call of Duty Black Ops and why you might want to change the language | | H3: What is Call of Duty Black Ops? | - A brief description of the game, its genre, setting, features and popularity | | H3: Why you might want to change the language | - Some possible reasons why you might not be satisfied with the default language of the game, such as preference, accessibility, compatibility or availability | | H2: Overview | - What you need to change the language and where to find it | | H3: What you need to change the language | - A list of the files and tools you need to change the language, such as localization files, sound files and WinRAR | | H3: Where to find the English language pack | - A brief explanation of where you can download the English language pack for free, such as YouTube videos or Google Drive links | | H2: Step-by-step guide | - How to download and install the English language pack and change the game settings | | H3: How to download the English language pack | - A detailed instruction on how to download the English language pack from one of the sources, such as YouTube video or Google Drive link | | H3: How to install the English language pack | - A detailed instruction on how to extract and copy the English language pack files into the game folder using WinRAR | | H3: How to change the game settings | - A detailed instruction on how to edit the localization files and select the English language in the game options | | H2: Conclusion | - Summary of the main points and benefits of changing the language | | H3: Summary of the main points | - A recap of what Call of Duty Black Ops is, what you need to change the language and how to do it | | H3: Benefits of changing the language | - A list of some advantages of playing Call of Duty Black Ops in English, such as better understanding, immersion, compatibility or availability | **Table 2: Article with HTML formatting** ```html <h1>Call of Duty Black Ops English Language Pack: How to Change the Game Language from Any Language to English</h1>
<p>Call of Duty Black Ops is one of the most popular first-person shooter games ever made. It takes you on a thrilling adventure across different locations and time periods during the Cold War. However, if you are not happy with the default language of the game, you might be wondering how to change it to English. In this article, we will show you what you need to change the language and where to find it. We will also provide you with a step-by-step guide on how to download and install the English language pack and change the game settings. By following these simple steps, you will be able to enjoy Call of Duty Black Ops in English in no time.</p>
<h2>Call Of Duty Black Ops English Language Pack</h2><br /><p><b><b>Download</b> ✒ ✒ ✒ <a href="https://byltly.com/2uKyMK">https://byltly.com/2uKyMK</a></b></p><br /><br />
<h2>Introduction</h2>
<h3>What is Call of Duty Black Ops?</h3>
<p>Call of Duty Black Ops is a first-person shooter game developed by Treyarch and published by Activision in 2010. It is the seventh installment in the Call of Duty series and a sequel to Call of Duty World at War. The game follows the missions of a covert team of special forces operatives known as SOG (Studies and Observations Group) during various conflicts in Vietnam, Cuba, Laos and Russia. The game features a single-player campaign mode, a multiplayer mode with various modes and maps, and a zombie mode with four maps. The game received critical acclaim for its story, gameplay, graphics and sound design. It also became one of the best-selling games of all time, selling more than 30 million copies worldwide.</p>
<h3>Why you might want to change the language</h3>
<p>Depending on where you bought or downloaded Call of Duty Black Ops, you might have a different default language for your game. For example, if you bought or downloaded it from Russia or Poland, you might have Russian or Polish as your default language. However, you might not be satisfied with this language for various reasons. For instance:</p>
<ul>
<li>You might prefer playing games in English because it is your native language or because you are more comfortable with it.</li>
<li>You might have trouble understanding or reading some words or phrases in another language because they are too fast or too small.</li>
<li>You might experience some compatibility issues with some mods or patches that are only available in English.</li>
<li>You might want to access some content or features that are only available in English.</li>
</ul>
<p>Whatever your reason is, changing your game language from any language to English can improve your gaming experience significantly.</p>
<h2>Overview</h2>
<h3>What you need to change the language</h3>
<p>To change your game language from any language to English, you will need two things:</p>
<ol>
<li>The English language pack files for Call of Duty Black Ops. These are files that contain all the text and audio data for the English version of the game.</li>
<li>A tool that can extract and copy files from compressed archives. We recommend using WinRAR because it is free and easy to use.</li>
</ol>
<h3>Where to find the English language pack</h3>
<p>The good news is that you can find and download the English language pack for Call of Duty Black Ops for free online. There are several sources that offer this service, but we will focus on two of them:</p>
<ul>
<li>YouTube videos that provide links to download sites or Google Drive folders that contain the English language pack files. For example, this video by Rishi's Tech & Tutorials shows how to change your game language from any language to English using a Google Drive link that contains all the necessary files.</li>
<li>Reddit posts that provide links to download sites or Google Drive folders that contain the English language pack files. For example, this post by u/ChaosZeroX shows how to change your game language from Russian (or any other) to English using a Google Drive link that contains all the necessary files.</li>
</ul>
<p>You can choose any source that works for you, but make sure that it is reliable and safe before downloading anything.</p>
<h2>Step-by-step guide</h2>
<h3>How to download the English language pack</h3>
<p>In this guide, we will use the YouTube video by Rishi's Tech & Tutorials as an example, but you can follow the same steps for any other source that provides the same files. To download the English language pack, you need to do the following:</p>
<ol><li>Open the YouTube video in your browser.</li><li>Go to the description section below the video and click on the link that says "Call Of Duty English Language Pack". This will take you to a blog post by Kurivaim1.</li><li>In the blog post, scroll down until you see a button that says "Download". Click on it. This will take you to another page with a countdown timer.</li><li>Wait for the countdown timer to finish and then click on "Skip Ad". This will take you to a Google Drive folder that contains the English language pack files.</li><li>In the Google Drive folder, select all the files by clicking on one file and then pressing Ctrl+A on your keyboard.</li><li>Right-click on any file and select "Download". This will start downloading a ZIP file named "Call Of Duty-English Language Pack.zip" into your computer.</li></ol>
<h3>How to install the English language pack</h3>
<p>To install the English language pack, you need to do the following:</p>
<p>How to install Call Of Duty Black Ops English Language Pack<br />
Call Of Duty Black Ops English Language Pack download link<br />
Call Of Duty Black Ops English Language Pack free download<br />
Call Of Duty Black Ops English Language Pack torrent<br />
Call Of Duty Black Ops English Language Pack crack<br />
Call Of Duty Black Ops English Language Pack steam<br />
Call Of Duty Black Ops English Language Pack skidrow<br />
Call Of Duty Black Ops English Language Pack error fix<br />
Call Of Duty Black Ops English Language Pack gameplay<br />
Call Of Duty Black Ops English Language Pack review<br />
Call Of Duty Black Ops English Language Pack trailer<br />
Call Of Duty Black Ops English Language Pack system requirements<br />
Call Of Duty Black Ops English Language Pack mods<br />
Call Of Duty Black Ops English Language Pack cheats<br />
Call Of Duty Black Ops English Language Pack zombies<br />
Call Of Duty Black Ops English Language Pack multiplayer<br />
Call Of Duty Black Ops English Language Pack online<br />
Call Of Duty Black Ops English Language Pack patch<br />
Call Of Duty Black Ops English Language Pack update<br />
Call Of Duty Black Ops English Language Pack DLC<br />
Call Of Duty Black Ops English Language Pack PS4<br />
Call Of Duty Black Ops English Language Pack Xbox One<br />
Call Of Duty Black Ops English Language Pack PC<br />
Call Of Duty Black Ops English Language Pack Mac<br />
Call Of Duty Black Ops English Language Pack Linux<br />
Call Of Duty Black Ops English Language Pack switch<br />
Call Of Duty Black Ops English Language Pack android<br />
Call Of Duty Black Ops English Language Pack iOS<br />
Call Of Duty Black Ops English Language Pack VR<br />
Call Of Duty Black Ops English Language Pack 4K<br />
Call Of Duty Black Ops English Language Pack remastered<br />
Call Of Duty Black Ops English Language Pack comparison<br />
Call Of Duty Black Ops English Language Pack tips and tricks<br />
Call Of Duty Black Ops English Language Pack guide and walkthrough<br />
Call Of Duty Black Ops English Language Pack best weapons and perks<br />
Call Of Duty Black Ops English Language Pack easter eggs and secrets<br />
Call Of Duty Black Ops English Language Pack soundtrack and music<br />
Call Of Duty Black Ops English Language Pack voice actors and characters<br />
Call Of Duty Black Ops English Language Pack story and lore<br />
Call Of Duty Black Ops English Language Pack fan art and memes<br />
Call Of Duty Black Ops English Language Pack merchandise and collectibles<br />
Call Of Duty Black Ops English Language Pack news and rumors<br />
Call Of Duty Black Ops English Language Pack release date and price<br />
Call Of Duty Black Ops English Language Pack pre-order and bonus content<br />
Call Of Duty Black Ops English Language Pack beta and demo access <br />
Call Of Duty Black Ops English Language Pack forums and communities <br />
Call Of Duty Black Ops English Language Pack ratings and awards <br />
Call Of Duty Black Ops English Language Pack developer and publisher</p>
<ol><li>Locate the ZIP file named "Call Of Duty-English Language Pack.zip" in your computer's Downloads folder (or wherever you saved it).</li><li>Right-click on it and select "Extract Here" if you have WinRAR installed. This will create a new folder named "Call Of Duty-English Language Pack" with all the extracted files inside.</li><li>Open the folder named "Call Of Duty-English Language Pack" and find the folder named "Sounds".</li><li>Open another window of File Explorer and navigate to your Call of Duty Black Ops game folder. The location of this folder may vary depending on where you installed the game, but you can find it by following these steps:</li>
<ul>
<li>Open the Battle.net client and select Call of Duty Black Ops from the left panel.</li>
<li>Click on the gear icon next to the play button and select Show in Explorer. This will open your game folder in File Explorer.</li>
</ul>
<li>Copy the folder named "Sounds" from the "Call Of Duty-English Language Pack" folder and paste it into your game folder. If prompted, choose to replace the existing files.</li>
<li>Go back to the "Call Of Duty-English Language Pack" folder and find the folder named "Zone". Inside this folder, you will see another folder named "English".</li>
<li>Copy the folder named "English" from the "Zone" folder and paste it into your game folder. If prompted, choose to replace the existing files.</li>
</ol>
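For readers who would rather script the copy step above than drag folders by hand, here is a rough sketch of the same operations. It is only an illustration of the manual steps, not part of the original guide: the `pack_dir` and `game_dir` paths are assumed example locations and must be adjusted to where you actually extracted the archive and installed the game.

```python
# Illustrative sketch only: copy the extracted language-pack folders into the
# Black Ops install directory, mirroring the manual steps described above.
# pack_dir and game_dir are hypothetical example paths -- adjust to your setup.
import shutil
from pathlib import Path

pack_dir = Path(r"C:\Downloads\Call Of Duty-English Language Pack")
game_dir = Path(r"C:\Program Files (x86)\Call of Duty Black Ops")

# Copy "Sounds" into the game folder, replacing any existing files.
shutil.copytree(pack_dir / "Sounds", game_dir / "Sounds", dirs_exist_ok=True)

# Copy the "English" folder found inside "Zone" into the game folder,
# again replacing existing files (as the guide instructs).
shutil.copytree(pack_dir / "Zone" / "English", game_dir / "English", dirs_exist_ok=True)
```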
<h3>How to change the game settings</h3>
<p>To change the game settings, you need to do the following:</p>
<ol><li>In your game folder, find and open the file named "localization.txt" with a text editor such as Notepad.</li><li>Change the value of the line that says "SET LANG \"xx\"" to "SET LANG \"en\"". For example, if your default language was Russian, you would change it from "SET LANG \"ru\"" to "SET LANG \"en\"". Save and close the file.</li><li>Repeat the same process for the files named "localization_mp.txt" and "localization_zm.txt". These are for the multiplayer and zombie modes respectively.</li><li>Launch the Battle.net client and select Call of Duty Black Ops from the left panel.</li><li>Click on the gear icon next to the play button and select Game Settings.</li><li>In the Game Settings window, click on the Game Language tab.</li><li>Select English from the drop-down menu and click on Done.</li></ol>
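If you prefer not to edit the three localization files by hand, a minimal sketch of steps 1-3 is shown below. It assumes each file really does contain a line of the form SET LANG "xx" as quoted in the steps above; the game directory path is a placeholder, not an official install location.

```python
# Minimal sketch of steps 1-3: switch SET LANG "<code>" to "en" in the three
# localization files. Assumes the line format quoted in the guide; the path
# below is an assumed example install location, not a confirmed one.
import re
from pathlib import Path

game_dir = Path(r"C:\Program Files (x86)\Call of Duty Black Ops")

for name in ("localization.txt", "localization_mp.txt", "localization_zm.txt"):
    path = game_dir / name
    text = path.read_text(encoding="utf-8")
    # Replace whatever two-letter language code is currently set, e.g. "ru" or "pl".
    text = re.sub(r'SET LANG "[a-z]{2}"', 'SET LANG "en"', text)
    path.write_text(text, encoding="utf-8")
```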
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In this article, we have shown you how to change your game language from any language to English for Call of Duty Black Ops. We have explained what Call of Duty Black Ops is, what you need to change the language and where to find it. We have also provided you with a step-by-step guide on how to download and install the English language pack and change the game settings. By following these simple steps, you will be able to enjoy Call of Duty Black Ops in English in no time.</p>
<h3>Benefits of changing the language</h3>
<p>Changing your game language from any language to English for Call of Duty Black Ops can have several benefits for your gaming experience. For example:</p>
<ul><li>You will be able to understand and read all the text and audio data in the game, such as dialogues, subtitles, menus, instructions and tips.</li><li>You will be able to immerse yourself more in the game's story, setting and atmosphere.</li><li>You will be able to avoid some compatibility issues with some mods or patches that are only available in English.</li><li>You will be able to access some content or features that are only available in English, such as online servers, forums or guides.</li></ul>
<p>We hope that this article has been helpful for you and that you have learned something new today. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!</p>
<h2>Frequently Asked Questions</h2>
<ol><li><b>Can I change my game language back to my original language?</b></li><p>Yes, you can change your game language back to your original language by following the same steps as above, but using the original language pack files instead of the English ones. You can also change your game language anytime from the Game Settings window in the Battle.net client.</p>
<li><b>Will changing my game language affect my save files or progress?</b></li>
<p>No, changing your game language will not affect your save files or progress. You can continue playing from where you left off without any problems.</p>
<li><b>Will changing my game language affect my online multiplayer or zombie mode?</b></li>
<p>No, changing your game language will not affect your online multiplayer or zombie mode. You can still play with other players who have different languages without any issues.</p>
<li><b>Where can I find more information about Call of Duty Black Ops?</b></li>
<p>You can find more information about Call of Duty Black Ops on its official website, its Wikipedia page, or its Steam page. You can also check out some reviews, videos, guides or forums online for more tips and tricks.</p>
<li><b>Where can I find more articles like this one?</b></li>
<p>You can find more articles like this one on our website, where we write about various topics related to gaming, technology, entertainment and more. You can also subscribe to our newsletter or follow us on social media for more updates.</p></ol>
</p> 0a6ba089eb<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Encase 6 Download The Ultimate Guide to the Best Digital Forensics Software.md
DELETED
@@ -1,40 +0,0 @@
<h1>How to Encase 6 Download and Install on Your Computer</h1>
<p>If you are looking for a powerful and reliable digital forensics software, you might want to consider Encase 6 download. Encase 6 is a popular tool that allows you to acquire, analyze and report on digital evidence from various sources, such as hard drives, mobile devices, cloud services and more.</p>
<h2>encase 6 download</h2><br /><p><b><b>DOWNLOAD</b> ►►►►► <a href="https://byltly.com/2uKzVy">https://byltly.com/2uKzVy</a></b></p><br /><br />
<p>In this article, we will show you how to encase 6 download and install on your computer in a few simple steps. We will also give you some tips on how to use Encase 6 effectively and efficiently.</p>
<h2>Step 1: Encase 6 Download from the Official Website</h2>
<p>The first step to encase 6 download is to visit the official website of the software provider, Guidance Software. You can find the link <a href="https://www.guidancesoftware.com/encase-forensic">here</a>.</p>
<p>Once you are on the website, you will need to create an account or log in with your existing credentials. You will also need to provide some information about yourself and your organization, such as your name, email address, phone number and country.</p>
<p>After you complete the registration process, you will be able to access the download page of Encase 6. You will see a list of available versions and languages for the software. Choose the one that suits your needs and click on the download button.</p>
<p>The file size of Encase 6 is about 1.5 GB, so it might take some time to download depending on your internet speed. You can check the progress of the download on your browser or in your downloads folder.</p>
<h2>Step 2: Install Encase 6 on Your Computer</h2>
<p>Once you have downloaded Encase 6, you can proceed to install it on your computer. To do so, follow these steps:</p>
<ul>
<li>Locate the downloaded file and double-click on it to launch the installer.</li>
<li>Accept the license agreement and click on Next.</li>
<li>Choose the destination folder for the installation and click on Next.</li>
<li>Select the components you want to install and click on Next.</li>
<li>Enter the serial number that was sent to your email address and click on Next.</li>
<li>Click on Install to begin the installation process.</li>
<li>Wait for the installation to finish and click on Finish.</li>
</ul>
<p>Congratulations! You have successfully installed Encase 6 on your computer. You can now launch the software from your desktop or start menu.</p>
<h2>Step 3: Use Encase 6 for Digital Forensics</h2>
<p>Now that you have encase 6 download and install on your computer, you can start using it for digital forensics purposes. Here are some of the main features and functions of Encase 6 that you should know:</p>
<ul>
<li>Encase 6 allows you to acquire digital evidence from various sources, such as hard drives, mobile devices, cloud services and more. You can use different methods of acquisition, such as physical, logical or remote.</li>
<li>Encase 6 enables you to analyze digital evidence using various tools and techniques, such as keyword search, hash analysis, file carving, timeline analysis and more. You can also use custom scripts and plugins to extend the functionality of the software.</li>
<li>Encase 6 helps you to report on digital evidence using various formats and templates, such as HTML, PDF, XML and more. You can also create bookmarks, annotations and comments to highlight important findings and observations.</li>
</ul>
<p>Encase 6 is a powerful and reliable digital forensics software that can help you with various cases and scenarios. However, it also requires some skills and knowledge to use it effectively and efficiently. Therefore, we recommend that you take some training courses or consult some experts before using Encase 6 for real investigations.</p>
<h2>Conclusion</h2>
<p>In this article, we have shown you how to encase 6 download and install on your computer in a few simple steps. We have also given you some tips on how to use Encase 6 for digital forensics purposes. We hope that this article has been helpful and informative for you.</p>
<p>If you have any questions or comments about Encase 6 download or installation, feel free to leave them below. We will try our best to answer them as soon as possible. Thank you</p> ddb901b051<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Azlibnet A guide to the electronic services of Azerbaijani libraries.md
DELETED
@@ -1,137 +0,0 @@
<h1>What is AZLIBNET and why you should use it</h1>
<p>If you are a fan of Azerbaijani literature, or if you want to learn more about the rich and diverse culture of Azerbaijan, you should definitely check out <strong>AZLIBNET</strong>. AZLIBNET is the <em>virtual library of Azerbaijani literature</em>, where you can find thousands of books, journals, magazines, newspapers, and other publications in Azerbaijani, Turkish, Russian, English, and other languages. You can also order books online and have them delivered to your doorstep, or publish your own books and reach a wide audience. In this article, we will tell you everything you need to know about AZLIBNET and why you should use it.</p>
<h2>azlibnet</h2><br /><p><b><b>Download</b> ✏ <a href="https://urlin.us/2uSZsM">https://urlin.us/2uSZsM</a></b></p><br /><br />
<h2>AZLIBNET: The virtual library of Azerbaijani literature</h2>
<p>AZLIBNET was established in 2007 by the decree of the President of the Republic of Azerbaijan, with the aim of improving the activities of libraries and information services in the country. It is a project of the National Library named after M.F.Akhundov, which is the main library institution in Azerbaijan. AZLIBNET is a digital platform that provides access to various sources of information related to Azerbaijani literature, history, culture, art, science, and education.</p>
<h3>How to access AZLIBNET and what you can find there</h3>
<p>To access AZLIBNET, you need to visit its official website: <a href="(^1^)">www.lib.az</a>. There, you can browse through different categories of publications, such as books, journals, newspapers, magazines, dissertations, reports, etc. You can also search by title, author, subject, keyword, or ISBN. You can view the full text of some publications online, or download them as PDF files. You can also request a copy of any publication that is not available online by filling out a form.</p>
<p>Some of the publications that you can find on AZLIBNET are:</p>
<ul>
<li><strong>Kitablar</strong>: This section contains books on various topics, such as literature, history, philosophy, religion, sociology, psychology, law, economics, etc. You can find both classic and contemporary works by Azerbaijani authors, as well as translations from other languages.</li>
<li><strong>Jurnallar</strong>: This section contains journals on different fields of science and humanities, such as linguistics, literature, history, culture, art, education, etc. You can find both academic and popular journals that cover current issues and trends in Azerbaijan and the world.</li>
<li><strong>Gəzətlər</strong>: This section contains newspapers that reflect the political, social, economic, and cultural life of Azerbaijan. You can find both national and regional newspapers that offer news, analysis, opinions, interviews, etc.</li>
<li><strong>Məcmuələr</strong>: This section contains magazines that cater to different interests and tastes of readers. You can find magazines on topics such as fashion, beauty, health, lifestyle, entertainment, travel, sports, etc.</li>
<li><strong>Dissertasiyalar</strong>: This section contains dissertations that represent the scientific achievements and contributions of Azerbaijani scholars. You can find dissertations on various disciplines and levels (bachelor's, master's, doctoral).</li>
<li><strong>Hesabatlar</strong>: This section contains reports that provide information and statistics on various aspects of Azerbaijan's development and progress. You can find reports on topics such as economy, education, health, culture, etc.</li>
</ul>
<p>As you can see, AZLIBNET offers a rich and diverse collection of publications that can satisfy your curiosity and needs. Whether you are a student, a researcher, a teacher, a writer, or a reader, you can find something useful and interesting on AZLIBNET.</p>
<h3>The benefits of using AZLIBNET for your research and education</h3>
<p>Using AZLIBNET for your research and education has many benefits. Here are some of them:</p>
<p>azlibnet ebooks<br />
azlibnet kitabxana<br />
azlibnet virtual kitabxana<br />
azlibnet kitab göndərişi<br />
azlibnet azərbaycan ədəbiyyatı<br />
azlibnet şuşa<br />
azlibnet səfəvilər<br />
azlibnet elektron xidmətlər<br />
azlibnet şamaxı səfəri<br />
azlibnet zirə kəndi<br />
azlibnet fatmayi kəndi<br />
azlibnet rövşən yerfi<br />
azlibnet prezident fərmanı<br />
azlibnet kitabxana-informasiya sahəsi<br />
azlibnet dövlət proqramı<br />
azlibnet vimeo video<br />
azlibnet kitabxana sistemi<br />
azlibnet kitabxana kartotekası<br />
azlibnet kitabxana kataloqu<br />
azlibnet kitabxana abunəçiləri<br />
azlibnet kitabxana statistikası<br />
azlibnet kitabxana tədbirləri<br />
azlibnet kitabxana xidmətləri<br />
azlibnet kitabxana resursları<br />
azlibnet kitabxana standartları<br />
azlibnet kitabxana mütaliəçiləri<br />
azlibnet kitabxana işçiləri<br />
azlibnet kitabxana təlimləri<br />
azlibnet kitabxana elmi işləri<br />
azlibnet kitabxana nümunələri<br />
azlibnet kitabxana mükafatları<br />
azlibnet kitabxana tarihi<br />
azlibnet kitabxana mühitliliyi<br />
azlibnet kitabxana texnologiyaları<br />
azlibnet kitabxana innovasiyaları<br />
azlibnet kitabxana ictimaiyyətliliyi<br />
azlibnet kitabxana diasporu<br />
azlibnet kitabxana ictimai fikir anketi<br />
azlibnet kitabxana elektron jurnalları<br />
azlibnet kitabxana elektron qanunvericiliyi<br />
azlibnet kitabxana elektron ensiklopediyaları <br />
azlibnet kitabxana elektron lüğətləri <br />
azlibnet kitabxana elektron bibliotekaları <br />
azlibnet kitabxana elektron arxivləri <br />
azlibnet kitabxana elektron qaynaqları <br />
azlibnet kitabxana elektron nümayişləri <br />
azlibnet kitabxana elektron sifarişləri <br />
azlibnet kitabxana elektron müraciətləri</p>
<ul>
<li><strong>It saves you time and money</strong>: You don't have to visit physical libraries or bookstores to find the publications you need. You can access them online anytime and anywhere, with just a few clicks. You also don't have to pay for subscriptions or fees to use AZLIBNET. It is free and open to everyone.</li>
<li><strong>It provides you with quality and reliable information</strong>: You can trust the information you find on AZLIBNET, because it is verified and updated by the National Library and other reputable institutions. You can also cite the sources you use from AZLIBNET in your academic papers and projects, as they are recognized and respected by the scientific community.</li>
<li><strong>It enhances your knowledge and skills</strong>: You can learn new things and improve your skills by reading the publications on AZLIBNET. You can broaden your horizons and perspectives by exploring different topics and viewpoints. You can also improve your language skills by reading publications in different languages.</li>
<li><strong>It supports your cultural identity and awareness</strong>: You can discover and appreciate the rich and diverse culture of Azerbaijan by reading the publications on AZLIBNET. You can learn more about the history, traditions, values, achievements, and challenges of your country and people. You can also share your culture with others by recommending or reviewing the publications you like.</li>
</ul>
<p>These are just some of the benefits of using AZLIBNET for your research and education. There are many more that you can discover by yourself. So, what are you waiting for? Start using AZLIBNET today and see the difference!</p>
<h2>AZLIBNET: The online book delivery system</h2>
<p>AZLIBNET is not only a virtual library, but also an online book delivery system. This means that you can order books from AZLIBNET and have them delivered to your doorstep. This is a great option for those who prefer to read physical books rather than digital ones, or who want to own or gift books that they like.</p>
<h3>How to order books from AZLIBNET and how they are delivered</h3>
<p>To order books from AZLIBNET, you need to follow these simple steps:</p>
<ol>
<li>Visit the official website of AZLIBNET: <a href="">www.lib.az</a>.</li>
<li>Browse through the categories of books or search for the ones you want.</li>
<li>Select the books you want to order and add them to your cart.</li>
<li>Fill out your personal and delivery information.</li>
<li>Choose your payment method (cash on delivery or online payment).</li>
<li>Confirm your order and wait for the confirmation email.</li>
</ol>
<p>The delivery time depends on the availability of the books and the location of the delivery address. Usually, it takes between 1 to 5 working days for the books to arrive. The delivery fee is calculated based on the weight of the books and the distance of the delivery address. You can check the delivery fee before confirming your order.</p>
<h3>The advantages of using AZLIBNET for your reading and enjoyment</h3>
<p>Using AZLIBNET for your reading and enjoyment has many advantages. Here are some of them:</p>
<ul>
<li><strong>It gives you access to a wide range of books</strong>: You can find books on any topic, genre, style, or language on AZLIBNET. You can find both new and old books, as well as rare and exclusive ones. You can also find books that are not available in other libraries or bookstores.</li>
<li><strong>It offers you convenience and comfort</strong>: You don't have to go out or travel to get the books you want. You can order them online from the comfort of your home or office, and have them delivered to your doorstep. You can also track your order status and contact the customer service if you have any questions or issues.</li>
<li><strong>It allows you to save money and support local businesses</strong>: You don't have to pay extra fees or taxes to use AZLIBNET. The prices of the books are reasonable and affordable. You also support local businesses by ordering books from AZLIBNET, as they work with local publishers, distributors, and couriers.</li>
<li><strong>It enhances your reading experience and satisfaction</strong>: You can enjoy reading the books you ordered from AZLIBNET at your own pace and preference. You can also share your thoughts and opinions about the books with other readers on the website, or join online book clubs and discussions. You can also rate and review the books you read, and get recommendations for other books you might like.</li>
</ul>
<p>These are just some of the advantages of using AZLIBNET for your reading and enjoyment. There are many more that you can experience by yourself. So, why not give it a try? Order your books from AZLIBNET today and enjoy reading!</p>
<h2>AZLIBNET: The digital platform for Azerbaijani authors and publishers</h2>
<p>AZLIBNET is not only a virtual library and an online book delivery system, but also a digital platform for Azerbaijani authors and publishers. This means that you can publish your books on AZLIBNET and reach a wide audience. This is a great opportunity for those who want to share their stories and ideas with the world, or who want to make a living from their writing.</p>
<h3>How to publish your books on AZLIBNET and how they are promoted</h3>
<p>To publish your books on AZLIBNET, you need to follow these simple steps:</p>
<ol>
<li>Visit the official website of AZLIBNET: <a href="">www.lib.az</a>.</li>
<li>Register as an author or a publisher by filling out a form.</li>
<li>Upload your book files (PDF, EPUB, MOBI, etc.) and provide the necessary information (title, author, genre, summary, cover image, etc.).</li>
<li>Choose your pricing and distribution options (free or paid, online or print, local or global, etc.).</li>
<li>Submit your book for approval and wait for the confirmation email.</li>
</ol>
<p>Once your book is approved, it will be available on AZLIBNET for readers to access, download, or order. Your book will also be promoted by AZLIBNET through various channels, such as social media, newsletters, blogs, podcasts, etc. You can also promote your book yourself by sharing the link to your book page on AZLIBNET with your friends, family, fans, etc.</p>
<h3>The opportunities of using AZLIBNET for your writing and career</h3>
<p>Using AZLIBNET for your writing and career has many opportunities. Here are some of them:</p>
<ul>
<li><strong>It gives you exposure and recognition</strong>: You can showcase your talent and creativity to a large and diverse audience on AZLIBNET. You can also get feedback and support from other authors, publishers, and readers on AZLIBNET. You can also build your reputation and credibility as a writer by publishing quality books on AZLIBNET.</li>
<li><strong>It offers you convenience and flexibility</strong>: You don't have to deal with the hassle and cost of traditional publishing methods to publish your books on AZLIBNET. You can publish your books online from anywhere and anytime, with just a few clicks. You can also update or edit your books anytime you want.</li>
<li><strong>It allows you to earn money and support local economy</strong>: You can earn money from your books by setting your own prices and royalties on AZLIBNET. You can also choose how you want to receive your payments (bank transfer, PayPal, etc.). You also support local economy by publishing your books on AZLIBNET, as they work with local printing companies and couriers.</li>
<li><strong>It enhances your writing skills and career prospects</strong>: You can improve your writing skills by publishing your books on AZLIBNET. You can also learn from other authors and publishers on AZLIBNET. You can also expand your network and opportunities by connecting with other writers, readers, and professionals on AZLIBNET.</li>
</ul>
<p>These are just some of the opportunities of using AZLIBNET for your writing and career. There are many more that you can explore by yourself. So, don't hesitate to publish your books on AZLIBNET and see the results!</p>
<h2>Conclusion: AZLIBNET is the best choice for anyone interested in Azerbaijani literature</h2>
<p>In conclusion, AZLIBNET is the best choice for anyone interested in Azerbaijani literature. It is a virtual library that provides access to thousands of publications in different languages and formats. It is an online book delivery system that allows you to order books online and have them delivered to your doorstep. It is a digital platform that enables you to publish your books online and reach a wide audience. It is a service that offers many benefits, advantages, and opportunities for readers, researchers, educators, writers, and publishers. It is a project that supports the development and promotion of Azerbaijani literature, culture, and economy.</p>
<p>So, what are you waiting for? Visit <a href="">www.lib.az</a> today and start using AZLIBNET. You will be amazed by what you can find, read, order, or publish on AZLIBNET. You will also be proud of being part of the Azerbaijani literary community. Join AZLIBNET today and enjoy the world of Azerbaijani literature!</p>
<h4>FAQs about AZLIBNET</h4>
<p>Here are some frequently asked questions about AZLIBNET:</p>
<ul>
<li><strong>Q: How can I register on AZLIBNET?</strong></li>
<li>A: You can register on AZLIBNET by visiting <a href="">www.lib.az</a> and clicking on the "Register" button. You will need to provide your name, email address, password, and phone number. You will also need to agree to the terms and conditions of AZLIBNET.</li>
<li><strong>Q: How can I contact AZLIBNET?</strong></li>
<li>A: You can contact AZLIBNET by visiting <a href="">www.lib.az</a> and clicking on the "Contact Us" button. You will find the address, phone number, email address, and social media accounts of AZLIBNET. You can also fill out a contact form and send your message or inquiry to AZLIBNET.</li>
<li><strong>Q: How can I support AZLIBNET?</strong></li>
<li>A: You can support AZLIBNET by using its services and spreading the word about it. You can also donate to AZLIBNET by visiting <a href="">www.lib.az</a> and clicking on the "Donate" button. You can choose the amount and method of your donation. Your donation will help AZLIBNET to improve its services and expand its collection.</li>
<li><strong>Q: How can I report a problem or give feedback on AZLIBNET?</strong></li>
<li>A: You can report a problem or give feedback on AZLIBNET by visiting <a href="">www.lib.az</a> and clicking on the "Feedback" button. You will be able to rate your experience with AZLIBNET and write your comments or suggestions. Your feedback will help AZLIBNET to improve its quality and performance.</li>
<li><strong>Q: How can I unsubscribe from AZLIBNET?</strong></li>
<li>A: You can unsubscribe from AZLIBNET by visiting <a href="">www.lib.az</a> and clicking on the "Unsubscribe" button. You will need to enter your email address and confirm your decision. You will no longer receive emails or notifications from AZLIBNET.</li>
</ul></p> 197e85843d<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chicken Gun Mod Menu 2.8.06 How to Hack Chicken Gun with Ease and Fun.md
DELETED
@@ -1,224 +0,0 @@
<h1>Chicken Gun Mod Menu 2.8.06 Download: Everything You Need to Know</h1>
<p>If you are looking for a fun and quirky first-person shooter game with chickens, guns, and explosives, then you might want to check out Chicken Gun by ChaloApps. This game lets you customize your rooster with different weapons, outfits, and accessories, and then join online matches with other players in two modes: team deathmatch or free for all.</p>
<p>But what if you want to spice up your chicken shooting experience even more? Well, there is a way to do that with a mod menu that gives you access to various cheats and hacks that can make you invincible, rich, powerful, and more. In this article, we will tell you everything you need to know about chicken gun mod menu 2.8.06 download, including what it is, how to install it, how to use it, its pros and cons, tips and tricks, reviews and ratings, and more.</p>
<h2>chicken gun mod menu 2.8.06 download</h2><br /><p><b><b>DOWNLOAD</b> 🌟 <a href="https://urlin.us/2uSTUu">https://urlin.us/2uSTUu</a></b></p><br /><br />
<h2>What is Chicken Gun?</h2>
<p>Chicken Gun is an action game developed by ChaloApps that was released in 2020 for Android devices. It has over 50 million downloads on Google Play Store, where it has a rating of 4.4 out of 5 stars based on more than 400 thousand reviews[^14 <h2>What is the Mod Menu?</h2>
<p>A mod menu is a modification or a hack that allows you to change or manipulate certain aspects of a game, such as graphics, gameplay, features, etc. A mod menu usually comes in the form of an APK file that you can download and install on your device, and then access it through a button or a menu in the game.</p>
<p>For Chicken Gun, there is a mod menu that was created by an unknown developer and uploaded on various websites, such as HappyMod, ModApkDone, and AndroidTop. The mod menu claims to offer the following options:</p>
<ul>
<li>God Mode: You become immune to any damage from enemies, bullets, explosions, etc.</li>
<li>Vehicle God Mode: Your vehicle becomes immune to any damage from enemies, bullets, explosions, etc.</li>
<li>Infinity Money: You get unlimited money to buy weapons, outfits, accessories, etc.</li>
<li>Max Level: You reach the maximum level in the game and unlock all the items and features.</li>
<li>No Ads: You disable all the ads that pop up in the game.</li>
<li>Anti Kick: You prevent other players from kicking you out of the match.</li>
<li>Auto Shot: Your weapon automatically shoots at the nearest enemy.</li>
<li>Infinity Ammo: You never run out of ammo for your weapon.</li>
<li>Infinity Grenades: You never run out of grenades to throw at your enemies.</li>
<li>Infinity Jump: You can jump as high and as many times as you want.</li>
<li>Texture Hack: You can change the appearance of the map, the buildings, the objects, etc.</li>
</ul>
<p>As you can see, these options can give you a huge advantage over other players and make the game much easier and more fun. However, they can also come with some risks and drawbacks, which we will discuss later in this article.</p>
<h3>How to Install the Mod Menu</h3>
<p>If you want to try out the mod menu for Chicken Gun, you will need to follow these steps:</p>
<ol>
<li>First, you will need to uninstall the original version of Chicken Gun from your device if you have it. This is because the mod menu will replace it and you cannot have both versions at the same time.</li>
<li>Second, you will need to download the mod menu APK file from one of the websites that we mentioned above or any other source that you trust. Make sure that you download the latest version of the mod menu, which is 2.8.06 as of June 2023.</li>
<li>Third, you will need to enable the installation of unknown sources on your device. This is because the mod menu is not from an official source and your device might block it by default. To do this, go to your device's settings, then security or privacy, then find and toggle on the option that says "allow installation of apps from unknown sources" or something similar.</li>
<li>Fourth, you will need to locate the mod menu APK file that you downloaded on your device's storage. You can use a file manager app or your device's built-in file explorer to do this. Once you find it, tap on it and follow the instructions to install it on your device.</li>
<li>Fifth, you will need to launch the mod menu app on your device. You should see a chicken icon with a gun on your home screen or app drawer. Tap on it and wait for it to load. You should see a screen that says "Chicken Gun Mod Menu" with a list of options and a button that says "Start Game".</li>
</ol>
<p>Congratulations! You have successfully installed the mod menu for Chicken Gun on your device. Now you can enjoy playing the game with cheats and hacks.</p>
<h3>How to Use the Mod Menu</h3>
<p>To use the mod menu for Chicken Gun, you will need to follow these steps:</p>
<ol>
<li>First, you will need to launch the mod menu app on your device if you haven't already. Tap on the chicken icon with a gun and wait for it to load.</li>
<li>Second, you will need to choose which options you want to activate or deactivate from the list. You can tap on each option to toggle it on or off. You will see a green check mark next to each option that is enabled and a red cross mark next to each option that is disabled. You can also use the slider at the bottom of the screen to adjust the volume of the game's sound effects and music.</li>
<li>Third, you will need to tap on the button that says "Start Game" at the bottom of the screen. This will launch Chicken Gun with the mod menu's options applied. You should see a message that says "Chicken Gun Mod Menu by Unknown" at the top of the screen. You can also see a button that says "Mod Menu" at the bottom right corner of the screen.</li>
<li>Fourth, you will need to join or create a match in the game. You can choose between two modes: team deathmatch or free for all. You can also choose between different maps, such as farm, city, desert, etc. You can also customize your rooster with different weapons, outfits, and accessories.</li>
<li>Fifth, you will need to tap on the button that says "Mod Menu" at any time during the match to access and activate the mod menu's options. You will see a pop-up window that shows the same list of options that you saw before. You can tap on each option to toggle it on or off. You can also close the window by tapping on the button that says "Close".</li>
</ol>
<p>That's it! You have successfully used the mod menu for Chicken Gun. Now you can enjoy playing the game with cheats and hacks.</p>
<p>chicken gun mod menu 2.8.06 apk<br />
chicken gun mod menu 2.8.06 mediafire<br />
chicken gun mod menu 2.8.06 unlimited money<br />
chicken gun mod menu 2.8.06 god mode<br />
chicken gun mod menu 2.8.06 anti ban<br />
chicken gun mod menu 2.8.06 latest version<br />
chicken gun mod menu 2.8.06 no root<br />
chicken gun mod menu 2.8.06 android<br />
chicken gun mod menu 2.8.06 ios<br />
chicken gun mod menu 2.8.06 free download<br />
chicken gun hack 2.8.06 download<br />
chicken gun hack 2.8.06 apk<br />
chicken gun hack 2.8.06 mediafire<br />
chicken gun hack 2.8.06 unlimited money<br />
chicken gun hack 2.8.06 god mode<br />
chicken gun hack 2.8.06 anti ban<br />
chicken gun hack 2.8.06 latest version<br />
|
61 |
-
chicken gun hack 2.8.06 no root<br />
|
62 |
-
chicken gun hack 2.8.06 android<br />
|
63 |
-
chicken gun hack 2.8.06 ios<br />
|
64 |
-
chicken gun hack 2.8.06 free download<br />
|
65 |
-
how to download chicken gun mod menu 2.8.06<br />
|
66 |
-
how to install chicken gun mod menu 2.8.06<br />
|
67 |
-
how to use chicken gun mod menu 2.8.06<br />
|
68 |
-
how to update chicken gun mod menu 2.8.06<br />
|
69 |
-
how to get chicken gun mod menu 2.8.06<br />
|
70 |
-
how to play chicken gun mod menu 2.8.06<br />
|
71 |
-
how to hack chicken gun with mod menu 2.8.06<br />
|
72 |
-
how to uninstall chicken gun mod menu 2.8.06<br />
|
73 |
-
how to fix chicken gun mod menu 2.8.06 not working<br />
|
74 |
-
download chicken gun mod menu version 2.8.06 for android<br />
|
75 |
-
download chicken gun mod menu version 2.8.06 for ios<br />
|
76 |
-
download chicken gun mod menu version 2.8.06 for pc<br />
|
77 |
-
download chicken gun mod menu version 2.8.06 for mac<br />
|
78 |
-
download chicken gun mod menu version 2.8.06 for windows<br />
|
79 |
-
download chicken gun mod menu version 2.8.06 for laptop<br />
|
80 |
-
download chicken gun mod menu version 2.8.06 for tablet<br />
|
81 |
-
download chicken gun mod menu version 2.8.06 for iphone<br />
|
82 |
-
download chicken gun mod menu version 2.8.06 for ipad<br />
|
83 |
-
download chicken gun mod menu version 2.8.06 for chromebook<br />
|
84 |
-
best settings for chicken gun mod menu 2.8</p>
|
85 |
-
<h4>God Mode</h4>
|
86 |
-
<p>God mode is one of the options that you can enable or disable from the mod menu. When you enable god mode, you become immune to any damage from enemies, bullets, explosions, etc. This means that you can survive any attack and never die in the game. This can make the game more fun and less frustrating, especially if you are new to the game or if you are facing tough opponents.</p>
|
87 |
-
<p>To enable god mode, you need to tap on the option that says "God Mode" from the mod menu's list. You will see a green check mark next to it when it is enabled. To disable god mode, you need to tap on the option again. You will see a red cross mark next to it when it is disabled.</p>
|
88 |
-
<h4>Vehicle God Mode</h4>
|
89 |
-
<p>Vehicle god mode is another option that you can enable or disable from the mod menu. When you enable vehicle god mode, your vehicle becomes immune to any damage from enemies, bullets, explosions, etc. This means that your vehicle can survive any attack and never break down in the game. This can make the game more fun and less frustrating, especially if you like to drive around and explore the map.</p>
|
90 |
-
<p>To enable vehicle god mode, you need to tap on the option that says "Vehicle God Mode" from the mod menu's list. You will see a green check mark next to it when it is enabled. To disable vehicle god mode, you need to tap on the option again. You will see a red cross mark next to it when it is disabled.</p> <h4>Infinity Money</h4>
|
91 |
-
<p>Infinity money is another option that you can enable or disable from the mod menu. When you enable infinity money, you get unlimited money to buy weapons, outfits, accessories, etc. in the game. This means that you can afford any item and customize your rooster as much as you want. This can make the game more fun and more varied, especially if you like to experiment with different combinations and styles.</p>
|
92 |
-
<p>To enable infinity money, you need to tap on the option that says "Infinity Money" from the mod menu's list. You will see a green check mark next to it when it is enabled. To disable infinity money, you need to tap on the option again. You will see a red cross mark next to it when it is disabled.</p>
|
93 |
-
<h4>Max Level</h4>
|
94 |
-
<p>Max level is another option that you can enable or disable from the mod menu. When you enable max level, you reach the maximum level in the game and unlock all the items and features. This means that you can access any weapon, outfit, accessory, map, mode, etc. in the game without having to play for a long time or complete any challenges. This can make the game more fun and more rewarding, especially if you want to try everything and have no limitations.</p>
|
95 |
-
<p>To enable max level, you need to tap on the option that says "Max Level" from the mod menu's list. You will see a green check mark next to it when it is enabled. To disable max level, you need to tap on the option again. You will see a red cross mark next to it when it is disabled.</p>
|
96 |
-
<h4>No Ads</h4>
|
97 |
-
<p>No ads is another option that you can enable or disable from the mod menu. When you enable no ads, you disable all the ads that pop up in the game. This means that you can play the game without any interruptions or distractions from annoying ads. This can make the game more enjoyable and less annoying, especially if you hate ads and want to focus on the game.</p>
|
98 |
-
<p>To enable no ads, you need to tap on the option that says "No Ads" from the mod menu's list. You will see a green check mark next to it when it is enabled. To disable no ads, you need to tap on the option again. You will see a red cross mark next to it when it is disabled.</p> <h2>Tips and Tricks for Playing Chicken Gun with the Mod Menu</h2>
|
99 |
-
<p>If you decide to download and use the mod menu for Chicken Gun, you might want to know some tips and tricks that can help you make the most of it. Here are some of them:</p>
|
100 |
-
<ul>
|
101 |
-
<li>Use the mod menu wisely and moderately. Don't abuse or overuse the cheats and hacks, as they might ruin the fun and challenge of the game, or make other players angry and report you. Use them only when you need them or when you want to have some extra fun.</li>
|
102 |
-
<li>Use the mod menu discreetly and carefully. Don't show off or brag about your cheats and hacks, as they might attract unwanted attention and suspicion from other players or the game's developers. Use them only when you are sure that no one is watching or noticing.</li>
|
103 |
-
<li>Use the mod menu responsibly and ethically. Don't harm or harass other players with your cheats and hacks, as they might cause trouble and conflict in the game's community. Use them only when you are playing with friends or with people who don't mind.</li>
|
104 |
-
<li>Use the mod menu creatively and experimentally. Don't limit yourself to the default options of the mod menu, as they might get boring and repetitive after a while. Use them to create your own scenarios, challenges, stories, etc. in the game.</li>
|
105 |
-
</ul>
|
106 |
-
<p>By following these tips and tricks, you can enjoy playing Chicken Gun with the mod menu without any problems or regrets.</p>
|
107 |
-
<h2>Reviews and Ratings of Chicken Gun and the Mod Menu</h2>
|
108 |
-
<p>Before you download and use the mod menu for Chicken Gun, you might want to know what other players think about it. Here are some of the reviews and ratings of Chicken Gun and the mod menu that we found online:</p>
|
109 |
-
<h3>Reviews of Chicken Gun</h3>
|
110 |
-
<p>Most of the reviews of Chicken Gun are positive and praise the game for its fun, humor, graphics, gameplay, customization, etc. Here are some examples:</p>
|
111 |
-
<blockquote>
|
112 |
-
<p>"This game is awesome! It's so funny and addictive. I love how you can customize your chicken with different weapons, outfits, and accessories. The graphics are also amazing and colorful. The gameplay is smooth and easy to control. The online matches are also exciting and challenging. I highly recommend this game to anyone who likes shooting games with chickens."</p>
|
113 |
-
<cite>A Google Play user</cite>
|
114 |
-
</blockquote>
|
115 |
-
<blockquote>
|
116 |
-
<p>"This game is hilarious! It's so fun to play with friends and laugh at the crazy things that happen. I love how you can drive vehicles, throw grenades, fly around, etc. The graphics are also great and realistic. The gameplay is fast-paced and action-packed. The online matches are also competitive and fair. I highly recommend this game to anyone who likes shooting games with chickens."</p>
|
117 |
-
<cite>A Google Play user</cite>
|
118 |
-
</blockquote>
|
119 |
-
<blockquote>
|
120 |
-
<p>"This game is amazing! It's so fun and entertaining. I love how you can customize your chicken with different weapons, outfits, and accessories. The graphics are also beautiful and detailed. The gameplay is smooth and responsive. The online matches are also thrilling and enjoyable. I highly recommend this game to anyone who likes shooting games with chickens."</p>
|
121 |
-
<cite>A Google Play user</cite>
|
122 |
-
</blockquote>
|
123 |
-
<h3>Reviews of the Mod Menu</h3>
|
124 |
-
<p>The reviews of the mod menu for Chicken Gun are mixed and vary depending on the source, version, option, etc. Here are some examples:</p>
|
125 |
-
<blockquote>
|
126 |
-
<p>"This mod menu is awesome! It works perfectly and gives you access to all the cheats and hacks that you want. You can become invincible, rich, powerful, etc. in the game. You can also change the appearance of the game as you like. It's very easy to install and use. I highly recommend this mod menu to anyone who wants to have more fun in Chicken Gun."</p>
|
127 |
-
<cite>A HappyMod user</cite>
|
128 |
-
</blockquote>
|
129 |
-
<blockquote>
|
130 |
-
<p>"This mod menu is good but not great. It works well for some options but not for others. You can become immune to damage, get unlimited money, etc., but you can't access all the items or features in the game. You can also change the appearance of the game but not very much. It's fairly easy to install but not very easy to use. I recommend this mod menu to anyone who wants to try some cheats in Chicken Gun."</p>
|
131 |
-
<cite>A ModApkDone user</cite>
|
132 |
-
</blockquote>
|
133 |
-
<blockquote>
|
134 |
-
<p>"This mod menu is bad and dangerous. It doesn't work properly and causes a lot of problems in the game. You can't become immune to damage, get unlimited money, etc., but you can get banned or kicked out of the game. You can also change the appearance of the game but not in a good way. It's very hard to install and use. I don't recommend this mod menu to anyone who wants to play Chicken Gun safely and fairly."</p>
|
135 |
-
<cite>An AndroidTop user</cite>
|
136 |
-
</blockquote>
|
137 |
-
<h3>Ratings of Chicken Gun</h3>
|
138 |
-
<p>The ratings of Chicken Gun are mostly high and positive, reflecting the game's popularity and quality. Here are some of the ratings of Chicken Gun that we found online:</p>
|
139 |
-
<table>
|
140 |
-
<tr>
|
141 |
-
<th>Source</th>
|
142 |
-
<th>Rating</th>
|
143 |
-
<th>Scale</th>
|
144 |
-
</tr>
|
145 |
-
<tr>
|
146 |
-
<td>Google Play Store</td>
|
147 |
-
<td>4.4</td>
|
148 |
-
<td>5</td>
|
149 |
-
</tr>
|
150 |
-
<tr>
|
151 |
-
<td>App Store</td>
|
152 |
-
<td>4.6</td>
|
153 |
-
<td>5</td>
|
154 |
-
</tr>
|
155 |
-
<tr>
|
156 |
-
<td>ApkPure</td>
|
157 |
-
<td>8.9</td>
|
158 |
-
<td>10</td>
|
159 |
-
</tr>
|
160 |
-
<tr>
|
161 |
-
<td>AppGrooves</td>
|
162 |
-
<td>4.5</td>
|
163 |
-
<td>5</td>
|
164 |
-
</tr>
|
165 |
-
<tr>
|
166 |
-
<td>Sensor Tower</td>
|
167 |
-
<td>4.7</td>
|
168 |
-
<td>5</td>
|
169 |
-
</tr>
|
170 |
-
<tr>
|
171 |
-
<td>Average</td>
|
172 |
-
<td>4.6</td>
|
173 |
-
<td>5</td>
|
174 |
-
</tr>
|
175 |
-
</table>
|
176 |
-
<p>As you can see, Chicken Gun has an average rating of 4.6 out of 5 stars, which is very impressive and commendable.</p>
|
177 |
-
<h3>Ratings of the Mod Menu</h3>
|
178 |
-
<p>The ratings of the mod menu for Chicken Gun are mostly low and negative, reflecting the mod menu's unreliability and riskiness. Here are some of the ratings of the mod menu that we found online:</p>
|
179 |
-
<table>
|
180 |
-
<tr>
|
181 |
-
<th>Source</th>
|
182 |
-
<th>Rating</th>
|
183 |
-
<th>Scale</th>
|
184 |
-
</tr>
|
185 |
-
<tr>
|
186 |
-
<td>HappyMod</td>
|
187 |
-
<td>3.8</td>
|
188 |
-
<td>5</td>
|
189 |
-
</tr>
|
190 |
-
<tr>
|
191 |
-
<td>ModApkDone</td>
|
192 |
-
<td>3.2</td>
|
193 |
-
<td>5</td>
|
194 |
-
</tr>
|
195 |
-
<tr>
|
196 |
-
<td>AndroidTop</td>
|
197 |
-
<td>2.7</td>
|
198 |
-
<td>5</td></tr>
|
199 |
-
<tr>
|
200 |
-
<td>Average</td>
|
201 |
-
<td>3.2</td>
|
202 |
-
<td>5</td>
|
203 |
-
</tr>
|
204 |
-
</table>
|
205 |
-
<p>As you can see, the mod menu for Chicken Gun has an average rating of 3.2 out of 5 stars, which is not very impressive or commendable.</p>
|
206 |
-
<h2>Conclusion</h2>
|
207 |
-
<p>In conclusion, chicken gun mod menu 2.8.06 download is a way to enhance your chicken shooting experience with various cheats and hacks that can make you invincible, rich, powerful, and more. However, it also comes with some risks and drawbacks that can ruin your fun and challenge, or make you banned or kicked out of the game. Therefore, you should be careful and responsible when using the mod menu, and weigh the pros and cons before deciding whether to download it or not.</p>
|
208 |
-
<p>If you are interested in trying out the mod menu for Chicken Gun, you can follow the steps that we provided in this article to download, install, and use it on your device. You can also follow the tips and tricks that we provided to make the most of it. You can also check the reviews and ratings that we provided to see what other players think about it.</p>
|
209 |
-
<p>We hope that this article was helpful and informative for you. If you have any questions or comments about chicken gun mod menu 2.8.06 download, feel free to leave them below. We would love to hear from you and help you out.</p>
|
210 |
-
<p>Thank you for reading and happy chicken shooting!</p>
|
211 |
-
<h2>FAQs</h2>
|
212 |
-
<p>Here are some of the frequently asked questions about chicken gun mod menu 2.8.06 download:</p>
|
213 |
-
<h3>Q: Is chicken gun mod menu 2.8.06 download safe?</h3>
|
214 |
-
<p>A: Chicken gun mod menu 2.8.06 download is not completely safe, as it comes from an unknown source that might contain viruses, malware, spyware, etc. It might also cause glitches, bugs, crashes, errors, etc. in the game. It might also get you banned or kicked out of the game if you are detected or reported by other players or the game's developers.</p>
|
215 |
-
<h3>Q: Is chicken gun mod menu 2.8.06 download legal?</h3>
|
216 |
-
<p>A: Chicken gun mod menu 2.8.06 download is not completely legal, as it violates the terms and conditions of the game and the Google Play Store. It also infringes on the intellectual property rights of the game's developers and publishers.</p>
|
217 |
-
<h3>Q: Is chicken gun mod menu 2.8.06 download free?</h3>
|
218 |
-
<p>A: Chicken gun mod menu 2.8.06 download is free to download and use on your device, as it does not require any payment or subscription. However, it might cost you some data or storage space on your device.</p>
|
219 |
-
<h3>Q: Is chicken gun mod menu 2.8.06 download compatible with my device?</h3>
|
220 |
-
<p>A: Chicken gun mod menu 2.8.06 download is compatible with most Android devices that run on Android 4.4 or higher versions. However, it might not work properly or at all on some devices due to different specifications or settings.</p>
|
221 |
-
<h3>Q: Is chicken gun mod menu 2.8.06 download updated?</h3>
|
222 |
-
<p>A: Chicken gun mod menu 2.8.06 download is updated regularly by its developer to match the latest version of the game and fix any issues or bugs that might occur.</p>
|
223 |
-
<br />
|
224 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Bible Word Puzzle APK and Play Offline Word Games with Friends.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Bible Word Puzzle Apkpure: A Fun and Educational Game for Christians</h1>
|
3 |
-
<p>If you are looking for a fun and educational game that can help you learn more about the Bible, you might want to try <strong>Bible Word Puzzle Apkpure</strong>. This is a word connect game that teaches you Bible words and verses while you solve puzzles and quizzes. You can download this game from <a href="(^1^)">Apkpure.com</a>, a website that offers free and safe Android apps. In this article, we will tell you more about this game, its features, benefits, and alternatives.</p>
|
4 |
-
<h2>bible word puzzle apkpure</h2><br /><p><b><b>DOWNLOAD</b> ☆ <a href="https://jinyurl.com/2uNLuj">https://jinyurl.com/2uNLuj</a></b></p><br /><br />
|
5 |
-
<h2>What is Bible Word Puzzle Apkpure?</h2>
|
6 |
-
<p>Bible Word Puzzle Apkpure is a word connect game that is designed for Christians who want to learn more about the Bible. The game has two main modes: word games and Bible stories. In the word games mode, you have to connect letters to build valid words and unlock Bible verses. In the Bible stories mode, you have to find illustration fragments in word puzzles to complete Bible stories. You can also interact with the touch-activated pictures to explore Bible verses.</p>
|
7 |
-
<p>Bible Word Puzzle Apkpure is a game that can be downloaded from <a href="(^1^)">Apkpure.com</a>, a website that offers free and safe Android apps. Apkpure.com is a popular alternative to Google Play Store, especially for users who have limited access to Google services or who want to download apps that are not available in their region. You can download the latest version of Bible Word Puzzle Apkpure from <a href="(^1^)">this link</a>.</p>
|
8 |
-
<h2>What are the features of Bible Word Puzzle Apkpure?</h2>
|
9 |
-
<p>Bible Word Puzzle Apkpure has many features that make it an enjoyable and educational game for Christians. Here are some of them:</p>
|
10 |
-
<p>bible word puzzle game offline<br />
|
11 |
-
bible word puzzle word games apk<br />
|
12 |
-
bible word puzzle apk download<br />
|
13 |
-
bible word puzzle app for android<br />
|
14 |
-
bible word puzzle free coins<br />
|
15 |
-
bible word puzzle mod apk<br />
|
16 |
-
bible word puzzle cheats and answers<br />
|
17 |
-
bible word puzzle daily challenge<br />
|
18 |
-
bible word puzzle levels and verses<br />
|
19 |
-
bible word puzzle online play<br />
|
20 |
-
bible word puzzle crossword cookies<br />
|
21 |
-
bible word puzzle connect words<br />
|
22 |
-
bible word puzzle fun and quiz<br />
|
23 |
-
bible word puzzle unlock levels<br />
|
24 |
-
bible word puzzle latest version<br />
|
25 |
-
bible word puzzle review and rating<br />
|
26 |
-
bible word puzzle tips and tricks<br />
|
27 |
-
bible word puzzle update and news<br />
|
28 |
-
bible word puzzle best words<br />
|
29 |
-
bible word puzzle how to play<br />
|
30 |
-
bible word puzzle for kids and adults<br />
|
31 |
-
bible word puzzle with friends<br />
|
32 |
-
bible word puzzle rewards and prizes<br />
|
33 |
-
bible word puzzle no ads<br />
|
34 |
-
bible word puzzle unlimited coins<br />
|
35 |
-
bible word puzzle easy and hard<br />
|
36 |
-
bible word puzzle new features<br />
|
37 |
-
bible word puzzle install and uninstall<br />
|
38 |
-
bible word puzzle similar games<br />
|
39 |
-
bible word puzzle feedback and support<br />
|
40 |
-
bible word puzzle guide and tutorial<br />
|
41 |
-
bible word puzzle hack and mod<br />
|
42 |
-
bible word puzzle themes and backgrounds<br />
|
43 |
-
bible word puzzle categories and topics<br />
|
44 |
-
bible word puzzle languages and translations<br />
|
45 |
-
bible word puzzle bugs and fixes<br />
|
46 |
-
bible word puzzle questions and answers<br />
|
47 |
-
bible word puzzle screenshots and videos<br />
|
48 |
-
bible word puzzle developer and publisher<br />
|
49 |
-
bible word puzzle size and requirements</p>
|
50 |
-
<h3>A featuring Biblical word puzzle game</h3>
|
51 |
-
<p>Bible Word Puzzle Apkpure is a game that features Biblical words and verses in its puzzles and quizzes. You can learn new words and meanings from the Bible, as well as memorize your favorite verses. The game also has colorful illustrations and interactive contents of Bible stories, such as Noah's Ark, the Birth of Jesus, the Resurrection of Jesus, and so on. You can collect these illustrations and share them with your friends.</p>
|
52 |
-
<h3>A game that can be played offline and with friends</h3>
|
53 |
-
<p>Bible Word Puzzle Apkpure is a game that can be played offline anywhere anytime. You don't need an internet connection to enjoy this game. You can also play this game with your friends by taking screenshots and sharing them on Facebook. You can challenge each other to solve more puzzles and quizzes, or help each other out with hints.</p>
|
54 |
-
<h3>A game that has over 900 levels and challenging Bible quizzes</h3>
|
55 |
-
<p>Bible Word Puzzle Apkpure is a game that has over 900 levels of word games and Bible quizzes. The game starts as an easy word game but gets difficult as you play more levels. You will encounter challenging puzzles and quizzes that test your knowledge of the Bible and your vocabulary skills. You can also earn rewards and coins every day by playing the game.</p>
|
56 |
-
<h2>What are the benefits of playing Bible Word Puzzle Apkpure?</h2>
|
57 |
-
<p>Bible Word Puzzle Apkpure is not only a fun game, but also a beneficial one for Christians. Here are some of the benefits of playing this game:</p>
|
58 |
-
<h3>A game that improves vocabulary and memory skills</h3>
|
59 |
-
<p>Bible Word Puzzle Apkpure is a game that can help you improve your vocabulary and memory skills. By playing this game, you can learn new words and meanings from the Bible, as well as recall the verses that you have learned. You can also enhance your spelling and word recognition skills by connecting letters and finding words. The game also has different levels of difficulty that challenge your brain and keep it sharp.</p>
|
60 |
-
<h3>A game that helps to study the Bible and learn Bible words</h3>
|
61 |
-
<p>Bible Word Puzzle Apkpure is a game that can help you study the Bible and learn Bible words in a fun and interactive way. By playing this game, you can explore different Bible stories and verses, as well as their contexts and meanings. You can also discover the connections between words and verses, and how they relate to each other. The game also has quizzes that test your knowledge of the Bible and help you remember what you have learned.</p>
|
62 |
-
<h3>A game that inspires and encourages Christians in their faith</h3>
|
63 |
-
<p>Bible Word Puzzle Apkpure is a game that can inspire and encourage Christians in their faith. By playing this game, you can experience the beauty and wisdom of the Bible, as well as its messages of hope and love. You can also feel closer to God and His word, and strengthen your relationship with Him. The game also has inspirational illustrations and contents that you can share with your friends and family, and spread the gospel to others.</p>
|
64 |
-
<h2>What are some alternatives to Bible Word Puzzle Apkpure?</h2>
|
65 |
-
<p>If you are looking for some other games that are similar to Bible Word Puzzle Apkpure, you might want to check out these alternatives:</p>
|
66 |
-
<h3>Bible Verse Collect</h3>
|
67 |
-
<p>Bible Verse Collect is another word connect game that features Bible verses and stories. You can collect Bible verses by swiping letters and filling blanks. You can also play mini games such as word search, crossword, jigsaw puzzle, and memory match. You can download this game from <a href="">Google Play Store</a> or <a href="">Apple App Store</a>.</p>
|
68 |
-
<h3>Bible Word Search Puzzle Games</h3>
|
69 |
-
<p>Bible Word Search Puzzle Games is a word search game that has over 1000 levels of Bible-themed puzzles. You can find hidden words related to the Bible in different categories such as books, characters, places, events, etc. You can also learn more about the Bible by reading the trivia facts after each level. You can download this game from <a href="">Google Play Store</a>.</p>
|
70 |
-
<h3>Holyscapes - Bible Word Game</h3>
|
71 |
-
<p>Holyscapes - Bible Word Game is a word puzzle game that has beautiful landscapes inspired by the Bible. You can connect letters to form words and fill in the crossword grid. You can also collect coins and gems to unlock new scenes and themes. You can download this game from <a href="">Google Play Store</a> or <a href="">Apple App Store</a>.</p>
|
72 |
-
<h2>Conclusion</h2>
|
73 |
-
<p>Bible Word Puzzle Apkpure is a fun and educational game for Christians who want to learn more about the Bible. It is a word connect game that teaches you Bible words and verses while you solve puzzles and quizzes. You can download this game from Apkpure.com, a website that offers free and safe Android apps. This game has many features, benefits, and alternatives that make it an enjoyable and worthwhile game for Christians.</p>
|
74 |
-
<h2>FAQs</h2>
|
75 |
-
<p>Here are some frequently asked questions about Bible Word Puzzle Apkpure:</p>
|
76 |
-
<table>
|
77 |
-
<tr><td><strong>Q: How do I download Bible Word Puzzle Apkpure?</strong></td><td><strong>A: You can download this game from <a href="">Apkpure.com</a>, a website that offers free and safe Android apps. You need to have an Android device with Android 4.4 or higher version.</strong></td></tr>
|
78 |
-
<tr><td><strong>Q: How do I play Bible Word Puzzle Apkpure?</strong></td><td><strong>A: You can play this game by connecting letters to build valid words and unlock Bible verses. You can also find illustration fragments in word puzzles to complete Bible stories.</strong></td></tr>
|
79 |
-
<tr><td><strong>Q: What are the rewards for playing Bible Word Puzzle Apkpure?</strong></td><td><strong>A: You can earn rewards and coins every day by playing the game. You can also collect colorful illustrations and interactive contents of Bible stories, and share them with your friends.</strong></td></tr>
|
80 |
-
<tr><td><strong>Q: What are the challenges for playing Bible Word Puzzle Apkpure?</strong></td><td><strong>A: You will encounter challenging puzzles and quizzes that test your knowledge of the Bible and your vocabulary skills. You will also face different levels of difficulty that challenge your brain and keep it sharp.</strong></td></tr>
|
81 |
-
<tr><td><strong>Q: What are the alternatives for playing Bible Word Puzzle Apkpure?</strong></td><td><strong>A: You can try other games that are similar to Bible Word Puzzle Apkpure, such as Bible Verse Collect, Bible Word Search Puzzle Games, and Holyscapes - Bible Word Game.</strong></td></tr>
|
82 |
-
</table></p>
|
83 |
-
<br />
|
84 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Music Playlist with One Click - No Ads No Fees.md
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Music Playlists and Enjoy Your Favorite Songs Offline</h1>
|
3 |
-
<p>If you love listening to music, you probably have some favorite songs that you always want to have access to. Whether you want to create the perfect mood for a party, a workout, a road trip, or just relax at home, having a music playlist can help you enjoy your favorite tunes without interruptions.</p>
|
4 |
-
<h2>download music playlist</h2><br /><p><b><b>Download</b> ★★★ <a href="https://jinyurl.com/2uNKYc">https://jinyurl.com/2uNKYc</a></b></p><br /><br />
|
5 |
-
<p>But what if you don't have an internet connection or you want to save your data? Or what if you want to listen to your music in the background while doing other things on your phone? In that case, downloading your music playlists can be a great solution.</p>
|
6 |
-
<p>In this article, we will show you how to download music playlists from different platforms and how to manage and play them on your device. By following these simple steps, you will be able to enjoy your favorite songs offline anytime and anywhere.</p>
|
7 |
-
<h2>Introduction</h2>
|
8 |
-
<h3>What is a music playlist and why you should download it</h3>
|
9 |
-
<p>A music playlist is a collection of songs that are grouped together based on a theme, genre, mood, artist, or any other criteria. You can create your own playlists or find existing ones on various music streaming services.</p>
|
10 |
-
<p>How to download music playlist from YouTube<br />
|
11 |
-
Download music playlist for free online<br />
|
12 |
-
Best sites to download music playlist<br />
|
13 |
-
Download music playlist to iPhone<br />
|
14 |
-
Download music playlist to MP3<br />
|
15 |
-
Download music playlist from Spotify<br />
|
16 |
-
Download music playlist for offline listening<br />
|
17 |
-
Download music playlist for workout<br />
|
18 |
-
Download music playlist for road trip<br />
|
19 |
-
Download music playlist for party<br />
|
20 |
-
How to download music playlist from SoundCloud<br />
|
21 |
-
Download music playlist for meditation<br />
|
22 |
-
Download music playlist for sleep<br />
|
23 |
-
Download music playlist for study<br />
|
24 |
-
Download music playlist for relaxation<br />
|
25 |
-
How to download music playlist from Apple Music<br />
|
26 |
-
Download music playlist to Android<br />
|
27 |
-
Download music playlist to computer<br />
|
28 |
-
Best apps to download music playlist<br />
|
29 |
-
Download music playlist for running<br />
|
30 |
-
Download music playlist from Amazon Music<br />
|
31 |
-
Download music playlist for yoga<br />
|
32 |
-
Download music playlist for gaming<br />
|
33 |
-
Download music playlist for wedding<br />
|
34 |
-
How to download music playlist from Deezer<br />
|
35 |
-
Download music playlist to USB<br />
|
36 |
-
Download music playlist to CD<br />
|
37 |
-
Best software to download music playlist<br />
|
38 |
-
Download music playlist for karaoke<br />
|
39 |
-
Download music playlist for kids<br />
|
40 |
-
How to download music playlist from Pandora<br />
|
41 |
-
Download music playlist for Christmas<br />
|
42 |
-
Download music playlist for Halloween<br />
|
43 |
-
Download music playlist for birthday<br />
|
44 |
-
How to download music playlist from Tidal<br />
|
45 |
-
Download music playlist to SD card<br />
|
46 |
-
Download music playlist to iPod<br />
|
47 |
-
Best websites to download music playlist<br />
|
48 |
-
Download music playlist for cooking<br />
|
49 |
-
Download music playlist for shower<br />
|
50 |
-
How to download music playlist from Google Play Music<br />
|
51 |
-
Download music playlist for summer<br />
|
52 |
-
Download music playlist for winter<br />
|
53 |
-
Download music playlist for spring<br />
|
54 |
-
How to download music playlist from Audiomack<br />
|
55 |
-
Download music playlist to Dropbox<br />
|
56 |
-
Download music playlist to iTunes<br />
|
57 |
-
Best tools to download music playlist<br />
|
58 |
-
Download music playlist for rap</p>
|
59 |
-
<p>Downloading your music playlists can have many benefits, such as:</p>
|
60 |
-
<ul>
|
61 |
-
<li>You can listen to your music offline without relying on an internet connection or using your data.</li>
|
62 |
-
<li>You can listen to your music in the background while using other apps on your phone.</li>
|
63 |
-
<li>You can save battery life by avoiding streaming and buffering.</li>
|
64 |
-
<li>You can avoid ads and interruptions that may ruin your listening experience.</li>
|
65 |
-
<li>You can have more control over your music library and customize it according to your preferences.</li>
|
66 |
-
</ul>
|
67 |
-
<h3>How to choose the best music streaming service for your needs</h3>
|
68 |
-
<p>There are many music streaming services available today, each offering different features, prices, and catalogs. Some of the most popular ones are YouTube Music, Spotify, Apple Music, Amazon Music, Deezer, Tidal, and more.</p>
|
69 |
-
<p>To choose the best music streaming service for your needs, you should consider the following factors:</p>
|
70 |
-
<ul>
|
71 |
-
<li>The size and variety of the music catalog. You want a service that has a large and diverse selection of songs, artists, genres, and playlists that suit your taste and mood.</li>
|
72 |
-
<li>The quality and format of the audio. You want a service that offers high-quality audio and supports different formats such as MP3, AAC, FLAC, etc.</li>
|
73 |
-
<li>The availability and compatibility of the service. You want a service that is available in your country and compatible with your device and operating system.</li>
|
74 |
-
<li>The price and features of the subscription. You want a service that offers a reasonable price and features that match your needs and expectations. For example, some services offer offline listening, ad-free playback, background play, family plans, and more.</li> </ul> <h2>How to Download Music Playlists from Different Platforms</h2>
|
75 |
-
<h3>How to download music playlists from YouTube Music</h3>
|
76 |
-
<p>YouTube Music is a music streaming service that allows you to access millions of songs, albums, and playlists from YouTube and other sources. You can also create your own playlists and upload your own music to the service.</p>
|
77 |
-
<p>To download music playlists from YouTube Music, you need to have a YouTube Music Premium or YouTube Premium subscription, which costs $9.99 or $11.99 per month respectively. With these subscriptions, you can download up to 100,000 songs and listen to them offline for up to 30 days.</p>
|
78 |
-
<p>Here are the steps to download music playlists from YouTube Music:</p>
|
79 |
-
<h4>Step 1: Get a YouTube Music Premium or YouTube Premium subscription</h4>
|
80 |
-
<p>To get a YouTube Music Premium or YouTube Premium subscription, you need to sign in to your Google account and go to the YouTube Music or YouTube website or app. Then, you need to click on the profile icon and select "Get YouTube Premium" or "Get YouTube Music Premium". You can then choose your payment method and confirm your purchase.</p>
|
81 |
-
<h4>Step 2: Choose the songs, albums, or playlists that you want to download</h4>
|
82 |
-
<p>To choose the songs, albums, or playlists that you want to download, you need to browse or search for them on the YouTube Music website or app. You can also access your own playlists and uploads by clicking on the library icon.</p>
|
83 |
-
<h4>Step 3: Tap the download button and wait for the process to finish</h4>
|
84 |
-
<p>To download the songs, albums, or playlists that you have chosen, you need to tap on the download button that appears next to them. You can also tap on the menu icon and select "Download" from the options. You will see a progress bar that shows how much of the download is completed. Once the download is finished, you will see a checkmark icon that indicates that the songs, albums, or playlists are available offline.</p>
|
85 |
-
<h3>How to download music playlists from Spotify</h3>
|
86 |
-
<p>Spotify is another popular music streaming service that offers over 70 million songs, podcasts, and playlists. You can also create your own playlists and follow other users and artists on the service.</p>
|
87 |
-
<p>To download music playlists from Spotify, you need to have a Spotify Premium subscription, which costs $9.99 per month. With this subscription, you can download up to 10,000 songs and listen to them offline for up to 30 days.</p>
|
88 |
-
<p>Here are the steps to download music playlists from Spotify:</p>
|
89 |
-
<h4>Step 1: Get a Spotify Premium subscription</h4>
|
90 |
-
<p>To get a Spotify Premium subscription, you need to sign up for a Spotify account and go to the Spotify website or app. Then, you need to click on the profile icon and select "Upgrade". You can then choose your payment method and confirm your purchase.</p>
|
91 |
-
<h4>Step 2: Create or find the playlists that you want to download</h4>
|
92 |
-
<p>To create or find the playlists that you want to download, you need to use the search function or browse through the categories on the Spotify website or app. You can also access your own playlists and followings by clicking on the library icon.</p>
|
93 |
-
<h4>Step 3: Toggle the download switch and wait for the process to finish</h4>
|
94 |
-
<p>To download the playlists that you have created or found, you need to toggle the download switch that appears at the top of each playlist. You will see a green arrow icon that shows that the playlist is being downloaded. Once the download is finished, you will see a green checkmark icon that indicates that the playlist is available offline.</p> <h3>Summary of the Main Points</h3>
|
95 |
-
<p>In this article, we have learned how to download music playlists and enjoy your favorite songs offline. We have covered the following topics:</p>
|
96 |
-
<ul>
|
97 |
-
<li>What is a music playlist and why you should download it.</li>
|
98 |
-
<li>How to choose the best music streaming service for your needs.</li>
|
99 |
-
<li>How to download music playlists from different platforms, such as YouTube Music, Spotify, Apple Music, Amazon Music, Deezer, Tidal, and more.</li>
|
100 |
-
<li>How to manage and play your downloaded music playlists on your device.</li>
|
101 |
-
</ul>
|
102 |
-
<h3>Tips and Recommendations for Downloading Music Playlists</h3>
|
103 |
-
<p>Here are some tips and recommendations for downloading music playlists:</p>
|
104 |
-
<ul>
|
105 |
-
<li>Make sure you have enough space on your device before downloading music playlists. You can check your storage settings or use a memory card to expand your capacity.</li>
|
106 |
-
<li>Make sure you have a stable and fast internet connection before downloading music playlists. You can use Wi-Fi or a mobile hotspot to avoid interruptions or errors.</li>
|
107 |
-
<li>Make sure you have a valid and active subscription to the music streaming service that you want to download music playlists from. You can check your subscription status or renew it if necessary.</li>
|
108 |
-
<li>Make sure you download music playlists that you really like and listen to frequently. You can create your own playlists or explore the curated ones on the music streaming service.</li>
|
109 |
-
<li>Make sure you update your downloaded music playlists regularly. You can add new songs, remove old ones, or sync them with the online version.</li>
|
110 |
-
</ul>
|
111 |
-
<h3>Share Your Feedback</h3>
|
112 |
-
<p>We hope you have found this article helpful and informative. Now you know how to download music playlists and enjoy your favorite songs offline. You can use this skill to create the perfect soundtrack for any occasion or mood.</p>
|
113 |
-
<p>If you have any questions, comments, or suggestions, please feel free to share them with us. We would love to hear from you and learn from your experience. You can also share this article with your friends and family who might be interested in downloading music playlists.</p>
|
114 |
-
<p>Thank you for reading and happy listening!</p>
|
115 |
-
<h2>Frequently Asked Questions</h2>
|
116 |
-
<h3>Q: How do I download music playlists from YouTube without YouTube Music Premium or YouTube Premium?</h3>
|
117 |
-
<p>A: There are some third-party apps or websites that claim to allow you to download music playlists from YouTube without YouTube Music Premium or YouTube Premium. However, these methods are not authorized by YouTube and may violate its terms of service or infringe on the rights of the content owners. Therefore, we do not recommend using them and we advise you to respect the law and the creators.</p>
|
118 |
-
<h3>Q: How do I download music playlists from Spotify without Spotify Premium?</h3>
|
119 |
-
<p>A: There is no official way to download music playlists from Spotify without Spotify Premium. However, there are some alternatives that you can try, such as:</p>
|
120 |
-
<ul>
|
121 |
-
<li>Using the free trial of Spotify Premium for 30 days.</li>
|
122 |
-
<li>Using a family plan or a student discount to get Spotify Premium for a lower price.</li>
|
123 |
-
<li>Using a VPN or a proxy to access Spotify Premium in a different country where it is cheaper.</li>
|
124 |
-
</ul>
|
125 |
-
<p>However, these methods may not work for everyone and may have some risks or limitations. Therefore, we do not guarantee their effectiveness and we advise you to be careful and responsible.</p>
|
126 |
-
<h3>Q: How do I transfer my downloaded music playlists from one device to another?</h3>
|
127 |
-
<p>A: To transfer your downloaded music playlists from one device to another, you need to use the same music streaming service and account on both devices. Then, you need to sync your downloads or offline library on both devices. You may also need to connect both devices to the same Wi-Fi network or use a USB cable or Bluetooth connection.</p>
|
128 |
-
<h3>Q: How do I edit my downloaded music playlists?</h3>
|
129 |
-
<p>A: To edit your downloaded music playlists, you need to go to the music streaming app that you used to download them. Then, you need to find the playlist that you want to edit and tap on the menu icon or the edit button. You can then add or remove songs, change the order, rename the playlist, or change the cover image.</p>
|
130 |
-
<h3>Q: How do I share my downloaded music playlists with others?</h3>
|
131 |
-
<p>A: To share your downloaded music playlists with others, you need to go to the music streaming app that you used to download them. Then, you need to find the playlist that you want to share and tap on the menu icon or the share button. You can then choose the method or platform that you want to use to share your playlist, such as email, text message, social media, etc. You can also copy the link or the code of your playlist and paste it wherever you want. However, keep in mind that the people who receive your playlist may not be able to listen to it offline unless they have the same music streaming service and subscription as you.</p>
|
132 |
-
<br />
|
133 |
-
<br />
|
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_lms_discrete.py
DELETED
@@ -1,257 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import warnings
|
16 |
-
from dataclasses import dataclass
|
17 |
-
from typing import List, Optional, Tuple, Union
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import paddle
|
21 |
-
from scipy import integrate
|
22 |
-
|
23 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
24 |
-
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput
|
25 |
-
from .scheduling_utils import SchedulerMixin
|
26 |
-
|
27 |
-
|
28 |
-
@dataclass
|
29 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete
|
30 |
-
class LMSDiscreteSchedulerOutput(BaseOutput):
|
31 |
-
"""
|
32 |
-
Output class for the scheduler's step function output.
|
33 |
-
|
34 |
-
Args:
|
35 |
-
prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
36 |
-
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
|
37 |
-
denoising loop.
|
38 |
-
pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
39 |
-
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
|
40 |
-
`pred_original_sample` can be used to preview progress or for guidance.
|
41 |
-
"""
|
42 |
-
|
43 |
-
prev_sample: paddle.Tensor
|
44 |
-
pred_original_sample: Optional[paddle.Tensor] = None
|
45 |
-
|
46 |
-
|
47 |
-
class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
|
48 |
-
"""
|
49 |
-
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
|
50 |
-
Katherine Crowson:
|
51 |
-
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
|
52 |
-
|
53 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
54 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
55 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
56 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
57 |
-
|
58 |
-
Args:
|
59 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
60 |
-
beta_start (`float`): the starting `beta` value of inference.
|
61 |
-
beta_end (`float`): the final `beta` value.
|
62 |
-
beta_schedule (`str`):
|
63 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
64 |
-
`linear` or `scaled_linear`.
|
65 |
-
trained_betas (`np.ndarray`, optional):
|
66 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
67 |
-
prediction_type (`str`, default `epsilon`, optional):
|
68 |
-
prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
|
69 |
-
process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
|
70 |
-
https://imagen.research.google/video/paper.pdf)
|
71 |
-
"""
|
72 |
-
|
73 |
-
_compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
|
74 |
-
order = 1
|
75 |
-
|
76 |
-
@register_to_config
|
77 |
-
def __init__(
|
78 |
-
self,
|
79 |
-
num_train_timesteps: int = 1000,
|
80 |
-
beta_start: float = 0.0001,
|
81 |
-
beta_end: float = 0.02,
|
82 |
-
beta_schedule: str = "linear",
|
83 |
-
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
84 |
-
prediction_type: str = "epsilon",
|
85 |
-
):
|
86 |
-
if trained_betas is not None:
|
87 |
-
self.betas = paddle.to_tensor(trained_betas, dtype="float32")
|
88 |
-
elif beta_schedule == "linear":
|
89 |
-
self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
|
90 |
-
elif beta_schedule == "scaled_linear":
|
91 |
-
# this schedule is very specific to the latent diffusion model.
|
92 |
-
self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
|
93 |
-
else:
|
94 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
95 |
-
|
96 |
-
self.alphas = 1.0 - self.betas
|
97 |
-
self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
|
98 |
-
|
99 |
-
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
|
100 |
-
sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
|
101 |
-
self.sigmas = paddle.to_tensor(sigmas)
|
102 |
-
|
103 |
-
# standard deviation of the initial noise distribution
|
104 |
-
self.init_noise_sigma = self.sigmas.max()
|
105 |
-
|
106 |
-
# setable values
|
107 |
-
self.num_inference_steps = None
|
108 |
-
timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
|
109 |
-
self.timesteps = paddle.to_tensor(timesteps, dtype="float32")
|
110 |
-
self.derivatives = []
|
111 |
-
self.is_scale_input_called = False
|
112 |
-
|
113 |
-
def scale_model_input(self, sample: paddle.Tensor, timestep: Union[float, paddle.Tensor]) -> paddle.Tensor:
|
114 |
-
"""
|
115 |
-
Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm.
|
116 |
-
|
117 |
-
Args:
|
118 |
-
sample (`paddle.Tensor`): input sample
|
119 |
-
timestep (`float` or `paddle.Tensor`): the current timestep in the diffusion chain
|
120 |
-
|
121 |
-
Returns:
|
122 |
-
`paddle.Tensor`: scaled input sample
|
123 |
-
"""
|
124 |
-
step_index = (self.timesteps == timestep).nonzero().item()
|
125 |
-
sigma = self.sigmas[step_index]
|
126 |
-
sample = sample / ((sigma**2 + 1) ** 0.5)
|
127 |
-
self.is_scale_input_called = True
|
128 |
-
return sample
|
129 |
-
|
130 |
-
def get_lms_coefficient(self, order, t, current_order):
|
131 |
-
"""
|
132 |
-
Compute a linear multistep coefficient.
|
133 |
-
|
134 |
-
Args:
|
135 |
-
order (`int`): the order of the linear multistep method, i.e. how many stored derivatives are combined.
|
136 |
-
t (`int`): the index of the current timestep in the sigma schedule.
|
137 |
-
current_order (`int`): the index of the stored derivative whose coefficient is being computed.
|
138 |
-
"""
|
139 |
-
|
140 |
-
def lms_derivative(tau):
|
141 |
-
prod = 1.0
|
142 |
-
for k in range(order):
|
143 |
-
if current_order == k:
|
144 |
-
continue
|
145 |
-
prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k])
|
146 |
-
return prod
|
147 |
-
|
148 |
-
integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0]
|
149 |
-
|
150 |
-
return integrated_coeff
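# The quadrature above evaluates the Adams-Bashforth-style weight for the stored
# derivative with index `current_order`: it integrates the Lagrange basis polynomial
# prod_{j != current_order} (tau - sigma_{t-j}) / (sigma_{t-current_order} - sigma_{t-j})
# over [sigma_t, sigma_{t+1}]. `step` applies these weights to `self.derivatives`
# to advance the sample by one timestep.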
|
151 |
-
|
152 |
-
def set_timesteps(self, num_inference_steps: int):
|
153 |
-
"""
|
154 |
-
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
|
155 |
-
|
156 |
-
Args:
|
157 |
-
num_inference_steps (`int`):
|
158 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
159 |
-
"""
|
160 |
-
self.num_inference_steps = num_inference_steps
|
161 |
-
|
162 |
-
timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
|
163 |
-
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
|
164 |
-
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
|
165 |
-
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
|
166 |
-
self.sigmas = paddle.to_tensor(sigmas)
|
167 |
-
self.timesteps = paddle.to_tensor(timesteps, dtype="float32")
|
168 |
-
|
169 |
-
self.derivatives = []
|
170 |
-
|
171 |
-
def step(
|
172 |
-
self,
|
173 |
-
model_output: paddle.Tensor,
|
174 |
-
timestep: Union[float, paddle.Tensor],
|
175 |
-
sample: paddle.Tensor,
|
176 |
-
order: int = 4,
|
177 |
-
return_dict: bool = True,
|
178 |
-
) -> Union[LMSDiscreteSchedulerOutput, Tuple]:
|
179 |
-
"""
|
180 |
-
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
|
181 |
-
process from the learned model outputs (most often the predicted noise).
|
182 |
-
|
183 |
-
Args:
|
184 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
185 |
-
timestep (`float`): current timestep in the diffusion chain.
|
186 |
-
sample (`paddle.Tensor`):
|
187 |
-
current instance of sample being created by diffusion process.
|
188 |
-
order: coefficient for multi-step inference.
|
189 |
-
return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class
|
190 |
-
|
191 |
-
Returns:
|
192 |
-
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`:
|
193 |
-
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
|
194 |
-
When returning a tuple, the first element is the sample tensor.
|
195 |
-
|
196 |
-
"""
|
197 |
-
if not self.is_scale_input_called:
|
198 |
-
warnings.warn(
|
199 |
-
"The `scale_model_input` function should be called before `step` to ensure correct denoising. "
|
200 |
-
"See `StableDiffusionPipeline` for a usage example."
|
201 |
-
)
|
202 |
-
|
203 |
-
step_index = (self.timesteps == timestep).nonzero().item()
|
204 |
-
sigma = self.sigmas[step_index]
|
205 |
-
|
206 |
-
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
|
207 |
-
if self.config.prediction_type == "epsilon":
|
208 |
-
pred_original_sample = sample - sigma * model_output
|
209 |
-
elif self.config.prediction_type == "v_prediction":
|
210 |
-
# * c_out + input * c_skip
|
211 |
-
pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
|
212 |
-
else:
|
213 |
-
raise ValueError(
|
214 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
|
215 |
-
)
|
216 |
-
|
217 |
-
# 2. Convert to an ODE derivative
|
218 |
-
derivative = (sample - pred_original_sample) / sigma
|
219 |
-
self.derivatives.append(derivative)
|
220 |
-
if len(self.derivatives) > order:
|
221 |
-
self.derivatives.pop(0)
|
222 |
-
|
223 |
-
# 3. Compute linear multistep coefficients
|
224 |
-
order = min(step_index + 1, order)
|
225 |
-
lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
|
226 |
-
|
227 |
-
# 4. Compute previous sample based on the derivatives path
|
228 |
-
prev_sample = sample + sum(
|
229 |
-
coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives))
|
230 |
-
)
|
231 |
-
|
232 |
-
if not return_dict:
|
233 |
-
return (prev_sample,)
|
234 |
-
|
235 |
-
return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
|
236 |
-
|
237 |
-
def add_noise(
|
238 |
-
self,
|
239 |
-
original_samples: paddle.Tensor,
|
240 |
-
noise: paddle.Tensor,
|
241 |
-
timesteps: paddle.Tensor,
|
242 |
-
) -> paddle.Tensor:
|
243 |
-
# Make sure sigmas and timesteps have the same dtype as original_samples
|
244 |
-
sigmas = self.sigmas.cast(original_samples.dtype)
|
245 |
-
schedule_timesteps = self.timesteps
|
246 |
-
|
247 |
-
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
|
248 |
-
|
249 |
-
sigma = sigmas[step_indices].flatten()
|
250 |
-
while len(sigma.shape) < len(original_samples.shape):
|
251 |
-
sigma = sigma.unsqueeze(-1)
|
252 |
-
|
253 |
-
noisy_samples = original_samples + noise * sigma
|
254 |
-
return noisy_samples
|
255 |
-
|
256 |
-
def __len__(self):
|
257 |
-
return self.config.num_train_timesteps
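For context, a minimal sketch of how this (now removed) K-LMS scheduler is typically driven. `my_denoiser` and the latent shape are placeholders and not part of the deleted file; only `set_timesteps`, `scale_model_input` and `step` come from the class above.

import paddle

scheduler = LMSDiscreteScheduler(num_train_timesteps=1000)    # class from the file above
scheduler.set_timesteps(num_inference_steps=25)

sample = paddle.randn([1, 4, 64, 64])                         # start from pure noise (illustrative shape)

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)      # divides by (sigma**2 + 1) ** 0.5
    noise_pred = my_denoiser(model_input, t)                  # placeholder for a trained UNet
    sample = scheduler.step(noise_pred, t, sample).prev_sample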
spaces/4f20/text_generator/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Text Generator
emoji: 👀
colorFrom: gray
colorTo: pink
sdk: gradio
sdk_version: 3.12.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/utils.py
DELETED
@@ -1,26 +0,0 @@
import argparse
import yaml
import sys

def read_config_as_args(config_path,args=None,is_config_str=False):
    return_dict = {}

    if config_path is not None:
        if is_config_str:
            yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
        else:
            with open(config_path, "r") as f:
                yml_config = yaml.load(f, Loader=yaml.FullLoader)

        if args != None:
            for k, v in yml_config.items():
                if k in args.__dict__:
                    args.__dict__[k] = v
                else:
                    sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
        else:
            for k, v in yml_config.items():
                return_dict[k] = v

    args = args if args != None else return_dict
    return argparse.Namespace(**args)
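For reference, a small hedged example of the helper above, using a made-up YAML string. Note that the `args != None` branch would still fail at the final `argparse.Namespace(**args)` because a `Namespace` cannot be unpacked with `**`; the dict path shown here works.

yaml_text = "batch_size: 16\nsampling_rate: 32000\n"     # illustrative config, not from the repo
cfg = read_config_as_args(yaml_text, is_config_str=True)
print(cfg.batch_size, cfg.sampling_rate)                  # -> 16 32000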
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/__init__.py
DELETED
@@ -1,8 +0,0 @@
from .factory import list_models, create_model, create_model_and_transforms, add_model_config
from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model
from .openai import load_openai_model, list_openai_models
from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\
    get_pretrained_url, download_pretrained
from .tokenizer import SimpleTokenizer, tokenize
from .transform import image_transform
spaces/AIZero2Hero4Health/9-Seq2SeqQAGenerator-GR/qasrl_model_pipeline.py
DELETED
@@ -1,183 +0,0 @@
from typing import Optional
import json
from argparse import Namespace
from pathlib import Path
from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer

def get_markers_for_model(is_t5_model: bool) -> Namespace:
    special_tokens_constants = Namespace()
    if is_t5_model:
        # T5 model have 100 special tokens by default
        special_tokens_constants.separator_input_question_predicate = "<extra_id_1>"
        special_tokens_constants.separator_output_answers = "<extra_id_3>"
        special_tokens_constants.separator_output_questions = "<extra_id_5>"  # if using only questions
        special_tokens_constants.separator_output_question_answer = "<extra_id_7>"
        special_tokens_constants.separator_output_pairs = "<extra_id_9>"
        special_tokens_constants.predicate_generic_marker = "<extra_id_10>"
        special_tokens_constants.predicate_verb_marker = "<extra_id_11>"
        special_tokens_constants.predicate_nominalization_marker = "<extra_id_12>"

    else:
        special_tokens_constants.separator_input_question_predicate = "<question_predicate_sep>"
        special_tokens_constants.separator_output_answers = "<answers_sep>"
        special_tokens_constants.separator_output_questions = "<question_sep>"  # if using only questions
        special_tokens_constants.separator_output_question_answer = "<question_answer_sep>"
        special_tokens_constants.separator_output_pairs = "<qa_pairs_sep>"
        special_tokens_constants.predicate_generic_marker = "<predicate_marker>"
        special_tokens_constants.predicate_verb_marker = "<verbal_predicate_marker>"
        special_tokens_constants.predicate_nominalization_marker = "<nominalization_predicate_marker>"
    return special_tokens_constants

def load_trained_model(name_or_path):
    import huggingface_hub as HFhub
    tokenizer = AutoTokenizer.from_pretrained(name_or_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path)
    # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory
    kwargs_filename = None
    if name_or_path.startswith("kleinay/"):  # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files
        kwargs_filename = HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json")
    elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists():
        kwargs_filename = Path(name_or_path) / "experiment_kwargs.json"

    if kwargs_filename:
        preprocessing_kwargs = json.load(open(kwargs_filename))
        # integrate into model.config (for decoding args, e.g. "num_beams"), and save also as standalone object for preprocessing
        model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs)
        model.config.update(preprocessing_kwargs)
    return model, tokenizer


class QASRL_Pipeline(Text2TextGenerationPipeline):
    def __init__(self, model_repo: str, **kwargs):
        model, tokenizer = load_trained_model(model_repo)
        super().__init__(model, tokenizer, framework="pt")
        self.is_t5_model = "t5" in model.config.model_type
        self.special_tokens = get_markers_for_model(self.is_t5_model)
        self.data_args = model.config.preprocessing_kwargs
        # backward compatibility - default keyword values implemeted in `run_summarization`, thus not saved in `preprocessing_kwargs`
        if "predicate_marker_type" not in vars(self.data_args):
            self.data_args.predicate_marker_type = "generic"
        if "use_bilateral_predicate_marker" not in vars(self.data_args):
            self.data_args.use_bilateral_predicate_marker = True
        if "append_verb_form" not in vars(self.data_args):
            self.data_args.append_verb_form = True
        self._update_config(**kwargs)

    def _update_config(self, **kwargs):
        " Update self.model.config with initialization parameters and necessary defaults. "
        # set default values that will always override model.config, but can overriden by __init__ kwargs
        kwargs["max_length"] = kwargs.get("max_length", 80)
        # override model.config with kwargs
        for k,v in kwargs.items():
            self.model.config.__dict__[k] = v

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {}
        if "predicate_marker" in kwargs:
            preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"]
        if "predicate_type" in kwargs:
            preprocess_kwargs["predicate_type"] = kwargs["predicate_type"]
        if "verb_form" in kwargs:
            preprocess_kwargs["verb_form"] = kwargs["verb_form"]
        return preprocess_kwargs, forward_kwargs, postprocess_kwargs

    def preprocess(self, inputs, predicate_marker="<predicate>", predicate_type=None, verb_form=None):
        # Here, inputs is string or list of strings; apply string postprocessing
        if isinstance(inputs, str):
            processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form)
        elif hasattr(inputs, "__iter__"):
            processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs]
        else:
            raise ValueError("inputs must be str or Iterable[str]")
        # Now pass to super.preprocess for tokenization
        return super().preprocess(processed_inputs)

    def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str:
        sent_tokens = seq.split(" ")
        assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word"
        predicate_idx = sent_tokens.index(predicate_marker)
        sent_tokens.remove(predicate_marker)
        sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)])
        predicate = sent_tokens[predicate_idx]
        sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))])

        if self.data_args.predicate_marker_type == "generic":
            predicate_marker = self.special_tokens.predicate_generic_marker
        # In case we want special marker for each predicate type: """
        elif self.data_args.predicate_marker_type == "pred_type":
            assert predicate_type is not None, "For this model, you must provide the `predicate_type` either when initializing QASRL_Pipeline(...) or when applying __call__(...) on it"
            assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'"
            predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker ,
                                "nominal": self.special_tokens.predicate_nominalization_marker
                                }[predicate_type]

        if self.data_args.use_bilateral_predicate_marker:
            seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}"
        else:
            seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}"

        # embed also verb_form
        if self.data_args.append_verb_form and verb_form is None:
            raise ValueError(f"For this model, you must provide the `verb_form` of the predicate when applying __call__(...)")
        elif self.data_args.append_verb_form:
            seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} "
        else:
            seq = f"{seq} "

        # append source prefix (for t5 models)
        prefix = self._get_source_prefix(predicate_type)

        return prefix + seq

    def _get_source_prefix(self, predicate_type: Optional[str]):
        if not self.is_t5_model or self.data_args.source_prefix is None:
            return ''
        if not self.data_args.source_prefix.startswith("<"):  # Regular prefix - not dependent on input row x
            return self.data_args.source_prefix
        if self.data_args.source_prefix == "<predicate-type>":
            if predicate_type is None:
                raise ValueError("source_prefix is '<predicate-type>' but input no `predicate_type`.")
            else:
                return f"Generate QAs for {predicate_type} QASRL: "

    def _forward(self, *args, **kwargs):
        outputs = super()._forward(*args, **kwargs)
        return outputs


    def postprocess(self, model_outputs):
        output_seq = self.tokenizer.decode(
            model_outputs["output_ids"].squeeze(),
            skip_special_tokens=False,
            clean_up_tokenization_spaces=False,
        )
        output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip()
        qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs)
        qas = [self._postrocess_qa(qa_subseq) for qa_subseq in qa_subseqs]
        return {"generated_text": output_seq,
                "QAs": qas}

    def _postrocess_qa(self, seq: str) -> str:
        # split question and answers
        if self.special_tokens.separator_output_question_answer in seq:
            question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2]
        else:
            print("invalid format: no separator between question and answer found...")
            return None
            # question, answer = seq, ''  # Or: backoff to only question
        # skip "_" slots in questions
        question = ' '.join(t for t in question.split(' ') if t != '_')
        answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)]
        return {"question": question, "answers": answers}


if __name__ == "__main__":
    pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline")
    res1 = pipe("The student was interested in Luke 's <predicate> research about sea animals .", verb_form="research", predicate_type="nominal")
    res2 = pipe(["The doctor was interested in Luke 's <predicate> treatment .",
                 "The Veterinary student was interested in Luke 's <predicate> treatment of sea animals ."], verb_form="treat", predicate_type="nominal", num_beams=10)
    res3 = pipe("A number of professions have <predicate> developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal")
    print(res1)
    print(res2)
    print(res3)
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/encodec.py
DELETED
@@ -1,302 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from abc import ABC, abstractmethod
import typing as tp

from einops import rearrange
import torch
from torch import nn

from .. import quantization as qt


class CompressionModel(ABC, nn.Module):

    @abstractmethod
    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
        ...

    @abstractmethod
    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
        """See `EncodecModel.encode`"""
        ...

    @abstractmethod
    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
        """See `EncodecModel.decode`"""
        ...

    @property
    @abstractmethod
    def channels(self) -> int:
        ...

    @property
    @abstractmethod
    def frame_rate(self) -> int:
        ...

    @property
    @abstractmethod
    def sample_rate(self) -> int:
        ...

    @property
    @abstractmethod
    def cardinality(self) -> int:
        ...

    @property
    @abstractmethod
    def num_codebooks(self) -> int:
        ...

    @property
    @abstractmethod
    def total_codebooks(self) -> int:
        ...

    @abstractmethod
    def set_num_codebooks(self, n: int):
        """Set the active number of codebooks used by the quantizer.
        """
        ...


class EncodecModel(CompressionModel):
    """Encodec model operating on the raw waveform.

    Args:
        encoder (nn.Module): Encoder network.
        decoder (nn.Module): Decoder network.
        quantizer (qt.BaseQuantizer): Quantizer network.
        frame_rate (int): Frame rate for the latent representation.
        sample_rate (int): Audio sample rate.
        channels (int): Number of audio channels.
        causal (bool): Whether to use a causal version of the model.
        renormalize (bool): Whether to renormalize the audio before running the model.
    """
    # we need assignement to override the property in the abstract class,
    # I couldn't find a better way...
    frame_rate: int = 0
    sample_rate: int = 0
    channels: int = 0

    def __init__(self,
                 encoder: nn.Module,
                 decoder: nn.Module,
                 quantizer: qt.BaseQuantizer,
                 frame_rate: int,
                 sample_rate: int,
                 channels: int,
                 causal: bool = False,
                 renormalize: bool = False):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.quantizer = quantizer
        self.frame_rate = frame_rate
        self.sample_rate = sample_rate
        self.channels = channels
        self.renormalize = renormalize
        self.causal = causal
        if self.causal:
            # we force disabling here to avoid handling linear overlap of segments
            # as supported in original EnCodec codebase.
            assert not self.renormalize, 'Causal model does not support renormalize'

    @property
    def total_codebooks(self):
        """Total number of quantizer codebooks available.
        """
        return self.quantizer.total_codebooks

    @property
    def num_codebooks(self):
        """Active number of codebooks used by the quantizer.
        """
        return self.quantizer.num_codebooks

    def set_num_codebooks(self, n: int):
        """Set the active number of codebooks used by the quantizer.
        """
        self.quantizer.set_num_codebooks(n)

    @property
    def cardinality(self):
        """Cardinality of each codebook.
        """
        return self.quantizer.bins

    def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
        scale: tp.Optional[torch.Tensor]
        if self.renormalize:
            mono = x.mean(dim=1, keepdim=True)
            volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
            scale = 1e-8 + volume
            x = x / scale
            scale = scale.view(-1, 1)
        else:
            scale = None
        return x, scale

    def postprocess(self,
                    x: torch.Tensor,
                    scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
        if scale is not None:
            assert self.renormalize
            x = x * scale.view(-1, 1, 1)
        return x

    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
        assert x.dim() == 3
        length = x.shape[-1]
        x, scale = self.preprocess(x)

        emb = self.encoder(x)
        q_res = self.quantizer(emb, self.frame_rate)
        out = self.decoder(q_res.x)

        # remove extra padding added by the encoder and decoder
        assert out.shape[-1] >= length, (out.shape[-1], length)
        out = out[..., :length]

        q_res.x = self.postprocess(out, scale)

        return q_res

    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
        """Encode the given input tensor to quantized representation along with scale parameter.

        Args:
            x (torch.Tensor): Float tensor of shape [B, C, T]

        Returns:
            codes, scale (tp.Tuple[torch.Tensor, torch.Tensor]): Tuple composed of:
                codes a float tensor of shape [B, K, T] with K the number of codebooks used and T the timestep.
                scale a float tensor containing the scale for audio renormalizealization.
        """
        assert x.dim() == 3
        x, scale = self.preprocess(x)
        emb = self.encoder(x)
        codes = self.quantizer.encode(emb)
        return codes, scale

    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
        """Decode the given codes to a reconstructed representation, using the scale to perform
        audio denormalization if needed.

        Args:
            codes (torch.Tensor): Int tensor of shape [B, K, T]
            scale (tp.Optional[torch.Tensor]): Float tensor containing the scale value.

        Returns:
            out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
        """
        emb = self.quantizer.decode(codes)
        out = self.decoder(emb)
        out = self.postprocess(out, scale)
        # out contains extra padding added by the encoder and decoder
        return out


class FlattenedCompressionModel(CompressionModel):
    """Wraps a CompressionModel and flatten its codebooks, e.g.
    instead of returning [B, K, T], return [B, S, T * (K // S)] with
    S the number of codebooks per step, and `K // S` the number of 'virtual steps'
    for each real time step.

    Args:
        model (CompressionModel): compression model to wrap.
        codebooks_per_step (int): number of codebooks to keep per step,
            this must divide the number of codebooks provided by the wrapped model.
        extend_cardinality (bool): if True, and for instance if codebooks_per_step = 1,
            if each codebook has a cardinality N, then the first codebook will
            use the range [0, N - 1], and the second [N, 2 N - 1] etc.
            On decoding, this can lead to potentially invalid sequences.
            Any invalid entry will be silently remapped to the proper range
            with a modulo.
    """
    def __init__(self, model: CompressionModel, codebooks_per_step: int = 1,
                 extend_cardinality: bool = True):
        super().__init__()
        self.model = model
        self.codebooks_per_step = codebooks_per_step
        self.extend_cardinality = extend_cardinality

    @property
    def total_codebooks(self):
        return self.model.total_codebooks

    @property
    def num_codebooks(self):
        """Active number of codebooks used by the quantizer.

        ..Warning:: this reports the number of codebooks after the flattening
        of the codebooks!
        """
        assert self.model.num_codebooks % self.codebooks_per_step == 0
        return self.codebooks_per_step

    def set_num_codebooks(self, n: int):
        """Set the active number of codebooks used by the quantizer.

        ..Warning:: this sets the number of codebooks **before** the flattening
        of the codebooks.
        """
        assert n % self.codebooks_per_step == 0
        self.model.set_num_codebooks(n)

    @property
    def num_virtual_steps(self) -> int:
        """Return the number of virtual steps, e.g. one real step
        will be split into that many steps.
        """
        return self.model.num_codebooks // self.codebooks_per_step

    @property
    def frame_rate(self) -> int:
        return self.model.frame_rate * self.num_virtual_steps

    @property
    def sample_rate(self) -> int:
        return self.model.sample_rate

    @property
    def channels(self) -> int:
        return self.model.channels

    @property
    def cardinality(self):
        """Cardinality of each codebook.
        """
        if self.extend_cardinality:
            return self.model.cardinality * self.num_virtual_steps
        else:
            return self.model.cardinality

    def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
        raise NotImplementedError("Not supported, use encode and decode.")

    def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
        indices, scales = self.model.encode(x)
        B, K, T = indices.shape
        indices = rearrange(indices, 'b (k v) t -> b k t v', k=self.codebooks_per_step)
        if self.extend_cardinality:
            for virtual_step in range(1, self.num_virtual_steps):
                indices[..., virtual_step] += self.model.cardinality * virtual_step
        indices = rearrange(indices, 'b k t v -> b k (t v)')
        return (indices, scales)

    def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
        B, K, T = codes.shape
        assert T % self.num_virtual_steps == 0
        codes = rearrange(codes, 'b k (t v) -> b (k v) t', v=self.num_virtual_steps)
        # We silently ignore potential errors from the LM when
        # using extend_cardinality.
        codes = codes % self.model.cardinality
        return self.model.decode(codes, scale)
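For context, a hedged round-trip sketch of the `CompressionModel` interface deleted above; `compression_model` is assumed to be an already-constructed `EncodecModel` (for example one returned by audiocraft's pretrained loaders), and the shapes are illustrative only.

import torch

wav = torch.randn(1, 1, 32000)                      # [B, C, T]: a batch of mono waveforms
with torch.no_grad():
    codes, scale = compression_model.encode(wav)    # codes: [B, K, T'] discrete indices per codebook
    recon = compression_model.decode(codes, scale)  # reconstructed waveform, may include extra padding
recon = recon[..., : wav.shape[-1]]                 # trim back to the input length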
spaces/AchyuthGamer/OpenGPT/g4f/typing.py
DELETED
@@ -1,22 +0,0 @@
import sys
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict

if sys.version_info >= (3, 8):
    from typing import TypedDict
else:
    from typing_extensions import TypedDict

SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
AsyncResult = AsyncGenerator[str, None]
Messages = List[Dict[str, str]]

__all__ = [
    'Any',
    'AsyncGenerator',
    'Generator',
    'Tuple',
    'TypedDict',
    'SHA256',
    'CreateResult',
]
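A small sketch of how these aliases are usually consumed by a provider implementation; the function name and the yielded chunks are hypothetical.

def stream_answer(messages: Messages) -> CreateResult:
    # a provider yields the reply chunk by chunk
    for chunk in ("Hel", "lo", "!"):
        yield chunk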
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/Buttons.js
DELETED
@@ -1,88 +0,0 @@
import Sizer from '../sizer/Sizer.js';
import AddChildMethods from './AddChildMethods.js';
import RemoveChildMethods from './RemoveChildMethods.js';
import ButtonGroup from '../utils/buttongroup/ButtonGroup.js';
import ButtonMethods from '../utils/buttongroup/ButtonMethods.js';
import ButtonStateMethods from '../utils/buttongroup/ButtonStateMethods.js';

const GetValue = Phaser.Utils.Objects.GetValue;

class Buttons extends Sizer {
    constructor(scene, config) {
        if (config === undefined) {
            config = {};
        }

        var buttonSpace = config.space;
        if (typeof (buttonSpace) === 'number') {
            config.space = { item: buttonSpace };
        }

        // Create
        super(scene, config);
        this.type = 'rexButtons';
        this.buttonGroup = new ButtonGroup({
            parent: this,
            eventEmitter: GetValue(config, 'eventEmitter', this),
            groupName: GetValue(config, 'groupName', undefined),
            clickConfig: GetValue(config, 'click', undefined)
        })
            .setButtonsType(config)

        // Add elements
        var background = GetValue(config, 'background', undefined);
        var buttons = GetValue(config, 'buttons', undefined);

        // Buttons properties
        this.buttonsExpand = GetValue(config, 'expand', false);
        this.buttonsAlign = GetValue(config, 'align', undefined); // undefined/left/top: no space

        if (background) {
            this.addBackground(background);
        }

        if (buttons) {
            this.addButtons(buttons);
        }

        this.addChildrenMap('background', background);
        this.addChildrenMap('buttons', this.buttonGroup.buttons);
    }

    destroy(fromScene) {
        // This Game Object has already been destroyed
        if (!this.scene || this.ignoreDestroy) {
            return;
        }

        super.destroy(fromScene);
        this.buttonGroup.destroy();
        this.buttonGroup = undefined;
    }

    get buttons() {
        return this.buttonGroup.buttons;
    }

    get groupName() {
        return this.buttonGroup.groupName;
    }

    set groupName(value) {
        this.buttonGroup.groupName = value;
    }

    get eventEmitter() {
        return this.buttonGroup.eventEmitter;
    }
}

Object.assign(
    Buttons.prototype,
    AddChildMethods,
    RemoveChildMethods,
    ButtonMethods,
    ButtonStateMethods
);

export default Buttons;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ChildTransition.js
DELETED
@@ -1,24 +0,0 @@
import OpenCloseTransition from '../../../../plugins/behaviors/openclosetransition/OpenCloseTransition.js';

class Transition extends OpenCloseTransition {
    constructor(gameObject, config) {
        if (config === undefined) {
            config = {};
        }
        config.destroy = false;
        super(gameObject, config);
    }

    onOpen() {
        this.emit('open', this.parent, this);
        super.onOpen();
    }

    onClose() {
        this.emit('close', this.parent, this);
        super.onClose();
    }

}

export default Transition;
spaces/Aluxes/anime-remove-background/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: Anime Remove Background
emoji: 🪄🖼️
colorFrom: indigo
colorTo: pink
sdk: gradio
sdk_version: 3.1.4
app_file: app.py
pinned: false
license: apache-2.0
duplicated_from: skytnt/anime-remove-background
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/stylegan2/op/__init__.py
DELETED
@@ -1,2 +0,0 @@
from .fused_act import FusedLeakyReLU, fused_leaky_relu
from .upfirdn2d import upfirdn2d
spaces/Amrrs/textsummarizer/app.py
DELETED
@@ -1,16 +0,0 @@
import gradio as gr
import transformers
from transformers import BartTokenizer, BartForConditionalGeneration

model_name = 'facebook/bart-large-cnn'
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

def summarize(inp):
    inp = inp.replace('\n','')
    inp = tokenizer.encode(inp, return_tensors='pt', max_length=1024)
    summary_ids = model.generate(inp, num_beams=4, max_length=150, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary

gr.Interface(fn=summarize, inputs=gr.inputs.Textbox(lines=7, label="Input Text"), outputs="text").launch(inline=False)
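Illustrative only: the removed Space's `summarize` handler can also be called directly, outside of the Gradio interface; the article text is a placeholder.

article = "Your long article text goes here ..."   # placeholder input
print(summarize(article))                           # prints the BART-generated summary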
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/vq_diffusion.md
DELETED
@@ -1,35 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# VQ Diffusion

[Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://huggingface.co/papers/2111.14822) is by Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, Baining Guo.

The abstract from the paper is:

*We present the vector quantized diffusion (VQ-Diffusion) model for text-to-image generation. This method is based on a vector quantized variational autoencoder (VQ-VAE) whose latent space is modeled by a conditional variant of the recently developed Denoising Diffusion Probabilistic Model (DDPM). We find that this latent-space method is well-suited for text-to-image generation tasks because it not only eliminates the unidirectional bias with existing methods but also allows us to incorporate a mask-and-replace diffusion strategy to avoid the accumulation of errors, which is a serious problem with existing methods. Our experiments show that the VQ-Diffusion produces significantly better text-to-image generation results when compared with conventional autoregressive (AR) models with similar numbers of parameters. Compared with previous GAN-based text-to-image methods, our VQ-Diffusion can handle more complex scenes and improve the synthesized image quality by a large margin. Finally, we show that the image generation computation in our method can be made highly efficient by reparameterization. With traditional AR methods, the text-to-image generation time increases linearly with the output image resolution and hence is quite time consuming even for normal size images. The VQ-Diffusion allows us to achieve a better trade-off between quality and speed. Our experiments indicate that the VQ-Diffusion model with the reparameterization is fifteen times faster than traditional AR methods while achieving a better image quality.*

The original codebase can be found at [microsoft/VQ-Diffusion](https://github.com/microsoft/VQ-Diffusion).

<Tip>

Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## VQDiffusionPipeline
[[autodoc]] VQDiffusionPipeline
	- all
	- __call__

## ImagePipelineOutput
[[autodoc]] pipelines.ImagePipelineOutput
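A minimal usage sketch for the pipeline documented on this (removed) page; the hub id "microsoft/vq-diffusion-ithq" is the commonly used checkpoint but should be treated as an assumption here.

import torch
from diffusers import VQDiffusionPipeline

# load the pipeline and move it to GPU (half precision keeps memory modest)
pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("teddy bear playing in the pool").images[0]
image.save("teddy_bear.png")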
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py
DELETED
@@ -1,622 +0,0 @@
# Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver

from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
)


@flax.struct.dataclass
class DPMSolverMultistepSchedulerState:
    common: CommonSchedulerState
    alpha_t: jnp.ndarray
    sigma_t: jnp.ndarray
    lambda_t: jnp.ndarray

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    # running values
    model_outputs: Optional[jnp.ndarray] = None
    lower_order_nums: Optional[jnp.int32] = None
    prev_timestep: Optional[jnp.int32] = None
    cur_sample: Optional[jnp.ndarray] = None

    @classmethod
    def create(
        cls,
        common: CommonSchedulerState,
        alpha_t: jnp.ndarray,
        sigma_t: jnp.ndarray,
        lambda_t: jnp.ndarray,
        init_noise_sigma: jnp.ndarray,
        timesteps: jnp.ndarray,
    ):
        return cls(
            common=common,
            alpha_t=alpha_t,
            sigma_t=sigma_t,
            lambda_t=lambda_t,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )


@dataclass
class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput):
    state: DPMSolverMultistepSchedulerState


class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with
    the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
    samples, and it can generate quite good samples even in only 10 steps.

    For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095

    Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We
    recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.

    We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
    diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
    thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
    stable-diffusion).

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
        trained_betas (`np.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
        solver_order (`int`, default `2`):
            the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided
            sampling, and `solver_order=3` for unconditional sampling.
        prediction_type (`str`, default `epsilon`):
            indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
            or `v-prediction`.
        thresholding (`bool`, default `False`):
            whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
            For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
            use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
            models (such as stable-diffusion).
        dynamic_thresholding_ratio (`float`, default `0.995`):
            the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
            (https://arxiv.org/abs/2205.11487).
        sample_max_value (`float`, default `1.0`):
            the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
            `algorithm_type="dpmsolver++`.
        algorithm_type (`str`, default `dpmsolver++`):
            the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the
            algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in
            https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided
            sampling (e.g. stable-diffusion).
        solver_type (`str`, default `midpoint`):
            the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
            the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are
            slightly better, so we recommend to use the `midpoint` type.
        lower_order_final (`bool`, default `True`):
            whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
            find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            the `dtype` used for params and computation.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        solver_order: int = 2,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        sample_max_value: float = 1.0,
        algorithm_type: str = "dpmsolver++",
        solver_type: str = "midpoint",
        lower_order_final: bool = True,
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # Currently we only support VP-type noise schedule
        alpha_t = jnp.sqrt(common.alphas_cumprod)
        sigma_t = jnp.sqrt(1 - common.alphas_cumprod)
        lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t)

        # settings for DPM-Solver
        if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]:
            raise NotImplementedError(f"{self.config.algorithm_type} does is not implemented for {self.__class__}")
        if self.config.solver_type not in ["midpoint", "heun"]:
            raise NotImplementedError(f"{self.config.solver_type} does is not implemented for {self.__class__}")

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DPMSolverMultistepSchedulerState.create(
            common=common,
            alpha_t=alpha_t,
            sigma_t=sigma_t,
            lambda_t=lambda_t,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def set_timesteps(
        self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple
    ) -> DPMSolverMultistepSchedulerState:
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            state (`DPMSolverMultistepSchedulerState`):
                the `FlaxDPMSolverMultistepScheduler` state data class instance.
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
            shape (`Tuple`):
                the shape of the samples to be generated.
        """

        timesteps = (
            jnp.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1)
            .round()[::-1][:-1]
            .astype(jnp.int32)
        )

        # initial running values

        model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype)
        lower_order_nums = jnp.int32(0)
        prev_timestep = jnp.int32(-1)
        cur_sample = jnp.zeros(shape, dtype=self.dtype)

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
            model_outputs=model_outputs,
            lower_order_nums=lower_order_nums,
            prev_timestep=prev_timestep,
            cur_sample=cur_sample,
        )

    def convert_model_output(
        self,
        state: DPMSolverMultistepSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.

        DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
        discretize an integral of the data prediction model. So we need to first convert the model output to the
        corresponding type to match the algorithm.

        Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or
        DPM-Solver++ for both noise prediction model and data prediction model.

        Args:
            model_output (`jnp.ndarray`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`):
                current instance of sample being created by diffusion process.

        Returns:
            `jnp.ndarray`: the converted model output.
        """
        # DPM-Solver++ needs to solve an integral of the data prediction model.
        if self.config.algorithm_type == "dpmsolver++":
            if self.config.prediction_type == "epsilon":
                alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
                x0_pred = (sample - sigma_t * model_output) / alpha_t
            elif self.config.prediction_type == "sample":
                x0_pred = model_output
            elif self.config.prediction_type == "v_prediction":
                alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
                x0_pred = alpha_t * sample - sigma_t * model_output
            else:
                raise ValueError(
                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
                    " or `v_prediction` for the FlaxDPMSolverMultistepScheduler."
                )

            if self.config.thresholding:
                # Dynamic thresholding in https://arxiv.org/abs/2205.11487
                dynamic_max_val = jnp.percentile(
                    jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim))
                )
                dynamic_max_val = jnp.maximum(
                    dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val)
                )
                x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
            return x0_pred
        # DPM-Solver needs to solve an integral of the noise prediction model.
        elif self.config.algorithm_type == "dpmsolver":
            if self.config.prediction_type == "epsilon":
                return model_output
            elif self.config.prediction_type == "sample":
                alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
                epsilon = (sample - alpha_t * model_output) / sigma_t
                return epsilon
            elif self.config.prediction_type == "v_prediction":
                alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
                epsilon = alpha_t * model_output + sigma_t * sample
                return epsilon
            else:
                raise ValueError(
                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
                    " or `v_prediction` for the FlaxDPMSolverMultistepScheduler."
                )

    def dpm_solver_first_order_update(
        self,
        state: DPMSolverMultistepSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        prev_timestep: int,
        sample: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        One step for the first-order DPM-Solver (equivalent to DDIM).

        See https://arxiv.org/abs/2206.00927 for the detailed derivation.

        Args:
            model_output (`jnp.ndarray`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            prev_timestep (`int`): previous discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`):
                current instance of sample being created by diffusion process.

        Returns:
            `jnp.ndarray`: the sample tensor at the previous timestep.
        """
        t, s0 = prev_timestep, timestep
        m0 = model_output
        lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0]
        alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0]
        sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0]
        h = lambda_t - lambda_s
        if self.config.algorithm_type == "dpmsolver++":
            x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0
        elif self.config.algorithm_type == "dpmsolver":
            x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0
        return x_t

    def multistep_dpm_solver_second_order_update(
        self,
        state: DPMSolverMultistepSchedulerState,
        model_output_list: jnp.ndarray,
        timestep_list: List[int],
        prev_timestep: int,
        sample: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        One step for the second-order multistep DPM-Solver.

        Args:
            model_output_list (`List[jnp.ndarray]`):
                direct outputs from learned diffusion model at current and latter timesteps.
            timestep (`int`): current and latter discrete timestep in the diffusion chain.
            prev_timestep (`int`): previous discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`):
                current instance of sample being created by diffusion process.

        Returns:
            `jnp.ndarray`: the sample tensor at the previous timestep.
        """
        t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
        m0, m1 = model_output_list[-1], model_output_list[-2]
        lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1]
        alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0]
        sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0]
        h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
        r0 = h_0 / h
        D0, D1 = m0, (1.0 / r0) * (m0 - m1)
        if self.config.algorithm_type == "dpmsolver++":
            # See https://arxiv.org/abs/2211.01095 for detailed derivations
            if self.config.solver_type == "midpoint":
                x_t = (
                    (sigma_t / sigma_s0) * sample
                    - (alpha_t * (jnp.exp(-h) - 1.0)) * D0
                    - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1
                )
            elif self.config.solver_type == "heun":
                x_t = (
                    (sigma_t / sigma_s0) * sample
                    - (alpha_t * (jnp.exp(-h) - 1.0)) * D0
                    + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1
                )
        elif self.config.algorithm_type == "dpmsolver":
            # See https://arxiv.org/abs/2206.00927 for detailed derivations
            if self.config.solver_type == "midpoint":
                x_t = (
                    (alpha_t / alpha_s0) * sample
                    - (sigma_t * (jnp.exp(h) - 1.0)) * D0
                    - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1
                )
            elif self.config.solver_type == "heun":
                x_t = (
                    (alpha_t / alpha_s0) * sample
                    - (sigma_t * (jnp.exp(h) - 1.0)) * D0
                    - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1
                )
        return x_t

    def multistep_dpm_solver_third_order_update(
        self,
        state: DPMSolverMultistepSchedulerState,
        model_output_list: jnp.ndarray,
        timestep_list: List[int],
        prev_timestep: int,
        sample: jnp.ndarray,
    ) -> jnp.ndarray:
        """
        One step for the third-order multistep DPM-Solver.

        Args:
            model_output_list (`List[jnp.ndarray]`):
                direct outputs from learned diffusion model at current and latter timesteps.
            timestep (`int`): current and latter discrete timestep in the diffusion chain.
            prev_timestep (`int`): previous discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`):
                current instance of sample being created by diffusion process.

        Returns:
            `jnp.ndarray`: the sample tensor at the previous timestep.
        """
        t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
|
424 |
-
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
|
425 |
-
lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
|
426 |
-
state.lambda_t[t],
|
427 |
-
state.lambda_t[s0],
|
428 |
-
state.lambda_t[s1],
|
429 |
-
state.lambda_t[s2],
|
430 |
-
)
|
431 |
-
alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0]
|
432 |
-
sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0]
|
433 |
-
h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
|
434 |
-
r0, r1 = h_0 / h, h_1 / h
|
435 |
-
D0 = m0
|
436 |
-
D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
|
437 |
-
D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
|
438 |
-
D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
|
439 |
-
if self.config.algorithm_type == "dpmsolver++":
|
440 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
441 |
-
x_t = (
|
442 |
-
(sigma_t / sigma_s0) * sample
|
443 |
-
- (alpha_t * (jnp.exp(-h) - 1.0)) * D0
|
444 |
-
+ (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1
|
445 |
-
- (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
|
446 |
-
)
|
447 |
-
elif self.config.algorithm_type == "dpmsolver":
|
448 |
-
# See https://arxiv.org/abs/2206.00927 for detailed derivations
|
449 |
-
x_t = (
|
450 |
-
(alpha_t / alpha_s0) * sample
|
451 |
-
- (sigma_t * (jnp.exp(h) - 1.0)) * D0
|
452 |
-
- (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1
|
453 |
-
- (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
|
454 |
-
)
|
455 |
-
return x_t
|
456 |
-
|
457 |
-
def step(
|
458 |
-
self,
|
459 |
-
state: DPMSolverMultistepSchedulerState,
|
460 |
-
model_output: jnp.ndarray,
|
461 |
-
timestep: int,
|
462 |
-
sample: jnp.ndarray,
|
463 |
-
return_dict: bool = True,
|
464 |
-
) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]:
|
465 |
-
"""
|
466 |
-
Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process
|
467 |
-
from the learned model outputs (most often the predicted noise).
|
468 |
-
|
469 |
-
Args:
|
470 |
-
state (`DPMSolverMultistepSchedulerState`):
|
471 |
-
the `FlaxDPMSolverMultistepScheduler` state data class instance.
|
472 |
-
model_output (`jnp.ndarray`): direct output from learned diffusion model.
|
473 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
474 |
-
sample (`jnp.ndarray`):
|
475 |
-
current instance of sample being created by diffusion process.
|
476 |
-
return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class
|
477 |
-
|
478 |
-
Returns:
|
479 |
-
[`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if
|
480 |
-
`return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
|
481 |
-
|
482 |
-
"""
|
483 |
-
if state.num_inference_steps is None:
|
484 |
-
raise ValueError(
|
485 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
486 |
-
)
|
487 |
-
|
488 |
-
(step_index,) = jnp.where(state.timesteps == timestep, size=1)
|
489 |
-
step_index = step_index[0]
|
490 |
-
|
491 |
-
prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1])
|
492 |
-
|
493 |
-
model_output = self.convert_model_output(state, model_output, timestep, sample)
|
494 |
-
|
495 |
-
model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0)
|
496 |
-
model_outputs_new = model_outputs_new.at[-1].set(model_output)
|
497 |
-
state = state.replace(
|
498 |
-
model_outputs=model_outputs_new,
|
499 |
-
prev_timestep=prev_timestep,
|
500 |
-
cur_sample=sample,
|
501 |
-
)
|
502 |
-
|
503 |
-
def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
|
504 |
-
return self.dpm_solver_first_order_update(
|
505 |
-
state,
|
506 |
-
state.model_outputs[-1],
|
507 |
-
state.timesteps[step_index],
|
508 |
-
state.prev_timestep,
|
509 |
-
state.cur_sample,
|
510 |
-
)
|
511 |
-
|
512 |
-
def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
|
513 |
-
def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
|
514 |
-
timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]])
|
515 |
-
return self.multistep_dpm_solver_second_order_update(
|
516 |
-
state,
|
517 |
-
state.model_outputs,
|
518 |
-
timestep_list,
|
519 |
-
state.prev_timestep,
|
520 |
-
state.cur_sample,
|
521 |
-
)
|
522 |
-
|
523 |
-
def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
|
524 |
-
timestep_list = jnp.array(
|
525 |
-
[
|
526 |
-
state.timesteps[step_index - 2],
|
527 |
-
state.timesteps[step_index - 1],
|
528 |
-
state.timesteps[step_index],
|
529 |
-
]
|
530 |
-
)
|
531 |
-
return self.multistep_dpm_solver_third_order_update(
|
532 |
-
state,
|
533 |
-
state.model_outputs,
|
534 |
-
timestep_list,
|
535 |
-
state.prev_timestep,
|
536 |
-
state.cur_sample,
|
537 |
-
)
|
538 |
-
|
539 |
-
step_2_output = step_2(state)
|
540 |
-
step_3_output = step_3(state)
|
541 |
-
|
542 |
-
if self.config.solver_order == 2:
|
543 |
-
return step_2_output
|
544 |
-
elif self.config.lower_order_final and len(state.timesteps) < 15:
|
545 |
-
return jax.lax.select(
|
546 |
-
state.lower_order_nums < 2,
|
547 |
-
step_2_output,
|
548 |
-
jax.lax.select(
|
549 |
-
step_index == len(state.timesteps) - 2,
|
550 |
-
step_2_output,
|
551 |
-
step_3_output,
|
552 |
-
),
|
553 |
-
)
|
554 |
-
else:
|
555 |
-
return jax.lax.select(
|
556 |
-
state.lower_order_nums < 2,
|
557 |
-
step_2_output,
|
558 |
-
step_3_output,
|
559 |
-
)
|
560 |
-
|
561 |
-
step_1_output = step_1(state)
|
562 |
-
step_23_output = step_23(state)
|
563 |
-
|
564 |
-
if self.config.solver_order == 1:
|
565 |
-
prev_sample = step_1_output
|
566 |
-
|
567 |
-
elif self.config.lower_order_final and len(state.timesteps) < 15:
|
568 |
-
prev_sample = jax.lax.select(
|
569 |
-
state.lower_order_nums < 1,
|
570 |
-
step_1_output,
|
571 |
-
jax.lax.select(
|
572 |
-
step_index == len(state.timesteps) - 1,
|
573 |
-
step_1_output,
|
574 |
-
step_23_output,
|
575 |
-
),
|
576 |
-
)
|
577 |
-
|
578 |
-
else:
|
579 |
-
prev_sample = jax.lax.select(
|
580 |
-
state.lower_order_nums < 1,
|
581 |
-
step_1_output,
|
582 |
-
step_23_output,
|
583 |
-
)
|
584 |
-
|
585 |
-
state = state.replace(
|
586 |
-
lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order),
|
587 |
-
)
|
588 |
-
|
589 |
-
if not return_dict:
|
590 |
-
return (prev_sample, state)
|
591 |
-
|
592 |
-
return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state)
|
593 |
-
|
594 |
-
def scale_model_input(
|
595 |
-
self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
|
596 |
-
) -> jnp.ndarray:
|
597 |
-
"""
|
598 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
599 |
-
current timestep.
|
600 |
-
|
601 |
-
Args:
|
602 |
-
state (`DPMSolverMultistepSchedulerState`):
|
603 |
-
the `FlaxDPMSolverMultistepScheduler` state data class instance.
|
604 |
-
sample (`jnp.ndarray`): input sample
|
605 |
-
timestep (`int`, optional): current timestep
|
606 |
-
|
607 |
-
Returns:
|
608 |
-
`jnp.ndarray`: scaled input sample
|
609 |
-
"""
|
610 |
-
return sample
|
611 |
-
|
612 |
-
def add_noise(
|
613 |
-
self,
|
614 |
-
state: DPMSolverMultistepSchedulerState,
|
615 |
-
original_samples: jnp.ndarray,
|
616 |
-
noise: jnp.ndarray,
|
617 |
-
timesteps: jnp.ndarray,
|
618 |
-
) -> jnp.ndarray:
|
619 |
-
return add_noise_common(state.common, original_samples, noise, timesteps)
|
620 |
-
|
621 |
-
def __len__(self):
|
622 |
-
return self.config.num_train_timesteps
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_vp.py
DELETED
@@ -1,90 +0,0 @@
# Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more information, see the original paper: https://arxiv.org/abs/2011.13456

    UNDER CONSTRUCTION

    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
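
Reading the `step_pred` body above, the two closed-form quantities it evaluates are the VP-SDE marginal statistics and noise schedule; written out in math (my transcription of the code, not text from the file):

    \log \alpha_t = -\tfrac{1}{4}\, t^2 (\beta_{\max} - \beta_{\min}) - \tfrac{1}{2}\, t\, \beta_{\min}, \qquad
    \sigma_t = \sqrt{1 - e^{2 \log \alpha_t}}, \qquad
    \beta(t) = \beta_{\min} + t\, (\beta_{\max} - \beta_{\min})

so the model output is rescaled by -1/\sigma_t to form a score before the reverse-SDE Euler-Maruyama update `x = x_mean + diffusion * sqrt(-dt) * noise`.
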
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/karras_ve/test_karras_ve.py
DELETED
@@ -1,86 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/__init__.py
DELETED
@@ -1,24 +0,0 @@
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
                               RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
                    replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset

__all__ = [
    'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
    'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
    'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
    'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
    'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
    'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
    'NumClassCheckHook'
]
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/wider_face.py
DELETED
@@ -1,51 +0,0 @@
import os.path as osp
import xml.etree.ElementTree as ET

import mmcv

from .builder import DATASETS
from .xml_style import XMLDataset


@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """
    CLASSES = ('face', )

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """

        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            folder = root.find('folder').text
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, filename),
                    width=width,
                    height=height))

        return data_infos
spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './dmnet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/logits.py
DELETED
@@ -1,56 +0,0 @@
import torch

from modules import sampler_hijack, shared
from modules.logging_colors import logger
from modules.text_generation import generate_reply

global_scores = None


def get_next_logits(prompt, state, use_samplers, previous):
    if shared.model is None:
        logger.error("No model is loaded! Select one in the Model tab.")
        return 'Error: No model is loaded1 Select one in the Model tab.', previous

    is_non_hf_exllamav2 = shared.model.__class__.__name__ == 'Exllamav2Model'
    is_non_hf_exllamav1 = shared.model.__class__.__name__ == 'ExllamaModel'
    is_non_hf_llamacpp = shared.model.__class__.__name__ == 'LlamaCppModel'

    if use_samplers:
        if any([is_non_hf_exllamav2, is_non_hf_exllamav1, is_non_hf_llamacpp]):
            logger.error("Sampler hijacking is not supported non-Huggingface loaders.")
            # sampling is all done in c for exllama, so it is really hard to hijack
            # it should be possible to hijack llamacpp sampler by hijacking all their sampling methods,
            # but it is not implemented yet
            return 'Error: Sampler hijacking is not supported non-Huggingface loaders. Please disable the "Use samplers" option.', previous

        state['max_new_tokens'] = 1
        state['auto_max_new_tokens'] = False
        for _ in generate_reply(prompt, state):
            pass

        scores = sampler_hijack.global_scores[-1]
    else:
        if is_non_hf_exllamav2 or is_non_hf_exllamav1:
            tokens = shared.tokenizer.encode(prompt).cuda()
            scores = shared.model.get_logits(tokens)[-1][-1]
        elif is_non_hf_llamacpp:
            tokens = shared.tokenizer.encode(prompt)
            scores = shared.model.get_logits(tokens)[-1][-1]
        else:
            tokens = shared.tokenizer.encode(prompt, return_tensors='pt').cuda()
            output = shared.model(input_ids=tokens)
            scores = output['logits'][-1][-1]

    probs = torch.softmax(scores, dim=-1, dtype=torch.float)
    topk_values, topk_indices = torch.topk(probs, k=50, largest=True, sorted=True)
    topk_values = [f"{float(i):.5f}" for i in topk_values]
    if is_non_hf_exllamav1 or is_non_hf_llamacpp:
        topk_indices = [i.expand((1, 1)) for i in topk_indices]

    tokens = [shared.tokenizer.decode(i) for i in topk_indices]
    output = ''
    for row in list(zip(topk_values, tokens)):
        output += f"{row[0]} - {repr(row[1])}\n"

    return output, previous
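
The tail of `get_next_logits` is just a softmax followed by top-k over the final-position logits. A standalone sketch of that postprocessing, independent of the webui's `shared` globals (function name chosen here for illustration):

    import torch

    def top_k_table(logits: torch.Tensor, k: int = 50) -> str:
        # logits: raw next-token scores of shape (vocab_size,)
        probs = torch.softmax(logits, dim=-1, dtype=torch.float)
        values, indices = torch.topk(probs, k=k, largest=True, sorted=True)
        # One "probability - token id" row per candidate, most likely first
        return "\n".join(f"{float(v):.5f} - {int(i)}" for v, i in zip(values, indices))
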
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/simple_tokenizer.py
DELETED
@@ -1,132 +0,0 @@
import gzip
import html
import os
from functools import lru_cache

import ftfy
import regex as re


@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")


@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a signficant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text


class SimpleTokenizer(object):
    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
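
A minimal usage sketch for the tokenizer above, assuming the bundled `bpe_simple_vocab_16e6.txt.gz` vocabulary sits next to the module as `default_bpe()` expects (the example string is arbitrary):

    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("a photo of a cat")   # list of BPE token ids
    text = tokenizer.decode(ids)                 # word-final '</w>' markers become spaces
    print(ids, text)
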
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/evaluations/fid_score.py
DELETED
@@ -1,246 +0,0 @@
|
|
1 |
-
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
|
2 |
-
The FID metric calculates the distance between two distributions of examples.
|
3 |
-
Typically, we have summary statistics (mean & covariance matrix) of one
|
4 |
-
of these distributions, while the 2nd distribution is given by a GAN.
|
5 |
-
When run as a stand-alone program, it compares the distribution of
|
6 |
-
examples that are stored as PNG/JPEG at a specified location with a
|
7 |
-
distribution given by summary statistics (in pickle format).
|
8 |
-
The FID is calculated by assuming that X_1 and X_2 are the activations of
|
9 |
-
the pool_3 layer of the inception net for generated samples and real world
|
10 |
-
samples respectively.
|
11 |
-
See --help to see further details.
|
12 |
-
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
|
13 |
-
of Tensorflow
|
14 |
-
Copyright 2018 Institute of Bioinformatics, JKU Linz
|
15 |
-
Licensed under the Apache License, Version 2.0 (the "License");
|
16 |
-
you may not use this file except in compliance with the License.
|
17 |
-
You may obtain a copy of the License at
|
18 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
19 |
-
Unless required by applicable law or agreed to in writing, software
|
20 |
-
distributed under the License is distributed on an "AS IS" BASIS,
|
21 |
-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
22 |
-
See the License for the specific language governing permissions and
|
23 |
-
limitations under the License.
|
24 |
-
"""
|
25 |
-
import os
|
26 |
-
import pathlib
|
27 |
-
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
|
28 |
-
|
29 |
-
import numpy as np
|
30 |
-
import torch
|
31 |
-
from scipy import linalg
|
32 |
-
from torch.nn.functional import adaptive_avg_pool2d
|
33 |
-
|
34 |
-
from PIL import Image
|
35 |
-
from evaluations.inception import InceptionV3
|
36 |
-
from dataloader.image_folder import make_dataset
|
37 |
-
|
38 |
-
try:
|
39 |
-
from tqdm import tqdm
|
40 |
-
except ImportError:
|
41 |
-
# If not tqdm is not available, provide a mock version of it
|
42 |
-
def tqdm(x): return x
|
43 |
-
|
44 |
-
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
|
45 |
-
parser.add_argument('--batch-size', type=int, default=50,
|
46 |
-
help='Batch size to use')
|
47 |
-
parser.add_argument('--dims', type=int, default=2048,
|
48 |
-
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
|
49 |
-
help=('Dimensionality of Inception features to use. '
|
50 |
-
'By default, uses pool3 features'))
|
51 |
-
parser.add_argument('-c', '--gpu', default='', type=str,
|
52 |
-
help='GPU to use (leave blank for CPU only)')
|
53 |
-
parser.add_argument('path', type=str, nargs=2,
|
54 |
-
help=('Paths to the generated examples or '
|
55 |
-
'to .npz statistic files'))
|
56 |
-
|
57 |
-
|
58 |
-
def imread(filename):
|
59 |
-
"""
|
60 |
-
Loads an image file into a (height, width, 3) uint8 ndarray. .resize((229, 229), Image.BILINEAR)
|
61 |
-
"""
|
62 |
-
return np.asarray(Image.open(filename).convert('RGB').resize((229, 229), Image.BILINEAR), dtype=np.uint8)[..., :3]
|
63 |
-
|
64 |
-
|
65 |
-
def get_activations(files, model, batch_size=50, dims=2048, cuda=False):
|
66 |
-
"""Calculates the activations of the pool_3 layer for all examples.
|
67 |
-
Params:
|
68 |
-
-- files : List of image files paths
|
69 |
-
-- model : Instance of inception model
|
70 |
-
-- batch_size : Batch size of examples for the model to process at once.
|
71 |
-
Make sure that the number of samples is a multiple of
|
72 |
-
the batch size, otherwise some samples are ignored. This
|
73 |
-
behavior is retained to match the original FID score
|
74 |
-
implementation.
|
75 |
-
-- dims : Dimensionality of features returned by Inception
|
76 |
-
-- cuda : If set to True, use GPU
|
77 |
-
Returns:
|
78 |
-
-- A numpy array of dimension (num examples, dims) that contains the
|
79 |
-
activations of the given tensor when feeding inception with the
|
80 |
-
query tensor.
|
81 |
-
"""
|
82 |
-
model.eval()
|
83 |
-
|
84 |
-
if batch_size > len(files):
|
85 |
-
print(('Warning: batch size is bigger than the data size. '
|
86 |
-
'Setting batch size to data size'))
|
87 |
-
batch_size = len(files)
|
88 |
-
|
89 |
-
pred_arr = np.empty((len(files), dims))
|
90 |
-
|
91 |
-
for i in tqdm(range(0, len(files), batch_size)):
|
92 |
-
start = i
|
93 |
-
end = i + batch_size
|
94 |
-
|
95 |
-
images = np.array([imread(str(f)).astype(np.float32)
|
96 |
-
for f in files[start:end]])
|
97 |
-
|
98 |
-
# Reshape to (n_images, 3, height, width)
|
99 |
-
images = images.transpose((0, 3, 1, 2))
|
100 |
-
images /= 255
|
101 |
-
|
102 |
-
batch = torch.from_numpy(images).type(torch.FloatTensor)
|
103 |
-
if cuda:
|
104 |
-
batch = batch.cuda()
|
105 |
-
|
106 |
-
pred = model(batch)[0]
|
107 |
-
|
108 |
-
# If model output is not scalar, apply global spatial average pooling.
|
109 |
-
# This happens if you choose a dimensionality not equal 2048.
|
110 |
-
if pred.size(2) != 1 or pred.size(3) != 1:
|
111 |
-
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
|
112 |
-
|
113 |
-
pred_arr[start:end] = pred.cpu().data.numpy().reshape(pred.size(0), -1)
|
114 |
-
|
115 |
-
return pred_arr
|
116 |
-
|
117 |
-
|
118 |
-
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
|
119 |
-
"""Numpy implementation of the Frechet Distance.
|
120 |
-
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
|
121 |
-
and X_2 ~ N(mu_2, C_2) is
|
122 |
-
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
|
123 |
-
Stable version by Dougal J. Sutherland.
|
124 |
-
Params:
|
125 |
-
-- mu1 : Numpy array containing the activations of a layer of the
|
126 |
-
inception net (like returned by the function 'get_predictions')
|
127 |
-
for generated samples.
|
128 |
-
-- mu2 : The sample mean over activations, precalculated on an
|
129 |
-
representative data set.
|
130 |
-
-- sigma1: The covariance matrix over activations for generated samples.
|
131 |
-
-- sigma2: The covariance matrix over activations, precalculated on an
|
132 |
-
representative data set.
|
133 |
-
Returns:
|
134 |
-
-- : The Frechet Distance.
|
135 |
-
"""
|
136 |
-
|
137 |
-
mu1 = np.atleast_1d(mu1)
|
138 |
-
mu2 = np.atleast_1d(mu2)
|
139 |
-
|
140 |
-
sigma1 = np.atleast_2d(sigma1)
|
141 |
-
sigma2 = np.atleast_2d(sigma2)
|
142 |
-
|
143 |
-
assert mu1.shape == mu2.shape, \
|
144 |
-
'Training and test mean vectors have different lengths'
|
145 |
-
assert sigma1.shape == sigma2.shape, \
|
146 |
-
'Training and test covariances have different dimensions'
|
147 |
-
|
148 |
-
diff = mu1 - mu2
|
149 |
-
|
150 |
-
# Product might be almost singular
|
151 |
-
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
|
152 |
-
if not np.isfinite(covmean).all():
|
153 |
-
msg = ('fid calculation produces singular product; '
|
154 |
-
'adding %s to diagonal of cov estimates') % eps
|
155 |
-
print(msg)
|
156 |
-
offset = np.eye(sigma1.shape[0]) * eps
|
157 |
-
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
|
158 |
-
|
159 |
-
# Numerical error might give slight imaginary component
|
160 |
-
if np.iscomplexobj(covmean):
|
161 |
-
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
|
162 |
-
m = np.max(np.abs(covmean.imag))
|
163 |
-
raise ValueError('Imaginary component {}'.format(m))
|
164 |
-
covmean = covmean.real
|
165 |
-
|
166 |
-
tr_covmean = np.trace(covmean)
|
167 |
-
|
168 |
-
return (diff.dot(diff) + np.trace(sigma1) +
|
169 |
-
np.trace(sigma2) - 2 * tr_covmean)
|
170 |
-
|
171 |
-
|
172 |
-
def calculate_activation_statistics(files, model, batch_size=50, dims=2048,
|
173 |
-
cuda=False):
|
174 |
-
"""Calculation of the statistics used by the FID.
|
175 |
-
Params:
|
176 |
-
-- files : List of image files paths
|
177 |
-
-- model : Instance of inception model
|
178 |
-
-- batch_size : The examples numpy array is split into batches with
|
179 |
-
batch size batch_size. A reasonable batch size
|
180 |
-
depends on the hardware.
|
181 |
-
-- dims : Dimensionality of features returned by Inception
|
182 |
-
-- cuda : If set to True, use GPU
|
183 |
-
Returns:
|
184 |
-
-- mu : The mean over samples of the activations of the pool_3 layer of
|
185 |
-
the inception model.
|
186 |
-
-- sigma : The covariance matrix of the activations of the pool_3 layer of
|
187 |
-
the inception model.
|
188 |
-
"""
|
189 |
-
act = get_activations(files, model, batch_size, dims, cuda)
|
190 |
-
mu = np.mean(act, axis=0)
|
191 |
-
sigma = np.cov(act, rowvar=False)
|
192 |
-
return mu, sigma
|
193 |
-
|
194 |
-
|
195 |
-
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
|
196 |
-
if path.endswith('.npz'):
|
197 |
-
f = np.load(path)
|
198 |
-
m, s = f['mu'][:], f['sigma'][:]
|
199 |
-
f.close()
|
200 |
-
elif path.endswith('.txt'):
|
201 |
-
files, file_size = make_dataset(path)
|
202 |
-
m, s = calculate_activation_statistics(files, model, batch_size,
|
203 |
-
dims, cuda)
|
204 |
-
else:
|
205 |
-
path = pathlib.Path(path)
|
206 |
-
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
|
207 |
-
m, s = calculate_activation_statistics(files, model, batch_size,
|
208 |
-
dims, cuda)
|
209 |
-
|
210 |
-
return m, s
|
211 |
-
|
212 |
-
|
213 |
-
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
|
214 |
-
"""Calculates the FID of two paths"""
|
215 |
-
for p in paths:
|
216 |
-
if not os.path.exists(p):
|
217 |
-
raise RuntimeError('Invalid path: %s' % p)
|
218 |
-
|
219 |
-
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
|
220 |
-
|
221 |
-
model = InceptionV3([block_idx])
|
222 |
-
if cuda:
|
223 |
-
model.cuda()
|
224 |
-
|
225 |
-
m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
|
226 |
-
dims, cuda)
|
227 |
-
m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
|
228 |
-
dims, cuda)
|
229 |
-
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
|
230 |
-
|
231 |
-
return fid_value
|
232 |
-
|
233 |
-
|
234 |
-
def main():
|
235 |
-
args = parser.parse_args()
|
236 |
-
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
|
237 |
-
|
238 |
-
fid_value = calculate_fid_given_paths(args.path,
|
239 |
-
args.batch_size,
|
240 |
-
args.gpu != '',
|
241 |
-
args.dims)
|
242 |
-
print('FID: ', fid_value)
|
243 |
-
|
244 |
-
|
245 |
-
if __name__ == '__main__':
|
246 |
-
main()
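
The quantity returned by `calculate_frechet_distance` above is the squared Fréchet distance between two Gaussians, as its docstring states:

    d^2 = \lVert \mu_1 - \mu_2 \rVert_2^2 + \operatorname{Tr}\!\left(\Sigma_1 + \Sigma_2 - 2\,(\Sigma_1 \Sigma_2)^{1/2}\right)

From the argument parser defined in this script, it is invoked as `python fid_score.py <path_or_npz_1> <path_or_npz_2>`, optionally with `--batch-size`, `--dims`, and `-c <gpu-id>`.
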
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/legacy/__init__.py
DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/winterm_test.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
|
2 |
-
import sys
|
3 |
-
from unittest import TestCase, main, skipUnless
|
4 |
-
|
5 |
-
try:
|
6 |
-
from unittest.mock import Mock, patch
|
7 |
-
except ImportError:
|
8 |
-
from mock import Mock, patch
|
9 |
-
|
10 |
-
from ..winterm import WinColor, WinStyle, WinTerm
|
11 |
-
|
12 |
-
|
13 |
-
class WinTermTest(TestCase):
|
14 |
-
|
15 |
-
@patch('colorama.winterm.win32')
|
16 |
-
def testInit(self, mockWin32):
|
17 |
-
mockAttr = Mock()
|
18 |
-
mockAttr.wAttributes = 7 + 6 * 16 + 8
|
19 |
-
mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
|
20 |
-
term = WinTerm()
|
21 |
-
self.assertEqual(term._fore, 7)
|
22 |
-
self.assertEqual(term._back, 6)
|
23 |
-
self.assertEqual(term._style, 8)
|
24 |
-
|
25 |
-
@skipUnless(sys.platform.startswith("win"), "requires Windows")
|
26 |
-
def testGetAttrs(self):
|
27 |
-
term = WinTerm()
|
28 |
-
|
29 |
-
term._fore = 0
|
30 |
-
term._back = 0
|
31 |
-
term._style = 0
|
32 |
-
self.assertEqual(term.get_attrs(), 0)
|
33 |
-
|
34 |
-
term._fore = WinColor.YELLOW
|
35 |
-
self.assertEqual(term.get_attrs(), WinColor.YELLOW)
|
36 |
-
|
37 |
-
term._back = WinColor.MAGENTA
|
38 |
-
self.assertEqual(
|
39 |
-
term.get_attrs(),
|
40 |
-
WinColor.YELLOW + WinColor.MAGENTA * 16)
|
41 |
-
|
42 |
-
term._style = WinStyle.BRIGHT
|
43 |
-
self.assertEqual(
|
44 |
-
term.get_attrs(),
|
45 |
-
WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT)
|
46 |
-
|
47 |
-
@patch('colorama.winterm.win32')
|
48 |
-
def testResetAll(self, mockWin32):
|
49 |
-
mockAttr = Mock()
|
50 |
-
mockAttr.wAttributes = 1 + 2 * 16 + 8
|
51 |
-
mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
|
52 |
-
term = WinTerm()
|
53 |
-
|
54 |
-
term.set_console = Mock()
|
55 |
-
term._fore = -1
|
56 |
-
term._back = -1
|
57 |
-
term._style = -1
|
58 |
-
|
59 |
-
term.reset_all()
|
60 |
-
|
61 |
-
self.assertEqual(term._fore, 1)
|
62 |
-
self.assertEqual(term._back, 2)
|
63 |
-
self.assertEqual(term._style, 8)
|
64 |
-
self.assertEqual(term.set_console.called, True)
|
65 |
-
|
66 |
-
@skipUnless(sys.platform.startswith("win"), "requires Windows")
|
67 |
-
def testFore(self):
|
68 |
-
term = WinTerm()
|
69 |
-
term.set_console = Mock()
|
70 |
-
term._fore = 0
|
71 |
-
|
72 |
-
term.fore(5)
|
73 |
-
|
74 |
-
self.assertEqual(term._fore, 5)
|
75 |
-
self.assertEqual(term.set_console.called, True)
|
76 |
-
|
77 |
-
@skipUnless(sys.platform.startswith("win"), "requires Windows")
|
78 |
-
def testBack(self):
|
79 |
-
term = WinTerm()
|
80 |
-
term.set_console = Mock()
|
81 |
-
term._back = 0
|
82 |
-
|
83 |
-
term.back(5)
|
84 |
-
|
85 |
-
self.assertEqual(term._back, 5)
|
86 |
-
self.assertEqual(term.set_console.called, True)
|
87 |
-
|
88 |
-
@skipUnless(sys.platform.startswith("win"), "requires Windows")
|
89 |
-
def testStyle(self):
|
90 |
-
term = WinTerm()
|
91 |
-
term.set_console = Mock()
|
92 |
-
term._style = 0
|
93 |
-
|
94 |
-
term.style(22)
|
95 |
-
|
96 |
-
self.assertEqual(term._style, 22)
|
97 |
-
self.assertEqual(term.set_console.called, True)
|
98 |
-
|
99 |
-
@patch('colorama.winterm.win32')
|
100 |
-
def testSetConsole(self, mockWin32):
|
101 |
-
mockAttr = Mock()
|
102 |
-
mockAttr.wAttributes = 0
|
103 |
-
mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
|
104 |
-
term = WinTerm()
|
105 |
-
term.windll = Mock()
|
106 |
-
|
107 |
-
term.set_console()
|
108 |
-
|
109 |
-
self.assertEqual(
|
110 |
-
mockWin32.SetConsoleTextAttribute.call_args,
|
111 |
-
((mockWin32.STDOUT, term.get_attrs()), {})
|
112 |
-
)
|
113 |
-
|
114 |
-
@patch('colorama.winterm.win32')
|
115 |
-
def testSetConsoleOnStderr(self, mockWin32):
|
116 |
-
mockAttr = Mock()
|
117 |
-
mockAttr.wAttributes = 0
|
118 |
-
mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
|
119 |
-
term = WinTerm()
|
120 |
-
term.windll = Mock()
|
121 |
-
|
122 |
-
term.set_console(on_stderr=True)
|
123 |
-
|
124 |
-
self.assertEqual(
|
125 |
-
mockWin32.SetConsoleTextAttribute.call_args,
|
126 |
-
((mockWin32.STDERR, term.get_attrs()), {})
|
127 |
-
)
|
128 |
-
|
129 |
-
|
130 |
-
if __name__ == '__main__':
|
131 |
-
main()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_impl.py
DELETED
@@ -1,330 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import os
|
3 |
-
import sys
|
4 |
-
import tempfile
|
5 |
-
from contextlib import contextmanager
|
6 |
-
from os.path import abspath
|
7 |
-
from os.path import join as pjoin
|
8 |
-
from subprocess import STDOUT, check_call, check_output
|
9 |
-
|
10 |
-
from ._in_process import _in_proc_script_path
|
11 |
-
|
12 |
-
|
13 |
-
def write_json(obj, path, **kwargs):
|
14 |
-
with open(path, 'w', encoding='utf-8') as f:
|
15 |
-
json.dump(obj, f, **kwargs)
|
16 |
-
|
17 |
-
|
18 |
-
def read_json(path):
|
19 |
-
with open(path, encoding='utf-8') as f:
|
20 |
-
return json.load(f)
|
21 |
-
|
22 |
-
|
23 |
-
class BackendUnavailable(Exception):
|
24 |
-
"""Will be raised if the backend cannot be imported in the hook process."""
|
25 |
-
def __init__(self, traceback):
|
26 |
-
self.traceback = traceback
|
27 |
-
|
28 |
-
|
29 |
-
class BackendInvalid(Exception):
|
30 |
-
"""Will be raised if the backend is invalid."""
|
31 |
-
def __init__(self, backend_name, backend_path, message):
|
32 |
-
super().__init__(message)
|
33 |
-
self.backend_name = backend_name
|
34 |
-
self.backend_path = backend_path
|
35 |
-
|
36 |
-
|
37 |
-
class HookMissing(Exception):
|
38 |
-
"""Will be raised on missing hooks (if a fallback can't be used)."""
|
39 |
-
def __init__(self, hook_name):
|
40 |
-
super().__init__(hook_name)
|
41 |
-
self.hook_name = hook_name
|
42 |
-
|
43 |
-
|
44 |
-
class UnsupportedOperation(Exception):
|
45 |
-
"""May be raised by build_sdist if the backend indicates that it can't."""
|
46 |
-
def __init__(self, traceback):
|
47 |
-
self.traceback = traceback
|
48 |
-
|
49 |
-
|
50 |
-
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
|
51 |
-
"""The default method of calling the wrapper subprocess.
|
52 |
-
|
53 |
-
This uses :func:`subprocess.check_call` under the hood.
|
54 |
-
"""
|
55 |
-
env = os.environ.copy()
|
56 |
-
if extra_environ:
|
57 |
-
env.update(extra_environ)
|
58 |
-
|
59 |
-
check_call(cmd, cwd=cwd, env=env)
|
60 |
-
|
61 |
-
|
62 |
-
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
|
63 |
-
"""Call the subprocess while suppressing output.
|
64 |
-
|
65 |
-
This uses :func:`subprocess.check_output` under the hood.
|
66 |
-
"""
|
67 |
-
env = os.environ.copy()
|
68 |
-
if extra_environ:
|
69 |
-
env.update(extra_environ)
|
70 |
-
|
71 |
-
check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
|
72 |
-
|
73 |
-
def norm_and_check(source_tree, requested):
    """Normalise and check a backend path.

    Ensure that the requested backend path is specified as a relative path,
    and resolves to a location under the given source tree.

    Return an absolute version of the requested path.
    """
    if os.path.isabs(requested):
        raise ValueError("paths must be relative")

    abs_source = os.path.abspath(source_tree)
    abs_requested = os.path.normpath(os.path.join(abs_source, requested))
    # We have to use commonprefix for Python 2.7 compatibility. So we
    # normalise case to avoid problems because commonprefix is a character
    # based comparison :-(
    norm_source = os.path.normcase(abs_source)
    norm_requested = os.path.normcase(abs_requested)
    if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
        raise ValueError("paths must be inside source tree")

    return abs_requested


class BuildBackendHookCaller:
    """A wrapper to call the build backend hooks for a source directory.
    """

    def __init__(
            self,
            source_dir,
            build_backend,
            backend_path=None,
            runner=None,
            python_executable=None,
    ):
        """
        :param source_dir: The source directory to invoke the build backend for
        :param build_backend: The build backend spec
        :param backend_path: Additional path entries for the build backend spec
        :param runner: The :ref:`subprocess runner <Subprocess Runners>` to use
        :param python_executable:
            The Python executable used to invoke the build backend
        """
        if runner is None:
            runner = default_subprocess_runner

        self.source_dir = abspath(source_dir)
        self.build_backend = build_backend
        if backend_path:
            backend_path = [
                norm_and_check(self.source_dir, p) for p in backend_path
            ]
        self.backend_path = backend_path
        self._subprocess_runner = runner
        if not python_executable:
            python_executable = sys.executable
        self.python_executable = python_executable

    @contextmanager
    def subprocess_runner(self, runner):
        """A context manager for temporarily overriding the default
        :ref:`subprocess runner <Subprocess Runners>`.

        .. code-block:: python

            hook_caller = BuildBackendHookCaller(...)
            with hook_caller.subprocess_runner(quiet_subprocess_runner):
                ...
        """
        prev = self._subprocess_runner
        self._subprocess_runner = runner
        try:
            yield
        finally:
            self._subprocess_runner = prev

    def _supported_features(self):
        """Return the list of optional features supported by the backend."""
        return self._call_hook('_supported_features', {})

    def get_requires_for_build_wheel(self, config_settings=None):
        """Get additional dependencies required for building a wheel.

        :returns: A list of :pep:`dependency specifiers <508>`.
        :rtype: list[str]

        .. admonition:: Fallback

            If the build backend does not define a hook with this name, an
            empty list will be returned.
        """
        return self._call_hook('get_requires_for_build_wheel', {
            'config_settings': config_settings
        })

    def prepare_metadata_for_build_wheel(
            self, metadata_directory, config_settings=None,
            _allow_fallback=True):
        """Prepare a ``*.dist-info`` folder with metadata for this project.

        :returns: Name of the newly created subfolder within
                  ``metadata_directory``, containing the metadata.
        :rtype: str

        .. admonition:: Fallback

            If the build backend does not define a hook with this name and
            ``_allow_fallback`` is truthy, the backend will be asked to build a
            wheel via the ``build_wheel`` hook and the dist-info extracted from
            that will be returned.
        """
        return self._call_hook('prepare_metadata_for_build_wheel', {
            'metadata_directory': abspath(metadata_directory),
            'config_settings': config_settings,
            '_allow_fallback': _allow_fallback,
        })

    def build_wheel(
            self, wheel_directory, config_settings=None,
            metadata_directory=None):
        """Build a wheel from this project.

        :returns:
            The name of the newly created wheel within ``wheel_directory``.

        .. admonition:: Interaction with fallback

            If the ``build_wheel`` hook was called in the fallback for
            :meth:`prepare_metadata_for_build_wheel`, the build backend would
            not be invoked. Instead, the previously built wheel will be copied
            to ``wheel_directory`` and the name of that file will be returned.
        """
        if metadata_directory is not None:
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_wheel', {
            'wheel_directory': abspath(wheel_directory),
            'config_settings': config_settings,
            'metadata_directory': metadata_directory,
        })

    def get_requires_for_build_editable(self, config_settings=None):
        """Get additional dependencies required for building an editable wheel.

        :returns: A list of :pep:`dependency specifiers <508>`.
        :rtype: list[str]

        .. admonition:: Fallback

            If the build backend does not define a hook with this name, an
            empty list will be returned.
        """
        return self._call_hook('get_requires_for_build_editable', {
            'config_settings': config_settings
        })

    def prepare_metadata_for_build_editable(
            self, metadata_directory, config_settings=None,
            _allow_fallback=True):
        """Prepare a ``*.dist-info`` folder with metadata for this project.

        :returns: Name of the newly created subfolder within
                  ``metadata_directory``, containing the metadata.
        :rtype: str

        .. admonition:: Fallback

            If the build backend does not define a hook with this name and
            ``_allow_fallback`` is truthy, the backend will be asked to build a
            wheel via the ``build_editable`` hook and the dist-info
            extracted from that will be returned.
        """
        return self._call_hook('prepare_metadata_for_build_editable', {
            'metadata_directory': abspath(metadata_directory),
            'config_settings': config_settings,
            '_allow_fallback': _allow_fallback,
        })

    def build_editable(
            self, wheel_directory, config_settings=None,
            metadata_directory=None):
        """Build an editable wheel from this project.

        :returns:
            The name of the newly created wheel within ``wheel_directory``.

        .. admonition:: Interaction with fallback

            If the ``build_editable`` hook was called in the fallback for
            :meth:`prepare_metadata_for_build_editable`, the build backend
            would not be invoked. Instead, the previously built wheel will be
            copied to ``wheel_directory`` and the name of that file will be
            returned.
        """
        if metadata_directory is not None:
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_editable', {
            'wheel_directory': abspath(wheel_directory),
            'config_settings': config_settings,
            'metadata_directory': metadata_directory,
        })

    def get_requires_for_build_sdist(self, config_settings=None):
        """Get additional dependencies required for building an sdist.

        :returns: A list of :pep:`dependency specifiers <508>`.
        :rtype: list[str]
        """
        return self._call_hook('get_requires_for_build_sdist', {
            'config_settings': config_settings
        })

    def build_sdist(self, sdist_directory, config_settings=None):
        """Build an sdist from this project.

        :returns:
            The name of the newly created sdist within ``wheel_directory``.
        """
        return self._call_hook('build_sdist', {
            'sdist_directory': abspath(sdist_directory),
            'config_settings': config_settings,
        })

    def _call_hook(self, hook_name, kwargs):
        extra_environ = {'PEP517_BUILD_BACKEND': self.build_backend}

        if self.backend_path:
            backend_path = os.pathsep.join(self.backend_path)
            extra_environ['PEP517_BACKEND_PATH'] = backend_path

        with tempfile.TemporaryDirectory() as td:
            hook_input = {'kwargs': kwargs}
            write_json(hook_input, pjoin(td, 'input.json'), indent=2)

            # Run the hook in a subprocess
            with _in_proc_script_path() as script:
                python = self.python_executable
                self._subprocess_runner(
                    [python, abspath(str(script)), hook_name, td],
                    cwd=self.source_dir,
                    extra_environ=extra_environ
                )

            data = read_json(pjoin(td, 'output.json'))
            if data.get('unsupported'):
                raise UnsupportedOperation(data.get('traceback', ''))
            if data.get('no_backend'):
                raise BackendUnavailable(data.get('traceback', ''))
            if data.get('backend_invalid'):
                raise BackendInvalid(
                    backend_name=self.build_backend,
                    backend_path=self.backend_path,
                    message=data.get('backend_error', '')
                )
            if data.get('hook_missing'):
                raise HookMissing(data.get('missing_hook_name') or hook_name)
            return data['return_val']
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/bdist.py
DELETED
@@ -1,157 +0,0 @@
"""distutils.command.bdist

Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""

import os
import warnings

from distutils.core import Command
from distutils.errors import DistutilsPlatformError, DistutilsOptionError
from distutils.util import get_platform


def show_formats():
    """Print list of available formats (arguments to "--format" option)."""
    from distutils.fancy_getopt import FancyGetopt

    formats = []
    for format in bdist.format_commands:
        formats.append(("formats=" + format, None, bdist.format_commands[format][1]))
    pretty_printer = FancyGetopt(formats)
    pretty_printer.print_help("List of available distribution formats:")


class ListCompat(dict):
    # adapter to allow for Setuptools compatibility in format_commands
    def append(self, item):
        warnings.warn(
            """format_commands is now a dict. append is deprecated.""",
            DeprecationWarning,
            stacklevel=2,
        )


class bdist(Command):

    description = "create a built (binary) distribution"

    user_options = [
        ('bdist-base=', 'b', "temporary directory for creating built distributions"),
        (
            'plat-name=',
            'p',
            "platform name to embed in generated filenames "
            "(default: %s)" % get_platform(),
        ),
        ('formats=', None, "formats for distribution (comma-separated list)"),
        (
            'dist-dir=',
            'd',
            "directory to put final built distributions in " "[default: dist]",
        ),
        ('skip-build', None, "skip rebuilding everything (for testing/debugging)"),
        (
            'owner=',
            'u',
            "Owner name used when creating a tar file" " [default: current user]",
        ),
        (
            'group=',
            'g',
            "Group name used when creating a tar file" " [default: current group]",
        ),
    ]

    boolean_options = ['skip-build']

    help_options = [
        ('help-formats', None, "lists available distribution formats", show_formats),
    ]

    # The following commands do not take a format option from bdist
    no_format_option = ('bdist_rpm',)

    # This won't do in reality: will need to distinguish RPM-ish Linux,
    # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
    default_format = {'posix': 'gztar', 'nt': 'zip'}

    # Define commands in preferred order for the --help-formats option
    format_commands = ListCompat(
        {
            'rpm': ('bdist_rpm', "RPM distribution"),
            'gztar': ('bdist_dumb', "gzip'ed tar file"),
            'bztar': ('bdist_dumb', "bzip2'ed tar file"),
            'xztar': ('bdist_dumb', "xz'ed tar file"),
            'ztar': ('bdist_dumb', "compressed tar file"),
            'tar': ('bdist_dumb', "tar file"),
            'zip': ('bdist_dumb', "ZIP file"),
        }
    )

    # for compatibility until consumers only reference format_commands
    format_command = format_commands

    def initialize_options(self):
        self.bdist_base = None
        self.plat_name = None
        self.formats = None
        self.dist_dir = None
        self.skip_build = 0
        self.group = None
        self.owner = None

    def finalize_options(self):
        # have to finalize 'plat_name' before 'bdist_base'
        if self.plat_name is None:
            if self.skip_build:
                self.plat_name = get_platform()
            else:
                self.plat_name = self.get_finalized_command('build').plat_name

        # 'bdist_base' -- parent of per-built-distribution-format
        # temporary directories (eg. we'll probably have
        # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
        if self.bdist_base is None:
            build_base = self.get_finalized_command('build').build_base
            self.bdist_base = os.path.join(build_base, 'bdist.' + self.plat_name)

        self.ensure_string_list('formats')
        if self.formats is None:
            try:
                self.formats = [self.default_format[os.name]]
            except KeyError:
                raise DistutilsPlatformError(
                    "don't know how to create built distributions "
                    "on platform %s" % os.name
                )

        if self.dist_dir is None:
            self.dist_dir = "dist"

    def run(self):
        # Figure out which sub-commands we need to run.
        commands = []
        for format in self.formats:
            try:
                commands.append(self.format_commands[format][0])
            except KeyError:
                raise DistutilsOptionError("invalid format '%s'" % format)

        # Reinitialize and run each command.
        for i in range(len(self.formats)):
            cmd_name = commands[i]
            sub_cmd = self.reinitialize_command(cmd_name)
            if cmd_name not in self.no_format_option:
                sub_cmd.format = self.formats[i]

            # passing the owner and group names for tar archiving
            if cmd_name == 'bdist_dumb':
                sub_cmd.owner = self.owner
                sub_cmd.group = self.group

            # If we're going to need to run this command again, tell it to
            # keep its temporary files around so subsequent runs go faster.
            if cmd_name in commands[i + 1 :]:
                sub_cmd.keep_temp = 1
            self.run_command(cmd_name)
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/__init__.py
DELETED
@@ -1,12 +0,0 @@
from distutils.command.bdist import bdist
import sys

if 'egg' not in bdist.format_commands:
    try:
        bdist.format_commands['egg'] = ('bdist_egg', "Python .egg file")
    except TypeError:
        # For backward compatibility with older distutils (stdlib)
        bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
        bdist.format_commands.append('egg')

del bdist, sys
spaces/BLACKHOST/Date/date.py
DELETED
@@ -1,9 +0,0 @@
from datetime import datetime
from os import system
from time import sleep

while True:
    time = datetime.now()
    print(time.strftime(' TiME:'+"[%H: %M: %S:] "))
    sleep(1)
    system("clear")
spaces/Bart92/RVC_HF/julius/__init__.py
DELETED
@@ -1,41 +0,0 @@
# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
# Author: adefossez, 2020

# flake8: noqa
"""
.. image:: ../logo.png

Julius contains different Digital Signal Processing algorithms implemented
with PyTorch, so that they are differentiable and available on CUDA.
Note that all the modules implemented here can be used with TorchScript.

For now, I have implemented:

- `julius.resample`: fast sinc resampling.
- `julius.fftconv`: FFT based convolutions.
- `julius.lowpass`: FIR low pass filter banks.
- `julius.filters`: FIR high pass and band pass filters.
- `julius.bands`: Decomposition of a waveform signal over mel-scale frequency bands.

Along that, you might found useful utilities in:

- `julius.core`: DSP related functions.
- `julius.utils`: Generic utilities.


Please checkout [the Github repository](https://github.com/adefossez/julius) for other informations.
For a verification of the speed and correctness of Julius, check the benchmark module `bench`.


This package is named in this honor of
[Julius O. Smith](https://ccrma.stanford.edu/~jos/),
whose books and website were a gold mine of information for me to learn about DSP. Go checkout his website if you want
to learn more about DSP.
"""

from .bands import SplitBands, split_bands
from .fftconv import fft_conv1d, FFTConv1d
from .filters import bandpass_filter, BandPassFilter
from .filters import highpass_filter, highpass_filters, HighPassFilter, HighPassFilters
from .lowpass import lowpass_filter, lowpass_filters, LowPassFilters, LowPassFilter
from .resample import resample_frac, ResampleFrac
spaces/Benson/text-generation/Examples/Cinco Noches En Freddy 39s 6 Descarga.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cinco noches en Freddy’s 6 Descargar: Cómo jugar la última entrega de la serie de juegos de terror</h1>
|
3 |
-
<p>Si eres un fan de los juegos de terror, probablemente hayas oído hablar de Five Nights at Freddy’s, una popular serie que cuenta con personajes animatrónicos que intentan matarte en una pizzería. La serie ha generado varias secuelas, spin-offs, novelas e incluso una película en desarrollo. Pero ¿qué pasa con la última entrega, Five Nights at Freddy’s 6? ¿Cómo se puede descargar y jugar? En este artículo, te contaremos todo lo que necesitas saber sobre Five Nights at Freddy’s 6, también conocido como Five Nights at Freddy’s: Security Breach.</p>
|
4 |
-
<h2>cinco noches en freddy 39;s 6 descarga</h2><br /><p><b><b>Download Zip</b> · <a href="https://bltlly.com/2v6LPJ">https://bltlly.com/2v6LPJ</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Cinco Noches en Freddy’s 6?</h2>
|
6 |
-
<p>Five Nights at Freddy’s 6 es el sexto juego principal de la serie Five Nights at Freddy’s, creado por Scott Cawthon y desarrollado por Steel Wool Studios. Fue lanzado el 16 de diciembre de 2021 para Windows, PlayStation 4 y PlayStation 5. También está planeado para Xbox One, Xbox Series X/S, Nintendo Switch, iOS y Android en 2022. </p>
|
7 |
-
<h3>La trama y la configuración del juego</h3>
|
8 |
-
<p>El juego tiene lugar en Mega Pizzaplex de Freddy Fazbear, un centro de diversión familiar de tres pisos que cuenta con personajes animatrónicos como Freddy Fazbear, Chica, Monty Gator y Roxanne Wolf. Usted juega como Gregory, un joven que está atrapado dentro de la Pizzaplex durante la noche. Con la ayuda del propio Freddy, Gregory debe descubrir los secretos del Pizzaplex, aprender la verdad sobre su pasado y sobrevivir hasta el amanecer. Sin embargo, no está solo. El Pizzaplex es también el hogar de Vanessa, un guardia de seguridad que tiene una agenda oscura, y otros animatrónicos hostiles que no se detendrán ante nada para atraparlo. </p>
|
9 |
-
<h3>La jugabilidad y características del juego</h3>
|
10 |
-
|
11 |
-
<p>El juego también ofrece una variedad de atracciones y actividades para disfrutar en el Pizzaplex. Puedes jugar juegos de árcade como Monty Golf, Roxy Raceway, Bonnie Bowl o Fazbear Blast. También puede explorar diferentes áreas, como las alcantarillas o la arena láser tag. También puede recoger monedas y fichas para comprar artículos o desbloquear secretos. </p>
|
12 |
-
<p></p>
|
13 |
-
<h2>Cómo descargar Five Nights at Freddy’s 6?</h2>
|
14 |
-
<h3>Las plataformas oficiales y los precios del juego</h3>
|
15 |
-
<p>El juego está disponible para su compra en Steam para usuarios de Windows por $39.99. También puedes comprarlo en PlayStation Store para usuarios de PlayStation 4 o PlayStation 5 por $39.99. El juego admite la compra cruzada entre las versiones de PS4 y PS5. </p>
|
16 |
-
<p>El juego aún no está disponible para otras plataformas como Xbox One, Xbox Series X/S, Nintendo Switch, iOS o Android. Sin embargo, se espera que sean liberados en algún momento de 2022. </p>
|
17 |
-
<h3>Los requisitos del sistema y la compatibilidad del juego</h3>
|
18 |
-
<p>Antes de descargar el juego, usted debe asegurarse de que su dispositivo cumple con los requisitos mínimos del sistema para el juego. Estos son los requisitos del sistema para los usuarios de Windows y PlayStation:</p>
|
19 |
-
<tabla>
|
20 |
-
<tr>
|
21 |
-
<th>Plataforma</th>
|
22 |
-
<th>Requisitos mínimos</th>
|
23 |
-
<th>Requisitos recomendados</th>
|
24 |
-
</tr>
|
25 |
-
<tr>
|
26 |
-
<td>Windows</td>
|
27 |
-
<td>
|
28 |
-
<ul>
|
29 |
-
<li>OS: Windows 10 64-bit</li>
|
30 |
-
<li>Procesador: Intel Core i5-2500K o AMD FX-8350</li>
|
31 |
-
<li>Memoria: 8 GB RAM</li>
|
32 |
-
<li>Gráficos: NVIDIA GeForce GTX 960 o AMD Radeon R9 280X</li>
|
33 |
-
<li>DirectX: Versión 11</li>
|
34 |
-
<li>Almacenamiento: 20 GB de espacio disponible</li>
|
35 |
-
</ul>
|
36 |
-
</td>
|
37 |
-
<td>
|
38 |
-
<ul>
|
39 |
-
<li>OS: Windows 10 64-bit</li>
|
40 |
-
<li>Procesador: Intel Core i7-6700K o AMD Ryzen 5 2600X</li>
|
41 |
-
<li>Memoria: 16 GB RAM</li>
|
42 |
-
<li>Gráficos: NVIDIA GeForce GTX 1070 o AMD Radeon RX Vega 56</li>
|
43 |
-
<li>DirectX: Versión 12</li>
|
44 |
-
<li>Almacenamiento: 20 GB de espacio disponible</li>
|
45 |
-
</ul>
|
46 |
-
</td>
|
47 |
-
</tr>
|
48 |
-
<tr>
|
49 |
-
<td>PlayStation 4/5</td>
|
50 |
-
<td colspan="2">
|
51 |
-
<ul>
|
52 |
-
<li>OS: software de sistema PlayStation 4 o PlayStation 5</li>
|
53 |
-
<li>Procesador: N/A</li>
|
54 |
-
|
55 |
-
<li>Gráficos: N/A</li>
|
56 |
-
<li>DirectX: N/A</li>
|
57 |
-
<li>Almacenamiento: 20 GB de espacio disponible</li>
|
58 |
-
</ul>
|
59 |
-
<p>Nota: El juego admite funciones mejoradas de PS4 Pro y PS5, como una resolución más alta, tiempos de carga más rápidos y trazado de rayos. </p>
|
60 |
-
</td>
|
61 |
-
</tr>
|
62 |
-
</tabla>
|
63 |
-
<p>Si tu dispositivo cumple con los requisitos del sistema, puedes descargar el juego desde las plataformas oficiales siguiendo estos pasos:</p>
|
64 |
-
<ol>
|
65 |
-
<li>Crear una cuenta o iniciar sesión en Steam o PlayStation Store.</li>
|
66 |
-
<li>Búsqueda de cinco noches en Freddy’s 6 o cinco noches en Freddy’s: Violación de seguridad en la tienda. </li>
|
67 |
-
<li>Seleccione el juego y haga clic en Comprar o Añadir al carrito.</li>
|
68 |
-
<li>Complete el proceso de pago y confirme su compra. </li>
|
69 |
-
<li>El juego comenzará a descargarse automáticamente a su dispositivo. </li>
|
70 |
-
<li>Una vez completada la descarga, puedes lanzar el juego y disfrutarlo. </li>
|
71 |
-
<h2>Cómo jugar cinco noches en Freddy’s 6?</h2>
|
72 |
-
<p>Ahora que ha descargado el juego, es posible que se pregunte cómo jugarlo. Estos son algunos consejos y trucos para sobrevivir la noche y descubrir los secretos de la Pizzaplex.</p>
|
73 |
-
<h3>Los consejos y trucos para sobrevivir la noche</h3>
|
74 |
-
<p>El objetivo principal del juego es sobrevivir hasta las 6 a.m. sin ser atrapado por Vanessa o los otros animatrónicos. Aquí hay algunos consejos y trucos para ayudarte a hacerlo:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Utilice las cámaras de seguridad para monitorear su entorno y planificar su ruta. Puede cambiar entre diferentes cámaras usando el ratón o el controlador. También puede acercar o alejar usando la rueda de desplazamiento o los disparadores. Las cámaras te mostrarán dónde están Vanessa y los otros animatrónicos, así como dónde puedes encontrar objetos, herramientas, escondites o salidas. </li>
|
77 |
-
|
78 |
-
<li>Escóndete en diferentes lugares o huye del peligro. Puede esconderse en varios lugares, como casilleros, gabinetes, rejillas de ventilación o botes de basura presionando F o A en su teclado o controlador. También puede huir del peligro pulsando Shift o L3 en su teclado o controlador. Sin embargo, debe tener cuidado con su resistencia, potencia de la batería, nivel de ruido y límite de tiempo. Su resistencia disminuirá si corre demasiado, su potencia de la batería disminuirá si usa demasiados artículos o herramientas, su nivel de ruido aumentará si hace demasiado ruido, y su límite de tiempo disminuirá si toma demasiado tiempo para completar sus objetivos. Si cualquiera de estos factores llega a cero, usted será más vulnerable a ser atrapado. </li>
|
79 |
-
<h3>Los secretos y huevos de Pascua para descubrir en el juego</h3>
|
80 |
-
<p>El juego también ofrece muchos secretos y huevos de Pascua para que los descubras en el juego. Estos son algunos de ellos:</p>
|
81 |
-
<ul>
|
82 |
-
<li>Recoge monedas y fichas para comprar objetos o desbloquear secretos. Puedes encontrar monedas y fichas en todo el Pizzaplex. Puede utilizarlos para comprar artículos en máquinas expendedoras o en contadores de premios. También puedes usarlos para desbloquear secretos como juegos de árcade ocultos, habitaciones secretas o finales secretos. </li>
|
83 |
-
<li>Jugar juegos de árcade para ganar recompensas o acceder a mini-juegos. Puedes jugar juegos de árcade como Monty Golf, Roxy Raceway, Bonnie Bowl o Fazbear Blast in the Pizzaplex. Puedes ganar recompensas como monedas, fichas u objetos al jugarlos. También puedes acceder a minijuegos como Princess Quest, Freddy in Space 2 o Corn Maze jugando ciertos juegos de árcade. </li>
|
84 |
-
<li>Explora diferentes áreas para encontrar pistas o huevos de Pascua. Puedes explorar diferentes áreas como las alcantarillas o la arena láser tag en el Pizzaplex. Puedes encontrar pistas o huevos de Pascua como carteles, notas, cintas o referencias a juegos anteriores u otros medios. </li>
|
85 |
-
</ul>
|
86 |
-
<h2>Conclusión</h2>
|
87 |
-
|
88 |
-
<h2>Preguntas frecuentes</h2>
|
89 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Five Nights at Freddy’s 6:</p>
|
90 |
-
<ol>
|
91 |
-
<li>Q: ¿Cinco noches en Freddy’s 6 da miedo? </li>
|
92 |
-
<li>A: Sí, Five Nights at Freddy’s 6 es un juego de terror que presenta sustos de salto, gore, violencia y temas oscuros. No es adecuado para niños o personas que se asustan fácilmente. </li>
|
93 |
-
<li>Q: ¿Son cinco noches en el 6 canon de Freddy? </li>
|
94 |
-
<li>A: Sí, Five Nights at Freddy’s 6 es canon y parte de la línea de tiempo principal de la serie Five Nights at Freddy’s. Tiene lugar después de los eventos de Five Nights at Freddy’s: Help Wanted y Five Nights at Freddy’s: Special Delivery.</li>
|
95 |
-
<li>Q: ¿Cinco noches en Freddy’s 6 es gratis? </li>
|
96 |
-
<li>A: No, Five Nights at Freddy’s 6 no es gratis. Cuesta $39.99 en Steam y PlayStation Store. Sin embargo, puede estar disponible de forma gratuita o con descuento en ciertas ocasiones o plataformas. </li>
|
97 |
-
<li>Q: ¿Son cinco noches en el multijugador de Freddy’s 6? </li>
|
98 |
-
<li>A: No, Five Nights at Freddy’s 6 no es multijugador. Es un juego para un solo jugador que no admite modos cooperativos o versus en línea o locales. </li>
|
99 |
-
<li>Q: ¿Cinco noches en Freddy’s 6 es el juego final de la serie? </li>
|
100 |
-
<li>A: No, Five Nights at Freddy’s 6 no es el juego final de la serie. Scott Cawthon, el creador de la serie, ha confirmado que hay más juegos en desarrollo, como Five Nights at Freddy’s: Into Madness y Five Nights at Freddy’s: The Movie.</li>
|
101 |
-
</ol></p> 64aa2da5cf<br />
|
102 |
-
<br />
|
103 |
-
<br />
|
spaces/CVPR/Bamboo_ViT-B16_demo/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Bamboo ViT-B16 Demo
emoji: 🎋
colorFrom: blue
colorTo: blue
sdk: gradio
sdk_version: 3.0.17
app_file: app.py
pinned: false
license: cc-by-4.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/GFPGAN-example/setup.py
DELETED
@@ -1,107 +0,0 @@
#!/usr/bin/env python

from setuptools import find_packages, setup

import os
import subprocess
import time

version_file = 'gfpgan/version.py'


def readme():
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content


def get_git_hash():

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'

    return sha


def get_hash():
    if os.path.exists('.git'):
        sha = get_git_hash()[:7]
    else:
        sha = 'unknown'

    return sha


def write_version_py():
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])

    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


def get_requirements(filename='requirements.txt'):
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.replace('\n', '') for line in f.readlines()]
    return requires


if __name__ == '__main__':
    write_version_py()
    setup(
        name='gfpgan',
        version=get_version(),
        description='GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='[email protected]',
        keywords='computer vision, pytorch, image restoration, super-resolution, face restoration, gan, gfpgan',
        url='https://github.com/TencentARC/GFPGAN',
        include_package_data=True,
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License Version 2.0',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
spaces/CVPR/LIVE/thrust/thrust/detail/allocator_aware_execution_policy.h
DELETED
@@ -1,101 +0,0 @@
/*
 *  Copyright 2018 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/execute_with_allocator_fwd.h>
#include <thrust/detail/alignment.h>

#if THRUST_CPP_DIALECT >= 2011
#include <type_traits>
#endif

namespace thrust
{

namespace mr
{

template<typename T, class MR>
class allocator;

}

namespace detail
{

template<template <typename> class ExecutionPolicyCRTPBase>
struct allocator_aware_execution_policy
{
    template<typename MemoryResource>
    struct execute_with_memory_resource_type
    {
        typedef thrust::detail::execute_with_allocator<
            thrust::mr::allocator<
                thrust::detail::max_align_t,
                MemoryResource
            >,
            ExecutionPolicyCRTPBase
        > type;
    };

    template<typename Allocator>
    struct execute_with_allocator_type
    {
        typedef thrust::detail::execute_with_allocator<
            Allocator,
            ExecutionPolicyCRTPBase
        > type;
    };

    template<typename MemoryResource>
    typename execute_with_memory_resource_type<MemoryResource>::type
    operator()(MemoryResource * mem_res) const
    {
        return typename execute_with_memory_resource_type<MemoryResource>::type(mem_res);
    }

    template<typename Allocator>
    typename execute_with_allocator_type<Allocator&>::type
    operator()(Allocator &alloc) const
    {
        return typename execute_with_allocator_type<Allocator&>::type(alloc);
    }

    template<typename Allocator>
    typename execute_with_allocator_type<Allocator>::type
    operator()(const Allocator &alloc) const
    {
        return typename execute_with_allocator_type<Allocator>::type(alloc);
    }

#if THRUST_CPP_DIALECT >= 2011
    // just the rvalue overload
    // perfect forwarding doesn't help, because a const reference has to be turned
    // into a value by copying for the purpose of storing it in execute_with_allocator
    template<typename Allocator,
        typename std::enable_if<!std::is_lvalue_reference<Allocator>::value>::type * = nullptr>
    typename execute_with_allocator_type<Allocator>::type
    operator()(Allocator &&alloc) const
    {
        return typename execute_with_allocator_type<Allocator>::type(std::move(alloc));
    }
#endif
};

}
}
spaces/CVPR/LIVE/thrust/thrust/detail/trivial_sequence.h
DELETED
@@ -1,95 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

/*! \file trivial_sequence.h
 *  \brief Container-like class for wrapping sequences. The wrapped
 *         sequence always has trivial iterators, even when the input
 *         sequence does not.
 */

#pragma once

#include <thrust/iterator/iterator_traits.h>
#include <thrust/detail/type_traits.h>
#include <thrust/detail/execution_policy.h>
#include <thrust/detail/temporary_array.h>
#include <thrust/type_traits/is_contiguous_iterator.h>

namespace thrust
{

namespace detail
{

// never instantiated
template<typename Iterator, typename DerivedPolicy, typename is_trivial> struct _trivial_sequence { };

// trivial case
template<typename Iterator, typename DerivedPolicy>
struct _trivial_sequence<Iterator, DerivedPolicy, thrust::detail::true_type>
{
    typedef Iterator iterator_type;
    Iterator first, last;

    __host__ __device__
    _trivial_sequence(thrust::execution_policy<DerivedPolicy> &, Iterator _first, Iterator _last) : first(_first), last(_last)
    {
    }

    __host__ __device__
    iterator_type begin() { return first; }

    __host__ __device__
    iterator_type end() { return last; }
};

// non-trivial case
template<typename Iterator, typename DerivedPolicy>
struct _trivial_sequence<Iterator, DerivedPolicy, thrust::detail::false_type>
{
    typedef typename thrust::iterator_value<Iterator>::type iterator_value;
    typedef typename thrust::detail::temporary_array<iterator_value, DerivedPolicy>::iterator iterator_type;

    thrust::detail::temporary_array<iterator_value, DerivedPolicy> buffer;

    __host__ __device__
    _trivial_sequence(thrust::execution_policy<DerivedPolicy> &exec, Iterator first, Iterator last)
      : buffer(exec, first, last)
    {
    }

    __host__ __device__
    iterator_type begin() { return buffer.begin(); }

    __host__ __device__
    iterator_type end() { return buffer.end(); }
};

template <typename Iterator, typename DerivedPolicy>
struct trivial_sequence
  : detail::_trivial_sequence<Iterator, DerivedPolicy, typename thrust::is_contiguous_iterator<Iterator>::type>
{
    typedef _trivial_sequence<Iterator, DerivedPolicy, typename thrust::is_contiguous_iterator<Iterator>::type> super_t;

    __host__ __device__
    trivial_sequence(thrust::execution_policy<DerivedPolicy> &exec, Iterator first, Iterator last) : super_t(exec, first, last) { }
};

} // end namespace detail

} // end namespace thrust
spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/plugin.js
DELETED
@@ -1,119 +0,0 @@
let stateArr = {}

export default class plugin {
  /**
   * @param name plugin name
   * @param dsc plugin description
   * @param handler handler configuration
   * @param handler.key event key supported by the handler
   * @param handler.fn processing function of the handler
   * @param namespace namespace, recommended when a handler is set
   * @param event event to listen for, defaults to message
   * @param priority priority, lower numbers run first
   * @param rule
   * @param rule.reg command regex
   * @param rule.fnc method executed for the command
   * @param rule.event event to listen for, defaults to message
   * @param rule.log set to false to hide execution logs
   * @param rule.permission permission: master, owner, admin, all
   * @param task
   * @param task.name scheduled task name
   * @param task.cron cron expression for the scheduled task
   * @param task.fnc method name of the scheduled task
   * @param task.log set to false to hide execution logs
   */
  constructor ({
    name = 'your-plugin',
    dsc = '无',
    handler,
    namespace,
    event = 'message',
    priority = 5000,
    task = { fnc: '', cron: '' },
    rule = []
  }) {
    /** plugin name */
    this.name = name
    /** plugin description */
    this.dsc = dsc
    /** event to listen for, defaults to message https://oicqjs.github.io/oicq/#events */
    this.event = event
    /** priority */
    this.priority = priority
    /** scheduled tasks, may be an array */
    this.task = {
      /** task name */
      name: '',
      /** task method name */
      fnc: task.fnc || '',
      /** task cron expression */
      cron: task.cron || ''
    }
    /** command rules */
    this.rule = rule

    if (handler) {
      this.handler = handler
      this.namespace = namespace || ''
    }
  }

  /**
   * @param msg message to send
   * @param quote whether to quote the original message
   * @param data.recallMsg in group chats, recall after 0-120 seconds; 0 means no recall
   * @param data.at whether to @ the user
   */
  reply (msg = '', quote = false, data = {}) {
    if (!this.e.reply || !msg) return false
    return this.e.reply(msg, quote, data)
  }

  conKey (isGroup = false) {
    if (isGroup) {
      return `${this.name}.${this.e.group_id}`
    } else {
      return `${this.name}.${this.userId || this.e.user_id}`
    }
  }

  /**
   * @param type method to execute
   * @param isGroup whether this is a group chat
   * @param time timeout in seconds, defaults to 120
   */
  setContext (type, isGroup = false, time = 120) {
    let key = this.conKey(isGroup)
    if (!stateArr[key]) stateArr[key] = {}
    stateArr[key][type] = this.e
    if (time) {
      /** timeout */
      setTimeout(() => {
        if (stateArr[key][type]) {
          delete stateArr[key][type]
          this.e.reply('操作超时已取消', true)
        }
      }, time * 1000)
    }
  }

  getContext () {
    let key = this.conKey()
    return stateArr[key]
  }

  getContextGroup () {
    let key = this.conKey(true)
    return stateArr[key]
  }

  /**
   * @param type method to execute
   * @param isGroup whether this is a group chat
   */
  finish (type, isGroup = false) {
    if (stateArr[this.conKey(isGroup)] && stateArr[this.conKey(isGroup)][type]) {
      delete stateArr[this.conKey(isGroup)][type]
    }
  }
}
spaces/CleanML/demo/README.md
DELETED
@@ -1,11 +0,0 @@
---
title: CleanML Demo - Data centric NER MLOps
emoji: 📚🔍
colorFrom: gray
colorTo: gray
sdk: docker
pinned: true
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ConvLab/README/README.md
DELETED
@@ -1,23 +0,0 @@
---
title: README
emoji: 👀
colorFrom: gray
colorTo: gray
sdk: static
pinned: false
---

### Dataset

To use our unified datasets, you need to install the [ConvLab-3](https://github.com/ConvLab/ConvLab-3) platform first. Then you can load a dataset via:
```
from convlab.util import load_dataset, load_ontology, load_database

dataset_name = 'multiwoz21' # use the dataset name in our repo
dataset = load_dataset(dataset_name)
ontology = load_ontology(dataset_name)
database = load_database(dataset_name)
```
Each dataset has a `dummy_data.json` showing a few samples. For the unified data format and more usage please refer to [here](https://github.com/ConvLab/ConvLab-3/tree/master/data/unified_datasets).

Contributions such as adding new datasets and models are highly welcome!
spaces/Cropinky/hana_hanak_houses/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Anti House Generator
emoji: 🎨
colorFrom: blue
colorTo: red
sdk: gradio
sdk_version: 3.33.1
app_file: app.py
pinned: true
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DAMO-NLP-SG/CLEX-Chat/style.css
DELETED
@@ -1,16 +0,0 @@
h1 {
  text-align: center;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}

.contain {
  max-width: 900px;
  margin: auto;
  padding-top: 1.5rem;
}
spaces/DHEIVER/ThyroidTumorClassificationModel/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: SerdarHelli ThyroidTumorClassificationModel
emoji: 🐨
colorFrom: gray
colorTo: pink
sdk: gradio
sdk_version: 3.44.4
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_protocol.py
DELETED
@@ -1,679 +0,0 @@
|
|
1 |
-
import asyncio
|
2 |
-
import asyncio.streams
|
3 |
-
import traceback
|
4 |
-
import warnings
|
5 |
-
from collections import deque
|
6 |
-
from contextlib import suppress
|
7 |
-
from html import escape as html_escape
|
8 |
-
from http import HTTPStatus
|
9 |
-
from logging import Logger
|
10 |
-
from typing import (
|
11 |
-
TYPE_CHECKING,
|
12 |
-
Any,
|
13 |
-
Awaitable,
|
14 |
-
Callable,
|
15 |
-
Deque,
|
16 |
-
Optional,
|
17 |
-
Sequence,
|
18 |
-
Tuple,
|
19 |
-
Type,
|
20 |
-
Union,
|
21 |
-
cast,
|
22 |
-
)
|
23 |
-
|
24 |
-
import attr
|
25 |
-
import yarl
|
26 |
-
|
27 |
-
from .abc import AbstractAccessLogger, AbstractStreamWriter
|
28 |
-
from .base_protocol import BaseProtocol
|
29 |
-
from .helpers import ceil_timeout
|
30 |
-
from .http import (
|
31 |
-
HttpProcessingError,
|
32 |
-
HttpRequestParser,
|
33 |
-
HttpVersion10,
|
34 |
-
RawRequestMessage,
|
35 |
-
StreamWriter,
|
36 |
-
)
|
37 |
-
from .log import access_logger, server_logger
|
38 |
-
from .streams import EMPTY_PAYLOAD, StreamReader
|
39 |
-
from .tcp_helpers import tcp_keepalive
|
40 |
-
from .web_exceptions import HTTPException
|
41 |
-
from .web_log import AccessLogger
|
42 |
-
from .web_request import BaseRequest
|
43 |
-
from .web_response import Response, StreamResponse
|
44 |
-
|
45 |
-
__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")
|
46 |
-
|
47 |
-
if TYPE_CHECKING: # pragma: no cover
|
48 |
-
from .web_server import Server
|
49 |
-
|
50 |
-
|
51 |
-
_RequestFactory = Callable[
|
52 |
-
[
|
53 |
-
RawRequestMessage,
|
54 |
-
StreamReader,
|
55 |
-
"RequestHandler",
|
56 |
-
AbstractStreamWriter,
|
57 |
-
"asyncio.Task[None]",
|
58 |
-
],
|
59 |
-
BaseRequest,
|
60 |
-
]
|
61 |
-
|
62 |
-
_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
|
63 |
-
|
64 |
-
ERROR = RawRequestMessage(
|
65 |
-
"UNKNOWN",
|
66 |
-
"/",
|
67 |
-
HttpVersion10,
|
68 |
-
{}, # type: ignore[arg-type]
|
69 |
-
{}, # type: ignore[arg-type]
|
70 |
-
True,
|
71 |
-
None,
|
72 |
-
False,
|
73 |
-
False,
|
74 |
-
yarl.URL("/"),
|
75 |
-
)
|
76 |
-
|
77 |
-
|
78 |
-
class RequestPayloadError(Exception):
|
79 |
-
"""Payload parsing error."""
|
80 |
-
|
81 |
-
|
82 |
-
class PayloadAccessError(Exception):
|
83 |
-
"""Payload was accessed after response was sent."""
|
84 |
-
|
85 |
-
|
86 |
-
@attr.s(auto_attribs=True, frozen=True, slots=True)
|
87 |
-
class _ErrInfo:
|
88 |
-
status: int
|
89 |
-
exc: BaseException
|
90 |
-
message: str
|
91 |
-
|
92 |
-
|
93 |
-
_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader]
|
94 |
-
|
95 |
-
|
96 |
-
class RequestHandler(BaseProtocol):
|
97 |
-
"""HTTP protocol implementation.
|
98 |
-
|
99 |
-
RequestHandler handles incoming HTTP request. It reads request line,
|
100 |
-
request headers and request payload and calls handle_request() method.
|
101 |
-
By default it always returns with 404 response.
|
102 |
-
|
103 |
-
RequestHandler handles errors in incoming request, like bad
|
104 |
-
status line, bad headers or incomplete payload. If any error occurs,
|
105 |
-
connection gets closed.
|
106 |
-
|
107 |
-
keepalive_timeout -- number of seconds before closing
|
108 |
-
keep-alive connection
|
109 |
-
|
110 |
-
tcp_keepalive -- TCP keep-alive is on, default is on
|
111 |
-
|
112 |
-
debug -- enable debug mode
|
113 |
-
|
114 |
-
logger -- custom logger object
|
115 |
-
|
116 |
-
access_log_class -- custom class for access_logger
|
117 |
-
|
118 |
-
access_log -- custom logging object
|
119 |
-
|
120 |
-
access_log_format -- access log format string
|
121 |
-
|
122 |
-
loop -- Optional event loop
|
123 |
-
|
124 |
-
max_line_size -- Optional maximum header line size
|
125 |
-
|
126 |
-
max_field_size -- Optional maximum header field size
|
127 |
-
|
128 |
-
max_headers -- Optional maximum header size
|
129 |
-
|
130 |
-
"""
|
131 |
-
|
132 |
-
KEEPALIVE_RESCHEDULE_DELAY = 1
|
133 |
-
|
134 |
-
__slots__ = (
|
135 |
-
"_request_count",
|
136 |
-
"_keepalive",
|
137 |
-
"_manager",
|
138 |
-
"_request_handler",
|
139 |
-
"_request_factory",
|
140 |
-
"_tcp_keepalive",
|
141 |
-
"_keepalive_time",
|
142 |
-
"_keepalive_handle",
|
143 |
-
"_keepalive_timeout",
|
144 |
-
"_lingering_time",
|
145 |
-
"_messages",
|
146 |
-
"_message_tail",
|
147 |
-
"_waiter",
|
148 |
-
"_task_handler",
|
149 |
-
"_upgrade",
|
150 |
-
"_payload_parser",
|
151 |
-
"_request_parser",
|
152 |
-
"_reading_paused",
|
153 |
-
"logger",
|
154 |
-
"debug",
|
155 |
-
"access_log",
|
156 |
-
"access_logger",
|
157 |
-
"_close",
|
158 |
-
"_force_close",
|
159 |
-
"_current_request",
|
160 |
-
)
|
161 |
-
|
162 |
-
def __init__(
|
163 |
-
self,
|
164 |
-
manager: "Server",
|
165 |
-
*,
|
166 |
-
loop: asyncio.AbstractEventLoop,
|
167 |
-
keepalive_timeout: float = 75.0, # NGINX default is 75 secs
|
168 |
-
tcp_keepalive: bool = True,
|
169 |
-
logger: Logger = server_logger,
|
170 |
-
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
|
171 |
-
access_log: Logger = access_logger,
|
172 |
-
-        access_log_format: str = AccessLogger.LOG_FORMAT,
-        debug: bool = False,
-        max_line_size: int = 8190,
-        max_headers: int = 32768,
-        max_field_size: int = 8190,
-        lingering_time: float = 10.0,
-        read_bufsize: int = 2**16,
-        auto_decompress: bool = True,
-    ):
-        super().__init__(loop)
-
-        self._request_count = 0
-        self._keepalive = False
-        self._current_request: Optional[BaseRequest] = None
-        self._manager: Optional[Server] = manager
-        self._request_handler: Optional[_RequestHandler] = manager.request_handler
-        self._request_factory: Optional[_RequestFactory] = manager.request_factory
-
-        self._tcp_keepalive = tcp_keepalive
-        # placeholder to be replaced on keepalive timeout setup
-        self._keepalive_time = 0.0
-        self._keepalive_handle: Optional[asyncio.Handle] = None
-        self._keepalive_timeout = keepalive_timeout
-        self._lingering_time = float(lingering_time)
-
-        self._messages: Deque[_MsgType] = deque()
-        self._message_tail = b""
-
-        self._waiter: Optional[asyncio.Future[None]] = None
-        self._task_handler: Optional[asyncio.Task[None]] = None
-
-        self._upgrade = False
-        self._payload_parser: Any = None
-        self._request_parser: Optional[HttpRequestParser] = HttpRequestParser(
-            self,
-            loop,
-            read_bufsize,
-            max_line_size=max_line_size,
-            max_field_size=max_field_size,
-            max_headers=max_headers,
-            payload_exception=RequestPayloadError,
-            auto_decompress=auto_decompress,
-        )
-
-        self.logger = logger
-        self.debug = debug
-        self.access_log = access_log
-        if access_log:
-            self.access_logger: Optional[AbstractAccessLogger] = access_log_class(
-                access_log, access_log_format
-            )
-        else:
-            self.access_logger = None
-
-        self._close = False
-        self._force_close = False
-
-    def __repr__(self) -> str:
-        return "<{} {}>".format(
-            self.__class__.__name__,
-            "connected" if self.transport is not None else "disconnected",
-        )
-
-    @property
-    def keepalive_timeout(self) -> float:
-        return self._keepalive_timeout
-
-    async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
-        """Do worker process exit preparations.
-
-        We need to clean up everything and stop accepting requests.
-        It is especially important for keep-alive connections.
-        """
-        self._force_close = True
-
-        if self._keepalive_handle is not None:
-            self._keepalive_handle.cancel()
-
-        if self._waiter:
-            self._waiter.cancel()
-
-        # wait for handlers
-        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
-            async with ceil_timeout(timeout):
-                if self._current_request is not None:
-                    self._current_request._cancel(asyncio.CancelledError())
-
-                if self._task_handler is not None and not self._task_handler.done():
-                    await self._task_handler
-
-        # force-close non-idle handler
-        if self._task_handler is not None:
-            self._task_handler.cancel()
-
-        if self.transport is not None:
-            self.transport.close()
-            self.transport = None
-
-    def connection_made(self, transport: asyncio.BaseTransport) -> None:
-        super().connection_made(transport)
-
-        real_transport = cast(asyncio.Transport, transport)
-        if self._tcp_keepalive:
-            tcp_keepalive(real_transport)
-
-        self._task_handler = self._loop.create_task(self.start())
-        assert self._manager is not None
-        self._manager.connection_made(self, real_transport)
-
-    def connection_lost(self, exc: Optional[BaseException]) -> None:
-        if self._manager is None:
-            return
-        self._manager.connection_lost(self, exc)
-
-        super().connection_lost(exc)
-
-        self._manager = None
-        self._force_close = True
-        self._request_factory = None
-        self._request_handler = None
-        self._request_parser = None
-
-        if self._keepalive_handle is not None:
-            self._keepalive_handle.cancel()
-
-        if self._current_request is not None:
-            if exc is None:
-                exc = ConnectionResetError("Connection lost")
-            self._current_request._cancel(exc)
-
-        if self._waiter is not None:
-            self._waiter.cancel()
-
-        self._task_handler = None
-
-        if self._payload_parser is not None:
-            self._payload_parser.feed_eof()
-            self._payload_parser = None
-
-    def set_parser(self, parser: Any) -> None:
-        # Actual type is WebReader
-        assert self._payload_parser is None
-
-        self._payload_parser = parser
-
-        if self._message_tail:
-            self._payload_parser.feed_data(self._message_tail)
-            self._message_tail = b""
-
-    def eof_received(self) -> None:
-        pass
-
-    def data_received(self, data: bytes) -> None:
-        if self._force_close or self._close:
-            return
-        # parse http messages
-        messages: Sequence[_MsgType]
-        if self._payload_parser is None and not self._upgrade:
-            assert self._request_parser is not None
-            try:
-                messages, upgraded, tail = self._request_parser.feed_data(data)
-            except HttpProcessingError as exc:
-                messages = [
-                    (_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD)
-                ]
-                upgraded = False
-                tail = b""
-
-            for msg, payload in messages or ():
-                self._request_count += 1
-                self._messages.append((msg, payload))
-
-            waiter = self._waiter
-            if messages and waiter is not None and not waiter.done():
-                # don't set result twice
-                waiter.set_result(None)
-
-            self._upgrade = upgraded
-            if upgraded and tail:
-                self._message_tail = tail
-
-        # no parser, just store
-        elif self._payload_parser is None and self._upgrade and data:
-            self._message_tail += data
-
-        # feed payload
-        elif data:
-            eof, tail = self._payload_parser.feed_data(data)
-            if eof:
-                self.close()
-
-    def keep_alive(self, val: bool) -> None:
-        """Set keep-alive connection mode.
-
-        :param bool val: new state.
-        """
-        self._keepalive = val
-        if self._keepalive_handle:
-            self._keepalive_handle.cancel()
-            self._keepalive_handle = None
-
-    def close(self) -> None:
-        """Close connection.
-
-        Stop accepting new pipelining messages and close
-        connection when handlers done processing messages.
-        """
-        self._close = True
-        if self._waiter:
-            self._waiter.cancel()
-
-    def force_close(self) -> None:
-        """Forcefully close connection."""
-        self._force_close = True
-        if self._waiter:
-            self._waiter.cancel()
-        if self.transport is not None:
-            self.transport.close()
-            self.transport = None
-
-    def log_access(
-        self, request: BaseRequest, response: StreamResponse, time: float
-    ) -> None:
-        if self.access_logger is not None:
-            self.access_logger.log(request, response, self._loop.time() - time)
-
-    def log_debug(self, *args: Any, **kw: Any) -> None:
-        if self.debug:
-            self.logger.debug(*args, **kw)
-
-    def log_exception(self, *args: Any, **kw: Any) -> None:
-        self.logger.exception(*args, **kw)
-
-    def _process_keepalive(self) -> None:
-        if self._force_close or not self._keepalive:
-            return
-
-        next = self._keepalive_time + self._keepalive_timeout
-
-        # handler in idle state
-        if self._waiter:
-            if self._loop.time() > next:
-                self.force_close()
-                return
-
-        # not all request handlers are done,
-        # reschedule itself to next second
-        self._keepalive_handle = self._loop.call_later(
-            self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive
-        )
-
-    async def _handle_request(
-        self,
-        request: BaseRequest,
-        start_time: float,
-        request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]],
-    ) -> Tuple[StreamResponse, bool]:
-        assert self._request_handler is not None
-        try:
-            try:
-                self._current_request = request
-                resp = await request_handler(request)
-            finally:
-                self._current_request = None
-        except HTTPException as exc:
-            resp = exc
-            reset = await self.finish_response(request, resp, start_time)
-        except asyncio.CancelledError:
-            raise
-        except asyncio.TimeoutError as exc:
-            self.log_debug("Request handler timed out.", exc_info=exc)
-            resp = self.handle_error(request, 504)
-            reset = await self.finish_response(request, resp, start_time)
-        except Exception as exc:
-            resp = self.handle_error(request, 500, exc)
-            reset = await self.finish_response(request, resp, start_time)
-        else:
-            # Deprecation warning (See #2415)
-            if getattr(resp, "__http_exception__", False):
-                warnings.warn(
-                    "returning HTTPException object is deprecated "
-                    "(#2415) and will be removed, "
-                    "please raise the exception instead",
-                    DeprecationWarning,
-                )
-
-            reset = await self.finish_response(request, resp, start_time)
-
-        return resp, reset
-
-    async def start(self) -> None:
-        """Process incoming request.
-
-        It reads request line, request headers and request payload, then
-        calls handle_request() method. Subclass has to override
-        handle_request(). start() handles various exceptions in request
-        or response handling. Connection is being closed always unless
-        keep_alive(True) specified.
-        """
-        loop = self._loop
-        handler = self._task_handler
-        assert handler is not None
-        manager = self._manager
-        assert manager is not None
-        keepalive_timeout = self._keepalive_timeout
-        resp = None
-        assert self._request_factory is not None
-        assert self._request_handler is not None
-
-        while not self._force_close:
-            if not self._messages:
-                try:
-                    # wait for next request
-                    self._waiter = loop.create_future()
-                    await self._waiter
-                except asyncio.CancelledError:
-                    break
-                finally:
-                    self._waiter = None
-
-            message, payload = self._messages.popleft()
-
-            start = loop.time()
-
-            manager.requests_count += 1
-            writer = StreamWriter(self, loop)
-            if isinstance(message, _ErrInfo):
-                # make request_factory work
-                request_handler = self._make_error_handler(message)
-                message = ERROR
-            else:
-                request_handler = self._request_handler
-
-            request = self._request_factory(message, payload, self, writer, handler)
-            try:
-                # a new task is used for copy context vars (#3406)
-                task = self._loop.create_task(
-                    self._handle_request(request, start, request_handler)
-                )
-                try:
-                    resp, reset = await task
-                except (asyncio.CancelledError, ConnectionError):
-                    self.log_debug("Ignored premature client disconnection")
-                    break
-
-                # Drop the processed task from asyncio.Task.all_tasks() early
-                del task
-                if reset:
-                    self.log_debug("Ignored premature client disconnection 2")
-                    break
-
-                # notify server about keep-alive
-                self._keepalive = bool(resp.keep_alive)
-
-                # check payload
-                if not payload.is_eof():
-                    lingering_time = self._lingering_time
-                    if not self._force_close and lingering_time:
-                        self.log_debug(
-                            "Start lingering close timer for %s sec.", lingering_time
-                        )
-
-                        now = loop.time()
-                        end_t = now + lingering_time
-
-                        with suppress(asyncio.TimeoutError, asyncio.CancelledError):
-                            while not payload.is_eof() and now < end_t:
-                                async with ceil_timeout(end_t - now):
-                                    # read and ignore
-                                    await payload.readany()
-                                now = loop.time()
-
-                    # if payload still uncompleted
-                    if not payload.is_eof() and not self._force_close:
-                        self.log_debug("Uncompleted request.")
-                        self.close()
-
-                    payload.set_exception(PayloadAccessError())
-
-            except asyncio.CancelledError:
-                self.log_debug("Ignored premature client disconnection ")
-                break
-            except RuntimeError as exc:
-                if self.debug:
-                    self.log_exception("Unhandled runtime exception", exc_info=exc)
-                self.force_close()
-            except Exception as exc:
-                self.log_exception("Unhandled exception", exc_info=exc)
-                self.force_close()
-            finally:
-                if self.transport is None and resp is not None:
-                    self.log_debug("Ignored premature client disconnection.")
-                elif not self._force_close:
-                    if self._keepalive and not self._close:
-                        # start keep-alive timer
-                        if keepalive_timeout is not None:
-                            now = self._loop.time()
-                            self._keepalive_time = now
-                            if self._keepalive_handle is None:
-                                self._keepalive_handle = loop.call_at(
-                                    now + keepalive_timeout, self._process_keepalive
-                                )
-                    else:
-                        break
-
-        # remove handler, close transport if no handlers left
-        if not self._force_close:
-            self._task_handler = None
-            if self.transport is not None:
-                self.transport.close()
-
-    async def finish_response(
-        self, request: BaseRequest, resp: StreamResponse, start_time: float
-    ) -> bool:
-        """Prepare the response and write_eof, then log access.
-
-        This has to
-        be called within the context of any exception so the access logger
-        can get exception information. Returns True if the client disconnects
-        prematurely.
-        """
-        if self._request_parser is not None:
-            self._request_parser.set_upgraded(False)
-            self._upgrade = False
-            if self._message_tail:
-                self._request_parser.feed_data(self._message_tail)
-                self._message_tail = b""
-        try:
-            prepare_meth = resp.prepare
-        except AttributeError:
-            if resp is None:
-                raise RuntimeError("Missing return " "statement on request handler")
-            else:
-                raise RuntimeError(
-                    "Web-handler should return "
-                    "a response instance, "
-                    "got {!r}".format(resp)
-                )
-        try:
-            await prepare_meth(request)
-            await resp.write_eof()
-        except ConnectionError:
-            self.log_access(request, resp, start_time)
-            return True
-        else:
-            self.log_access(request, resp, start_time)
-            return False
-
-    def handle_error(
-        self,
-        request: BaseRequest,
-        status: int = 500,
-        exc: Optional[BaseException] = None,
-        message: Optional[str] = None,
-    ) -> StreamResponse:
-        """Handle errors.
-
-        Returns HTTP response with specific status code. Logs additional
-        information. It always closes current connection.
-        """
-        self.log_exception("Error handling request", exc_info=exc)
-
-        # some data already got sent, connection is broken
-        if request.writer.output_size > 0:
-            raise ConnectionError(
-                "Response is sent already, cannot send another response "
-                "with the error message"
-            )
-
-        ct = "text/plain"
-        if status == HTTPStatus.INTERNAL_SERVER_ERROR:
-            title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
-            msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
-            tb = None
-            if self.debug:
-                with suppress(Exception):
-                    tb = traceback.format_exc()
-
-            if "text/html" in request.headers.get("Accept", ""):
-                if tb:
-                    tb = html_escape(tb)
-                    msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
-                message = (
-                    "<html><head>"
-                    "<title>{title}</title>"
-                    "</head><body>\n<h1>{title}</h1>"
-                    "\n{msg}\n</body></html>\n"
-                ).format(title=title, msg=msg)
-                ct = "text/html"
-            else:
-                if tb:
-                    msg = tb
-                message = title + "\n\n" + msg
-
-        resp = Response(status=status, text=message, content_type=ct)
-        resp.force_close()
-
-        return resp
-
-    def _make_error_handler(
-        self, err_info: _ErrInfo
-    ) -> Callable[[BaseRequest], Awaitable[StreamResponse]]:
-        async def handler(request: BaseRequest) -> StreamResponse:
-            return self.handle_error(
-                request, err_info.status, err_info.exc, err_info.message
-            )
-
-        return handler

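The deleted file above is the space's vendored copy of aiohttp's low-level connection protocol; applications do not construct it directly. As a hedged illustration only (standard aiohttp public API, nothing specific to this space), this is how a request normally ends up flowing through such a handler:

# Minimal sketch, assuming the stock aiohttp public API; web.run_app() builds
# the Server whose request_factory/request_handler are handed to a
# RequestHandler instance for every accepted TCP connection.
from aiohttp import web

async def hello(request: web.Request) -> web.Response:
    # data_received() parses the raw bytes, start() dispatches to this
    # coroutine, and finish_response() prepares/writes the response and
    # logs access.
    return web.Response(text="Hello, world")

app = web.Application()
app.add_routes([web.get("/", hello)])

if __name__ == "__main__":
    web.run_app(app, port=8080)
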
spaces/Dachus/Realfee/Dockerfile
DELETED
@@ -1,15 +0,0 @@
-FROM ghcr.io/livebook-dev/livebook:latest-cuda11.8
-
-ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE"
-ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME"
-ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev"
-ENV LIVEBOOK_WITHIN_IFRAME "true"
-ENV LIVEBOOK_APPS_PATH "/public-apps"
-ENV LIVEBOOK_DATA_PATH "/data"
-ENV LIVEBOOK_PORT 7860
-
-EXPOSE 7860
-USER root
-COPY public-apps/ /public-apps
-RUN mkdir -p /data
-RUN chmod 777 /data

spaces/Dauzy/whisper-webui/src/utils.py
DELETED
@@ -1,245 +0,0 @@
-import textwrap
-import unicodedata
-import re
-
-import zlib
-from typing import Iterator, TextIO, Union
-import tqdm
-
-import urllib3
-
-
-def exact_div(x, y):
-    assert x % y == 0
-    return x // y
-
-
-def str2bool(string):
-    str2val = {"True": True, "False": False}
-    if string in str2val:
-        return str2val[string]
-    else:
-        raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
-
-
-def optional_int(string):
-    return None if string == "None" else int(string)
-
-
-def optional_float(string):
-    return None if string == "None" else float(string)
-
-
-def compression_ratio(text) -> float:
-    return len(text) / len(zlib.compress(text.encode("utf-8")))
-
-
-def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'):
-    assert seconds >= 0, "non-negative timestamp expected"
-    milliseconds = round(seconds * 1000.0)
-
-    hours = milliseconds // 3_600_000
-    milliseconds -= hours * 3_600_000
-
-    minutes = milliseconds // 60_000
-    milliseconds -= minutes * 60_000
-
-    seconds = milliseconds // 1_000
-    milliseconds -= seconds * 1_000
-
-    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
-    return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}"
-
-
-def write_txt(transcript: Iterator[dict], file: TextIO):
-    for segment in transcript:
-        print(segment['text'].strip(), file=file, flush=True)
-
-
-def write_vtt(transcript: Iterator[dict], file: TextIO,
-              maxLineWidth=None, highlight_words: bool = False):
-    iterator = __subtitle_preprocessor_iterator(transcript, maxLineWidth, highlight_words)
-
-    print("WEBVTT\n", file=file)
-
-    for segment in iterator:
-        text = segment['text'].replace('-->', '->')
-
-        print(
-            f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
-            f"{text}\n",
-            file=file,
-            flush=True,
-        )
-
-def write_srt(transcript: Iterator[dict], file: TextIO,
-              maxLineWidth=None, highlight_words: bool = False):
-    """
-    Write a transcript to a file in SRT format.
-    Example usage:
-        from pathlib import Path
-        from whisper.utils import write_srt
-        result = transcribe(model, audio_path, temperature=temperature, **args)
-        # save SRT
-        audio_basename = Path(audio_path).stem
-        with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
-            write_srt(result["segments"], file=srt)
-    """
-    iterator = __subtitle_preprocessor_iterator(transcript, maxLineWidth, highlight_words)
-
-    for i, segment in enumerate(iterator, start=1):
-        text = segment['text'].replace('-->', '->')
-
-        # write srt lines
-        print(
-            f"{i}\n"
-            f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> "
-            f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n"
-            f"{text}\n",
-            file=file,
-            flush=True,
-        )
-
-def __subtitle_preprocessor_iterator(transcript: Iterator[dict], maxLineWidth: int = None, highlight_words: bool = False):
-    for segment in transcript:
-        words = segment.get('words', [])
-
-        if len(words) == 0:
-            # Yield the segment as-is or processed
-            if maxLineWidth is None or maxLineWidth < 0:
-                yield segment
-            else:
-                yield {
-                    'start': segment['start'],
-                    'end': segment['end'],
-                    'text': process_text(segment['text'].strip(), maxLineWidth)
-                }
-            # We are done
-            continue
-
-        subtitle_start = segment['start']
-        subtitle_end = segment['end']
-
-        text_words = [ this_word["word"] for this_word in words ]
-        subtitle_text = __join_words(text_words, maxLineWidth)
-
-        # Iterate over the words in the segment
-        if highlight_words:
-            last = subtitle_start
-
-            for i, this_word in enumerate(words):
-                start = this_word['start']
-                end = this_word['end']
-
-                if last != start:
-                    # Display the text up to this point
-                    yield {
-                        'start': last,
-                        'end': start,
-                        'text': subtitle_text
-                    }
-
-                # Display the text with the current word highlighted
-                yield {
-                    'start': start,
-                    'end': end,
-                    'text': __join_words(
-                        [
-                            {
-                                "word": re.sub(r"^(\s*)(.*)$", r"\1<u>\2</u>", word)
-                                if j == i
-                                else word,
-                                # The HTML tags <u> and </u> are not displayed,
-                                # # so they should not be counted in the word length
-                                "length": len(word)
-                            } for j, word in enumerate(text_words)
-                        ], maxLineWidth)
-                }
-                last = end
-
-            if last != subtitle_end:
-                # Display the last part of the text
-                yield {
-                    'start': last,
-                    'end': subtitle_end,
-                    'text': subtitle_text
-                }
-
-        # Just return the subtitle text
-        else:
-            yield {
-                'start': subtitle_start,
-                'end': subtitle_end,
-                'text': subtitle_text
-            }
-
-def __join_words(words: Iterator[Union[str, dict]], maxLineWidth: int = None):
-    if maxLineWidth is None or maxLineWidth < 0:
-        return " ".join(words)
-
-    lines = []
-    current_line = ""
-    current_length = 0
-
-    for entry in words:
-        # Either accept a string or a dict with a 'word' and 'length' field
-        if isinstance(entry, dict):
-            word = entry['word']
-            word_length = entry['length']
-        else:
-            word = entry
-            word_length = len(word)
-
-        if current_length > 0 and current_length + word_length > maxLineWidth:
-            lines.append(current_line)
-            current_line = ""
-            current_length = 0
-
-        current_length += word_length
-        # The word will be prefixed with a space by Whisper, so we don't need to add one here
-        current_line += word
-
-    if len(current_line) > 0:
-        lines.append(current_line)
-
-    return "\n".join(lines)
-
-def process_text(text: str, maxLineWidth=None):
-    if (maxLineWidth is None or maxLineWidth < 0):
-        return text
-
-    lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4)
-    return '\n'.join(lines)
-
-def slugify(value, allow_unicode=False):
-    """
-    Taken from https://github.com/django/django/blob/master/django/utils/text.py
-    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
-    dashes to single dashes. Remove characters that aren't alphanumerics,
-    underscores, or hyphens. Convert to lowercase. Also strip leading and
-    trailing whitespace, dashes, and underscores.
-    """
-    value = str(value)
-    if allow_unicode:
-        value = unicodedata.normalize('NFKC', value)
-    else:
-        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
-    value = re.sub(r'[^\w\s-]', '', value.lower())
-    return re.sub(r'[-\s]+', '-', value).strip('-_')
-
-def download_file(url: str, destination: str):
-    with urllib3.request.urlopen(url) as source, open(destination, "wb") as output:
-        with tqdm(
-            total=int(source.info().get("Content-Length")),
-            ncols=80,
-            unit="iB",
-            unit_scale=True,
-            unit_divisor=1024,
-        ) as loop:
-            while True:
-                buffer = source.read(8192)
-                if not buffer:
-                    break
-
-                output.write(buffer)
-                loop.update(len(buffer))

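For context on the helpers deleted above, a hedged usage sketch follows; the import path src.utils and the sample segments are illustrative assumptions, with segment dicts in the {'start', 'end', 'text'} shape that write_srt/write_vtt expect:

# Illustrative only: exercises format_timestamp, slugify and write_srt with fake data.
from src.utils import format_timestamp, slugify, write_srt  # hypothetical import path

segments = [
    {"start": 0.0, "end": 2.5, "text": " Hello there."},
    {"start": 2.5, "end": 5.0, "text": " This is a test."},
]

print(format_timestamp(3661.042, always_include_hours=True))  # -> 01:01:01.042

name = slugify("My Podcast: Episode #1")  # -> "my-podcast-episode-1"
with open(f"{name}.srt", "w", encoding="utf-8") as srt:
    # Each segment becomes a numbered SRT cue with comma-separated milliseconds;
    # maxLineWidth wraps long cue lines via process_text()/textwrap.
    write_srt(segments, file=srt, maxLineWidth=42)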