Commit
·
0038684
1
Parent(s):
9214814
Update parquet files (step 80 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- errors.txt +0 -24
- spaces/07jeancms/minima/app.py +0 -7
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Trials A Common but Costly Phenomenon in the Courts.md +0 -13
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackle Crackle Free Movies.md +0 -19
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Patch Bad Piggies 1.5.0 Pc and Build Your Own Crazy Vehicles.md +0 -142
- spaces/1gistliPinn/ChatGPT4/Examples/Download Film Al Fatih 1453 Subtitle Indonesia Download WORK.md +0 -9
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Hile 2022 APK Play Offline or Online with Standard or Snooker Rules.md +0 -161
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dawn Awakening The Ultimate Guide to Surviving the Post-Apocalyptic World.md +0 -124
- spaces/1phancelerku/anime-remove-background/APKPure The Ultimate App Store for Android Users.md +0 -130
- spaces/1phancelerku/anime-remove-background/Android Oyun Club Car Parking Son Srm The Most Popular Parking Game on Google Play.md +0 -107
- spaces/1phancelerku/anime-remove-background/Animal Tycoon - Zoo Craft Game Mod Apk The Ultimate Idle Zoo Simulation.md +0 -190
- spaces/1phancelerku/anime-remove-background/Fifa Street 4 PC Download - Enjoy Street Soccer in High Resolution.md +0 -76
- spaces/3B-Group/ConvRe-Leaderboard/app.py +0 -237
- spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker_verification_dataset.py +0 -56
- spaces/801artistry/RVC801/LazyImport.py +0 -13
- spaces/AI-Hobbyist/Hoyo-RVC/docs/faq.md +0 -89
- spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_meshes.py +0 -133
- spaces/Abhay1210/prompt-generator_V1/app.py +0 -18
- spaces/Abubakari/Sales_Prediction/app.py +0 -166
- spaces/Adr740/SmartHadithFR/app.py +0 -41
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/ResolveChildrenWidth.js +0 -16
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Press.js +0 -2
- spaces/Ajay07pandey/Netfilx_Movie_Recommendation_System/README.md +0 -12
- spaces/Alycer/VITS-Umamusume-voice-synthesizer/utils.py +0 -226
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/waiter.h +0 -83
- spaces/Amrrs/DragGan-Inversion/PTI/training/coaches/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/iadb.py +0 -149
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/vae_flax.py +0 -869
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +0 -561
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting.py +0 -88
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/overwrite_expected_slice.py +0 -90
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/base.py +0 -166
- spaces/Anonymous-sub/Rerender/LICENSE.md +0 -201
- spaces/AntNikYab/NaturalLanguageProcessing/pages/pushkin.py +0 -64
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/__init__.py +0 -132
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/search.py +0 -174
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py +0 -686
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py +0 -300
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/datasets.md +0 -290
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_build_augmentation.py +0 -59
- spaces/Benson/text-generation/Examples/Descargar Anime Negro Apk.md +0 -64
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_scripts.py +0 -61
- spaces/Boadiwaa/Recipes/openai/util.py +0 -185
- spaces/CVPR/LIVE/thrust/thrust/detail/swap.h +0 -36
- spaces/CVPR/LIVE/thrust/thrust/iterator/detail/zip_iterator_base.h +0 -405
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/malloc_and_free.h +0 -23
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/uninitialized_copy.h +0 -116
- spaces/CVPR/WALT/mmdet/datasets/custom.py +0 -334
- spaces/Chaitanya01/InvestingPlatform/notifier.py +0 -40
- spaces/CikeyQI/Yunzai/Yunzai/plugins/system/friend.js +0 -22
errors.txt
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
ky2k/Toxicity_Classifier_POC
|
2 |
-
tialenAdioni/chat-gpt-api
|
3 |
-
Narsil/myspace
|
4 |
-
arxify/RVC-beta-v2-0618
|
5 |
-
WitchHuntTV/WinnieThePoohSVC_sovits4
|
6 |
-
yizhangliu/Grounded-Segment-Anything
|
7 |
-
Robert001/UniControl-Demo
|
8 |
-
internetsignal/audioLDM
|
9 |
-
inamXcontru/PoeticTTS
|
10 |
-
dcarpintero/nlp-summarizer-pegasus
|
11 |
-
SungBeom/chatwine-korean
|
12 |
-
x6/BingAi
|
13 |
-
1gistliPinn/ChatGPT4
|
14 |
-
colakin/video-generater
|
15 |
-
stomexserde/gpt4-ui
|
16 |
-
quidiaMuxgu/Expedit-SAM
|
17 |
-
NasirKhalid24/Dalle2-Diffusion-Prior
|
18 |
-
joaopereirajp/livvieChatBot
|
19 |
-
diacanFperku/AutoGPT
|
20 |
-
tioseFevbu/cartoon-converter
|
21 |
-
chuan-hd/law-assistant-chatbot
|
22 |
-
mshukor/UnIVAL
|
23 |
-
xuyingliKepler/openai_play_tts
|
24 |
-
TNR-5/lib111
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/07jeancms/minima/app.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
def greet(name):
|
4 |
-
return "Hello " + name + "!!"
|
5 |
-
|
6 |
-
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
|
7 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Trials A Common but Costly Phenomenon in the Courts.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>What is a Cracked Trial and Why Does It Matter?</h1>
|
3 |
-
<p>A cracked trial is a term used in the criminal justice system to describe a trial that has been scheduled for a not guilty hearing but does not proceed on the day, either because the defendant changes their plea to guilty or the prosecution drops the case. A cracked trial means that the case is resolved without a trial, but it also means that the court time and resources have been wasted, and the witnesses have been inconvenienced or distressed.</p>
|
4 |
-
<p>According to the <a href="https://www.judiciary.uk/wp-content/uploads/2010/04/cit_guidance_v3_1007.pdf">guidance issued by the judiciary</a>, a cracked trial can have a negative impact on the confidence in the system, as it may suggest that the case was not properly prepared or reviewed, or that there was undue pressure on the parties to reach a resolution. A cracked trial can also affect the victim's satisfaction and sense of justice, as they may feel that their voice was not heard or that the outcome was not fair.</p>
|
5 |
-
<h2>cracked trial definition</h2><br /><p><b><b>Download</b> ✑ ✑ ✑ <a href="https://byltly.com/2uKyQ0">https://byltly.com/2uKyQ0</a></b></p><br /><br />
|
6 |
-
<p>The <a href="https://fullfact.org/crime/when-criminal-trials-are-cracked/">statistics published by Full Fact</a> show that in 2014/15, about 35% of trials in the crown court and 37% in the magistrates' court were cracked, and that the main reason for this was late guilty pleas by the defendants. The report also found that only 2.1% of trials in the crown court and 6.8% of trials in the magistrates' court were cracked because of witness issues, such as absence or withdrawal of evidence.</p>
|
7 |
-
<p>The <a href="https://www.oxfordreference.com/view/10.1093/oi/authority.20110803095645197">definition of a cracked trial</a> in A Dictionary of Law Enforcement states that a trial that has been listed for a not guilty hearing on a particular day but does not proceed, either because the defendant pleads guilty to the whole or part of the indictment, or an alternative charge, or because the prosecution offers no evidence.</p>
|
8 |
-
<p>A cracked trial is different from an ineffective trial, which is a trial that has been listed for a hearing but cannot start or continue on the day for reasons beyond the control of the parties, such as illness, unavailability of a judge or jury, or technical problems. An ineffective trial has to be rescheduled for another date.</p>
|
9 |
-
<p>A cracked trial is also different from a vacated trial, which is a trial that has been listed for a hearing but is cancelled before the day for reasons within the control of the parties, such as an agreement to resolve the case by another means, such as a plea bargain or a diversion scheme. A vacated trial does not require any further court time.</p>
|
10 |
-
<h2>Conclusion</h2>
|
11 |
-
<p>A cracked trial is a common occurrence in the criminal justice system, but it can have negative consequences for the efficiency and effectiveness of the system, as well as for the satisfaction and well-being of the victims and witnesses. Reducing the number of cracked trials is one of the challenges faced by the courts and prosecutors, who have to balance the interests of justice with the realities of resource constraints and human factors.</p> ddb901b051<br />
|
12 |
-
<br />
|
13 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crackle Crackle Free Movies.md
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Watch Crackle Crackle Free Movies Online</h1>
|
3 |
-
<p>If you are looking for a way to watch free movies online, you may have heard of Crackle Crackle. Crackle Crackle is a website that offers a large collection of movies and TV shows that you can stream for free. You can find movies from various genres, such as action, comedy, drama, horror, thriller, and more. You can also watch original content from Crackle Crackle, such as The Oath, Snatch, and StartUp.</p>
|
4 |
-
<h2>crackle crackle free movies</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://byltly.com/2uKvVZ">https://byltly.com/2uKvVZ</a></b></p><br /><br />
|
5 |
-
<p>However, Crackle Crackle is not available in all countries, and you may encounter some issues when trying to access it. For example, you may see a message that says "Sorry, this content is not available in your region" or "This video is not available in your country". This is because Crackle Crackle uses geo-restrictions to limit its content to certain regions. If you are outside of those regions, you will not be able to watch Crackle Crackle free movies online.</p>
|
6 |
-
<p>But don't worry, there is a solution to this problem. You can use a VPN (Virtual Private Network) to bypass the geo-restrictions and watch Crackle Crackle free movies online from anywhere in the world. A VPN is a service that allows you to connect to a server in another country and change your IP address. This way, you can trick Crackle Crackle into thinking that you are in a region where its content is available. You can also enjoy other benefits of using a VPN, such as protecting your privacy and security online.</p>
|
7 |
-
<p>Here are the steps to watch Crackle Crackle free movies online with a VPN:</p>
|
8 |
-
<ol>
|
9 |
-
<li>Choose a VPN service that has servers in the countries where Crackle Crackle is available, such as the US, Canada, Australia, or the UK. Some of the best VPNs for streaming are ExpressVPN, NordVPN, Surfshark, and CyberGhost.</li>
|
10 |
-
<li>Download and install the VPN app on your device. You can use a VPN on your computer, smartphone, tablet, or smart TV.</li>
|
11 |
-
<li>Launch the VPN app and sign in with your account. If you don't have an account yet, you can create one on the VPN website.</li>
|
12 |
-
<li>Select a server in a country where Crackle Crackle is available and connect to it. For example, if you want to watch Crackle Crackle free movies online from India, you can connect to a server in the US.</li>
|
13 |
-
<li>Open your browser and go to the Crackle Crackle website. You should be able to access it without any issues.</li>
|
14 |
-
<li>Browse through the categories and genres and choose a movie or TV show that you want to watch. Click on it and enjoy watching Crackle Crackle free movies online with a VPN.</li>
|
15 |
-
</ol>
|
16 |
-
<p>Note: You may need to disable your ad blocker or allow pop-ups on the Crackle Crackle website, as some of its content may be supported by ads.</p>
|
17 |
-
<p></p> ddb901b051<br />
|
18 |
-
<br />
|
19 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Patch Bad Piggies 1.5.0 Pc and Build Your Own Crazy Vehicles.md
DELETED
@@ -1,142 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Patch Bad Piggies 1.5.0 Pc: A Guide for Angry Birds Fans</h1>
|
3 |
-
<p>Are you a fan of Angry Birds, the popular physics-based puzzle game that has taken the world by storm? If so, you might be interested in trying out Bad Piggies, a spin-off game that lets you play as the villains instead of the heroes. In this game, you have to help the greedy pigs build vehicles and machines to steal eggs from the angry birds.</p>
|
4 |
-
<h2>Download Patch Bad Piggies 1.5.0 Pc</h2><br /><p><b><b>DOWNLOAD</b> ✫✫✫ <a href="https://byltly.com/2uKwU3">https://byltly.com/2uKwU3</a></b></p><br /><br />
|
5 |
-
<p>But wait, there's more! If you want to enhance your gaming experience and enjoy more levels, features, and fun, you can download patch Bad Piggies 1.5.0 for PC and install it on your computer. This patch will update your game to the latest version and give you access to new sandbox modes, achievements, and more.</p>
|
6 |
-
<p>In this article, we will show you how to download and install Bad Piggies 1.5.0 on PC, as well as how to download and install patch Bad Piggies 1.5.0 on PC. Follow our step-by-step guide and you'll be playing this addictive game in no time.</p>
|
7 |
-
<h2>What is Bad Piggies?</h2>
|
8 |
-
<p>Bad Piggies is a game developed by Rovio Entertainment Corporation, the same company that created Angry Birds. It was released in September 2012 for various platforms, including Windows, Android, iOS, Mac, and more.</p>
|
9 |
-
<p>Unlike Angry Birds, where you have to launch birds at pigs using a slingshot, in Bad Piggies you have to construct vehicles and machines using various objects and materials to help the pigs reach their goal. The goal can be an egg, a star, a button, or anything else that the pigs desire.</p>
|
10 |
-
<p>The game has over 200 levels of egg-snatching and pig-flying fun, as well as over 40 bonus levels that you can unlock by earning three stars in each level. You can also play in sandbox mode, where you can create your own levels and vehicles using unlimited items.</p>
|
11 |
-
<h2>What are the features of Bad Piggies?</h2>
|
12 |
-
<p>Bad Piggies is a game that offers a lot of features and benefits for its players. Some of them are:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Original and innovative gameplay that challenges your creativity and logic</li>
|
15 |
-
<li>Bright and colorful graphics that are in line with the Angry Birds style</li>
|
16 |
-
<li>Funny and cute characters that make you laugh and sympathize with them</li>
|
17 |
-
<li>Varied and dynamic levels that require different strategies and solutions</li>
|
18 |
-
<li>Multiple sandbox modes that allow for endless creation and experimentation</li>
|
19 |
-
<li>Achievements and leaderboards that let you compete with your friends and other players</li>
|
20 |
-
<li>Regular updates that add new levels, items, features, and more</li>
|
21 |
-
</ul>
|
22 |
-
<h2>What are the requirements to play Bad Piggies on PC?</h2>
|
23 |
-
<p>If you want to play Bad Piggies on PC, you need to make sure that your computer meets the minimum system requirements for the game. These are:</p>
|
24 |
-
<ul>
|
25 |
-
<li>Operating System: Windows XP/Vista/7/8/10</li>
|
26 |
-
<li>Processor: Intel or AMD Processor</li>
|
27 |
-
<li>RAM: at least 512 MB</li>
|
28 |
-
<li>HDD: at least 100 MB of free disk space</li>
|
29 |
-
<li>Graphics Card: any compatible card with OpenGL support</li>
|
30 |
-
<li>Sound Card: any compatible card with DirectX support</li>
|
31 |
-
</ul>
|
32 |
-
<p>If your computer fulfills these requirements, you can proceed to download and install Bad Piggies on PC.</p>
|
33 |
-
<h2>How to download and install Bad Piggies 1.5.0 on PC?</h2>
|
34 |
-
<p>To download and install Bad Piggies 1.5.0 on PC, you need to use an emulator that can run Android apps on your computer. One of the best emulators for this purpose is BlueStacks, which is free, fast, and easy to use.</p>
|
35 |
-
<p>How to download patch bad piggies 1.5.0 for pc<br />
|
36 |
-
Bad piggies 1.5.0 patch download free pc<br />
|
37 |
-
Download bad piggies 1.5.0 full version with patch for pc<br />
|
38 |
-
Bad piggies 1.5.0 pc patch download link<br />
|
39 |
-
Patch bad piggies 1.5.0 pc download no survey<br />
|
40 |
-
Download patch bad piggies 1.5.0 for windows 10 pc<br />
|
41 |
-
Bad piggies 1.5.0 patch download pc offline<br />
|
42 |
-
Download patch bad piggies 1.5.0 for pc crack<br />
|
43 |
-
Bad piggies 1.5.0 patch download pc full game<br />
|
44 |
-
Patch bad piggies 1.5.0 pc download without password<br />
|
45 |
-
Download patch bad piggies 1.5.0 for mac pc<br />
|
46 |
-
Bad piggies 1.5.0 patch download pc latest version<br />
|
47 |
-
Download patch bad piggies 1.5.0 for pc softonic<br />
|
48 |
-
Bad piggies 1.5.0 patch download pc rar file<br />
|
49 |
-
Patch bad piggies 1.5.0 pc download mediafire<br />
|
50 |
-
Download patch bad piggies 1.5.0 for pc apk<br />
|
51 |
-
Bad piggies 1.5.0 patch download pc zip file<br />
|
52 |
-
Download patch bad piggies 1.5.0 for pc mod<br />
|
53 |
-
Bad piggies 1.5.0 patch download pc torrent<br />
|
54 |
-
Patch bad piggies 1.5.0 pc download mega<br />
|
55 |
-
Download patch bad piggies 1.5.0 for linux pc<br />
|
56 |
-
Bad piggies 1.5.0 patch download pc direct link<br />
|
57 |
-
Download patch bad piggies 1.5.0 for pc online<br />
|
58 |
-
Bad piggies 1.5.0 patch download pc setup file<br />
|
59 |
-
Patch bad piggies 1.5.0 pc download google drive<br />
|
60 |
-
Download patch bad piggies 1.5.0 for android pc<br />
|
61 |
-
Bad piggies 1.5.0 patch download pc exe file<br />
|
62 |
-
Download patch bad piggies 1.5.0 for ios pc<br />
|
63 |
-
Bad piggies 1.5.0 patch download pc iso file<br />
|
64 |
-
Patch bad piggies 1.5.0 pc download zippyshare<br />
|
65 |
-
Download patch bad piggies 1.5.0 for chromebook pc<br />
|
66 |
-
Bad piggies 1.5.0 patch download pc compressed file<br />
|
67 |
-
Download patch bad piggies 1.5.0 for ubuntu pc<br />
|
68 |
-
Bad piggies 1.5.0 patch download pc highly compressed<br />
|
69 |
-
Patch bad piggies 1</p>
|
70 |
-
<p>Here are the steps to download and install Bad Piggies 1.5.0 on PC using BlueStacks:</p>
|
71 |
-
<h3>Step 1: Download BlueStacks emulator</h3>
|
72 |
-
<p>The first thing you need to do is to download BlueStacks emulator from its official website <a href="https://www.bluestacks.com/">https://www.bluestacks.com/</a>. You can choose between BlueStacks 4 or BlueStacks 5 depending on your preference.</p>
|
73 |
-
<p>Once you have downloaded the installer file, double-click on it to start the installation process.</p>
|
74 |
-
<h3>Step 2: Install BlueStacks on your PC</h3>
|
75 |
-
<p>The next thing you need to do is to install BlueStacks on your PC by following the instructions on the screen.</p>
|
76 |
-
<p>You may need to grant some permissions or accept some terms and conditions during the installation process.</p>
|
77 |
-
<p>You may also need to sign in with your Google account or create one if you don't have one already.</p>
|
78 |
-
<p>After the installation is complete, launch BlueStacks from your desktop or start menu.</p>
|
79 |
-
<h3>Step 3: Search for Bad Piggies on BlueStacks</h3>
|
80 |
-
<p>The third thing you need to do is to search for Bad Piggies on BlueStacks using its built-in search bar.</p>
|
81 |
-
<p>Type "Bad Piggies" in the search bar and hit enter.</p>
|
82 |
-
<p>You will see a list of results from various sources such as Google Play Store, App Center, or Game Center.</p>
|
83 |
-
<h3>Step 4: Install Bad Piggies from the search results</h3>
|
84 |
-
<p>The fourth thing you need to do is to install Bad Piggies from the search results by clicking on its icon.</p>
|
85 |
-
<p>You will be redirected to its page where you can see more information about the game such as its description, rating, reviews, screenshots, etc.</p>
|
86 |
-
<p>To install it, click on the "Install" button at the top right corner of the page.</p>
|
87 |
-
<p>The installation process will begin and may take a few minutes depending on your internet speed.</p>
|
88 |
-
<h3>Step 5: Launch Bad Piggies and enjoy the game</h3>
|
89 |
-
<p>The fifth thing you need to do is to launch Bad Piggies and enjoy the game.</p>
|
90 |
-
<p>To launch it, click on its icon on your home screen or app drawer.</p>
|
91 |
-
<p>You will see a loading screen followed by a welcome screen where you can choose between playing online or offline.</p>
|
92 |
-
<p>Select your preferred option and start playing this fun and addictive game.</p>
|
93 |
-
<h2>How to download and install patch Bad Piggies 1.5.0 on PC?</h2>
|
94 |
-
<p>If you want to download and install patch Bad Piggies 1.5.0 on PC, you need to follow these steps:</p>
|
95 |
-
<h3>Step 1: Download patch Bad Piggies 1.5.0 from a reliable source</h3>
|
96 |
-
487.weebly.com/blog/bad-piggies-150-download">https://lasopabg487.weebly.com/blog/bad-piggies-150-download</a>. This is a website that provides a link to download the patch file for free and without any viruses or malware.</p>
|
97 |
-
<p>Once you have downloaded the patch file, which is in ZIP format, save it to your computer and remember its location.</p>
|
98 |
-
<h3>Step 2: Extract the patch files to your game folder</h3>
|
99 |
-
<p>The next thing you need to do is to extract the patch files to your game folder where you have installed Bad Piggies.</p>
|
100 |
-
<p>To do this, you need to use a program that can unzip ZIP files such as WinRAR, 7-Zip, or PeaZip.</p>
|
101 |
-
<p>Right-click on the patch file and select "Extract here" or "Extract to" depending on your program.</p>
|
102 |
-
<p>You will see a folder named "Bad Piggies 1.5.0" that contains two files: "BadPiggies.exe" and "Patch.exe".</p>
|
103 |
-
<p>Copy these two files and paste them into your game folder, which is usually located at "C:\Program Files (x86)\Rovio Entertainment Ltd\Bad Piggies".</p>
|
104 |
-
<p>Replace the existing files if prompted.</p>
|
105 |
-
<h3>Step 3: Run the patch executable file and follow the instructions</h3>
|
106 |
-
<p>The third thing you need to do is to run the patch executable file and follow the instructions.</p>
|
107 |
-
<p>To do this, double-click on the "Patch.exe" file that you have copied to your game folder.</p>
|
108 |
-
<p>You will see a window that asks you to select your language. Choose English or any other language that you prefer.</p>
|
109 |
-
<p>Then, you will see another window that asks you to select your game version. Choose "Bad Piggies 1.5.0" from the drop-down menu.</p>
|
110 |
-
<p>Finally, you will see a window that shows the progress of the patching process. Wait until it is finished and click on "Exit".</p>
|
111 |
-
<h3>Step 4: Restart your game and enjoy the new features</h3>
|
112 |
-
<p>The fourth thing you need to do is to restart your game and enjoy the new features.</p>
|
113 |
-
<p>To do this, close your game if it is running and launch it again from BlueStacks or from your desktop shortcut.</p>
|
114 |
-
<p>You will see a new splash screen that shows the version number 1.5.0 at the bottom right corner.</p>
|
115 |
-
<p>You will also notice some new features such as:</p>
|
116 |
-
<ul>
|
117 |
-
<li>New sandbox mode: The Road to El Porkado</li>
|
118 |
-
<li>New achievements: Road Hogs and Star Collector</li>
|
119 |
-
<li>New items: grappling hook, boxing glove, air pump, etc.</li>
|
120 |
-
<li>New levels: 15 new levels in Rise and Swine episode</li>
|
121 |
-
<li>New mechanics: suction cup wheels, spring-loaded boxing gloves, etc.</li>
|
122 |
-
<li>Bug fixes and performance improvements</li>
|
123 |
-
</ul>
|
124 |
-
<h2>Conclusion</h2>
|
125 |
-
<p>In conclusion, Bad Piggies is a fun and addictive game that lets you play as the pigs from Angry Birds and help them build vehicles and machines to steal eggs from the birds. You can download and install Bad Piggies 1.5.0 on PC using BlueStacks emulator and enjoy over 200 levels of pig-flying fun. You can also download and install patch Bad Piggies 1.5.0 on PC using our guide and enjoy new features such as new sandbox mode, new achievements, new items, new levels, new mechanics, and more.</p>
|
126 |
-
<p>We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!</p>
|
127 |
-
<h2>Frequently Asked Questions</h2>
|
128 |
-
<ol>
|
129 |
-
<li><b>Is Bad Piggies free to play?</b></li>
|
130 |
-
<p>Yes, Bad Piggies is free to play on PC using BlueStacks emulator. However, some features may require in-app purchases or watching ads.</p>
|
131 |
-
<li><b>Is Bad Piggies safe to download?</b></li>
|
132 |
-
<p>Yes, Bad Piggies is safe to download from Google Play Store or App Center on BlueStacks emulator. However, if you download it from other sources, make sure they are reliable and trustworthy.</p>
|
133 |
-
<li><b>Is patch Bad Piggies 1.5.0 safe to download?</b></li>
|
134 |
-
<p>Yes, patch Bad Piggies 1.5.0 is safe to download from <a href="https://lasopabg487.weebly.com/blog/bad-piggies-150-download">https://lasopabg487.weebly.com/blog/bad-piggies-150-download</a>. However, if you download it from other sources, make sure they are reliable and trustworthy.</p>
|
135 |
-
<li><b>Can I play Bad Piggies offline?</b></li>
|
136 |
-
<p>Yes, you can play Bad Piggies offline on PC using BlueStacks emulator. However, some features may require internet connection such as online leaderboards or cloud save.</p>
|
137 |
-
<li><b>Can I play Bad Piggies with friends?</b></li>
|
138 |
-
<p>No, Bad Piggies does not have a multiplayer mode or a co-op mode. However, you can compete with your friends and other players on online leaderboards or share your creations on social media.</p>
|
139 |
-
</ol>
|
140 |
-
</p> 0a6ba089eb<br />
|
141 |
-
<br />
|
142 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Film Al Fatih 1453 Subtitle Indonesia Download WORK.md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>please install quicktime for downloading videos. try different methods for free! imdb picks. download or stream the new movie! watch all the latest new movies online or download. watch latest movies online download subtitle indonesia tv dramas and movies. download </p>
|
3 |
-
<h2>download film al fatih 1453 subtitle indonesia download</h2><br /><p><b><b>Download Zip</b> ➡ <a href="https://imgfil.com/2uy1NH">https://imgfil.com/2uy1NH</a></b></p><br /><br />
|
4 |
-
<p>watch murakami film sejarah islam with english subtitle indonesia bollywood kollywood movie online, download murakami film sejarah islam in mp3, and watch murakami film sejarah islam. watch film fetih 1453 (2012) with english subtitle indonesia free download movie, watch film fetih 1453 (2012) with english subtitle indonesia 3gp, download film fetih 1453 (2012) with english subtitle indonesia free mp3 download, buy film fetih 1453 (2012) dvd book from online and download book store. </p>
|
5 |
-
<p>there are many reasons for the different rating of movies and tv shows on imdb, including copyright, which is automatically applied by the system. if a lower rating is available for a title it is because a lower rating is available for the movie. get the best download movies as you want. enjoy the best streaming and download collection online. mobile application only have to scan the qr code and connect to the preferred servers. as soon as we find the exact solution to a problem, we will post it on the web. the term madhya pradesh is not allowed for naming a state in india. </p>
|
6 |
-
<p>partners with the best and brightest print, television, radio, and digital media to deliver a unique audience experience with a world-class publication that embraces. if you are facing any issues or confusion, please contact help me. if you have any queries, please feel free to contact us. you can always unsubscribe from the list with a single click. </p>
|
7 |
-
<p></p> 899543212b<br />
|
8 |
-
<br />
|
9 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Hile 2022 APK Play Offline or Online with Standard or Snooker Rules.md
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>8 Ball Pool Hile 2022 Apk: How to Download and Use the Best Cheat for 8 Ball Pool</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Do you love playing pool games on your smartphone or tablet? If yes, then you must have heard of <strong>8 Ball Pool</strong>, the most popular and addictive pool game in the world. Developed by Miniclip, this game lets you play with millions of players online, compete in tournaments, win trophies, and collect cues and coins.</p>
|
5 |
-
<h2>8 ball pool hile 2022 apk</h2><br /><p><b><b>Download File</b> • <a href="https://urlin.us/2uSX21">https://urlin.us/2uSX21</a></b></p><br /><br />
|
6 |
-
<p>But what if you want to have more fun and excitement in your pool games? What if you want to have an edge over your opponents and win every match easily? Well, there is a way to do that. You just need to download and use <strong>8 Ball Pool Hile 2022 Apk</strong>, the best cheat for 8 Ball Pool.</p>
|
7 |
-
<p>What is <strong>8 Ball Pool Hile 2022 Apk</strong>? It is a modified version of the original game that gives you unlimited access to all the features and resources of the game. With this cheat, you can:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Get unlimited coins and cash</li>
|
10 |
-
<li>Unlock all cues and tables</li>
|
11 |
-
<li>Use extended guidelines and aim assist</li>
|
12 |
-
<li>Enable auto-win mode and instant win option</li>
|
13 |
-
<li>Bypass anti-cheat detection and ban protection</li>
|
14 |
-
<p>So, how can you download and install <strong>8 Ball Pool Hile 2022 Apk</strong> on your device? It's very easy. Just follow these simple steps:</p>
|
15 |
-
<ol>
|
16 |
-
<li>Click on the link below to download the apk file of <strong>8 Ball Pool Hile 2022 Apk</strong>.</li>
|
17 |
-
<li>Go to your device settings and enable the option to install apps from unknown sources.</li>
|
18 |
-
<li>Locate the downloaded apk file and tap on it to start the installation process.</li>
|
19 |
-
<li>Follow the instructions on the screen and wait for the installation to complete.</li>
|
20 |
-
<li>Launch the game and enjoy!</li>
|
21 |
-
</ol>
|
22 |
-
<p><a href="">Download 8 Ball Pool Hile 2022 Apk Here</a></p>
|
23 |
-
<p>8 ball pool hileli apk indir 2022<br />
|
24 |
-
8 ball pool hile nasıl yapılır 2022<br />
|
25 |
-
8 ball pool hile mod apk son sürüm<br />
|
26 |
-
8 ball pool hile apk dayı<br />
|
27 |
-
8 ball pool hile apk android oyun club<br />
|
28 |
-
8 ball pool hile apk para hilesi<br />
|
29 |
-
8 ball pool hile apk sınırsız para<br />
|
30 |
-
8 ball pool hile apk güncel<br />
|
31 |
-
8 ball pool hile apk hızlı vuruş<br />
|
32 |
-
8 ball pool hile apk uzun çubuk<br />
|
33 |
-
8 ball pool hile apk mega mod<br />
|
34 |
-
8 ball pool hile apk anti ban<br />
|
35 |
-
8 ball pool hile apk online<br />
|
36 |
-
8 ball pool hile apk vip<br />
|
37 |
-
8 ball pool hile apk elmas hilesi<br />
|
38 |
-
8 ball pool hile apk level atlama<br />
|
39 |
-
8 ball pool hile apk tüm masalar açık<br />
|
40 |
-
8 ball pool hile apk tüm toplar açık<br />
|
41 |
-
8 ball pool hile apk tüm kıyafetler açık<br />
|
42 |
-
8 ball pool hile apk tüm sopalar açık<br />
|
43 |
-
8 ball pool hile apk tüm ödüller açık<br />
|
44 |
-
8 ball pool hile apk tüm turnuvalar açık<br />
|
45 |
-
8 ball pool hile apk tüm özellikler açık<br />
|
46 |
-
8 ball pool hile apk tüm modlar açık<br />
|
47 |
-
8 ball pool hile apk tüm skinler açık<br />
|
48 |
-
8 ball pool hile apk bedava indir<br />
|
49 |
-
8 ball pool hile apk ücretsiz indir<br />
|
50 |
-
8 ball pool hile apk full indir<br />
|
51 |
-
8 ball pool hile apk linkli indir<br />
|
52 |
-
8 ball pool hile apk direk indir<br />
|
53 |
-
8 ball pool hile apk kolay indir<br />
|
54 |
-
8 ball pool hile apk güvenli indir<br />
|
55 |
-
8 ball pool hile apk virüssüz indir<br />
|
56 |
-
8 ball pool hile apk reklamsız indir<br />
|
57 |
-
8 ball pool hile apk kurulumu nasıl yapılır<br />
|
58 |
-
8 ball pool hile apk kullanımı nasıl yapılır<br />
|
59 |
-
8 ball pool hile apk yorumları nasıl yapılır<br />
|
60 |
-
8 ball pool hile apk puanları nasıl yapılır<br />
|
61 |
-
8 ball pool hile apk güncelleme nasıl yapılır<br />
|
62 |
-
8 ball pool hile apk silme nasıl yapılır<br />
|
63 |
-
8 ball pool hilesi nasıl indirilir ve kurulur 2022 <br />
|
64 |
-
8 ball pool hack mod menu download for android <br />
|
65 |
-
how to get unlimited coins and cash in 8 ball pool <br />
|
66 |
-
best tricks and tips for playing 8 ball pool <br />
|
67 |
-
how to win every game in 8 ball pool <br />
|
68 |
-
how to unlock all cues and tables in 8 ball pool <br />
|
69 |
-
how to play with friends in 8 ball pool <br />
|
70 |
-
how to get free spins and scratchers in 8 ball pool</p>
|
71 |
-
<h2>How to Play 8 Ball Pool with 8 Ball Pool Hile 2022 Apk</h2>
|
72 |
-
<p>Now that you have installed <strong>8 Ball Pool Hile 2022 Apk</strong>, you are ready to play and win every game. But how do you play 8 Ball Pool with this cheat? Well, it's very similar to playing the original game, but with some extra features and options. Here are some tips and tricks to help you play better:</p>
|
73 |
-
<h3>How to choose your game mode and table</h3>
|
74 |
-
<p>When you launch the game, you will see four different game modes: Play 1 on 1, Play with Friends, Play in Tournaments, and Practice Offline. You can choose any mode you want, depending on your preference and skill level. You can also choose from different tables, ranging from London to Venice, each with different entry fees and rewards.</p>
|
75 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you don't have to worry about losing your coins or cash, because you have unlimited amounts of them. You can also unlock all the tables for free, without having to level up or pay anything. Just tap on the table you want to play on and start the game.</p>
|
76 |
-
<h3>How to rack the balls and break effectively</h3>
|
77 |
-
<p>After choosing your game mode and table, you will see the balls arranged in a triangle on the table. This is called the rack. The player who breaks the rack is called the breaker. The breaker is decided randomly or by a coin toss. The breaker has to hit the cue ball with the cue stick and make contact with any ball in the rack. The goal is to spread the balls across the table and pocket one or more balls.</p>
|
78 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you can use extended guidelines and aim assist to help you break better. These features show you the trajectory and angle of your shots, as well as the possible outcomes of your shots. You can also enable auto-win mode or instant win option, which will automatically make you win the game after breaking, regardless of what happens next.</p>
|
79 |
-
<h3>How to use the cue stick and spin the cue ball</h3>
|
80 |
-
<p>The cue stick is the tool that you use to hit the cue ball. The cue ball is the white ball that you control with your cue stick. You can adjust the power and direction of your shots by dragging your finger on the screen. You can also apply spin to the cue ball by tapping on the spin icon on the bottom left corner of the screen. Spin can affect how the cue ball moves after hitting another ball or a cushion.</p>
|
81 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you can customize your cue stick and pool table with different designs and colors. You can also unlock exclusive cues that have better stats and abilities, such as more power, accuracy, spin, and time. You can also use unlimited cues without having to recharge them.</p>
|
82 |
-
<h3>How to pocket your balls and win the game</h3>
|
83 |
-
<p>The objective of 8 Ball Pool is to pocket all your balls (either solids or stripes) before your opponent does, and then pocket the black 8 ball in a designated pocket. You have to call your shots before making them, by tapping on the pocket where you want to send your ball. If you pocket a ball of your type, you get another turn. If you miss or foul (such as hitting your opponent's ball first, or not hitting any ball at all), your turn ends and your opponent gets a chance.</p>
|
84 |
-
the game after pocketing any ball, regardless of the rules or the outcome.</p>
|
85 |
-
<h2>Tips and Tricks for Using 8 Ball Pool Hile 2022 Apk</h2>
|
86 |
-
<p>Now that you know how to play 8 Ball Pool with <strong>8 Ball Pool Hile 2022 Apk</strong>, you might be wondering how to make the most of this cheat. Here are some tips and tricks that will help you enjoy the game more and improve your skills:</p>
|
87 |
-
<h3>How to customize your cue and pool table</h3>
|
88 |
-
<p>One of the fun aspects of 8 Ball Pool is that you can customize your cue and pool table with different styles and themes. You can choose from hundreds of cues and tables, each with different looks and features. You can also create your own cue and table by mixing and matching different parts and colors.</p>
|
89 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you can unlock all the cues and tables for free, without having to spend any coins or cash. You can also access exclusive cues and tables that are not available in the original game, such as the VIP cue, the Galaxy cue, and the Halloween table. You can change your cue and table anytime you want, by tapping on the gear icon on the top right corner of the screen.</p>
|
90 |
-
<h3>How to earn more coins and cash</h3>
|
91 |
-
<p>Coins and cash are the main currencies in 8 Ball Pool. You need coins to enter matches, buy cues, and upgrade your skills. You need cash to buy premium items, such as surprise boxes, scratch cards, and chat packs. You can earn coins and cash by winning matches, completing missions, watching ads, spinning the wheel, and opening chests.</p>
|
92 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you don't have to worry about earning coins and cash, because you have unlimited amounts of them. You can also use them to buy anything you want in the game, without any restrictions or limitations. You can also use them to tip your opponents or send gifts to your friends.</p>
|
93 |
-
<h3>How to unlock exclusive items and cues</h3>
|
94 |
-
<p>Another way to spice up your 8 Ball Pool experience is to unlock exclusive items and cues that are not available in the regular game. These items and cues have special designs, effects, and abilities that make them stand out from the rest. Some examples of these items and cues are:</p>
|
95 |
-
<table>
|
96 |
-
<tr>
|
97 |
-
<th>Item/Cue</th>
|
98 |
-
<th>Description</th>
|
99 |
-
</tr>
|
100 |
-
<tr>
|
101 |
-
<td>The King Cue</td>
|
102 |
-
<td>A golden cue that has a crown on its tip. It has high stats and a royal aura.</td>
|
103 |
-
</tr>
|
104 |
-
<tr>
|
105 |
-
<td>The Firestorm Cue</td>
|
106 |
-
<td>A fiery cue that has flames on its shaft. It has high power and a burning effect.</td>
|
107 |
-
</tr>
|
108 |
-
<tr>
|
109 |
-
<td>The Ice Cue</td>
|
110 |
-
<td>A frosty cue that has ice crystals on its butt. It has high spin and a freezing effect.</td>
|
111 |
-
</tr>
|
112 |
-
<tr>
|
113 |
-
<td>The Dragon Cue</td>
|
114 |
-
<td>A mythical cue that has a dragon head on its tip. It has high accuracy and a dragon breath effect.</td>
|
115 |
-
</tr>
|
116 |
-
<tr>
|
117 |
-
<td>The Legendary Cue Collection</td>
|
118 |
-
<td>A collection of 20 cues that have unique designs and abilities. They also have a chance to recharge themselves after every shot.</td>
|
119 |
-
</tr>
|
120 |
-
<tr>
|
121 |
-
<td>The VIP Cue Collection</td>
|
122 |
-
<td>A collection of 10 cues that are only available for VIP members. They have high stats and a VIP badge.</td>
|
123 |
-
</tr>
|
124 |
-
<tr>
|
125 |
-
<td>The Surprise Box</td>
|
126 |
-
<td>A box that contains a random item or cue. It can be opened with cash or keys.</td>
|
127 |
-
</tr>
|
128 |
-
<tr>
|
129 |
-
<td>The Scratch Card</td>
|
130 |
-
<td>A card that can be scratched to reveal a prize. It can be bought with cash or earned by playing matches.</td>
|
131 |
-
</tr>
|
132 |
-
<tr>
|
133 |
-
</td>
|
134 |
-
<td>A pack of chat messages that can be used to communicate with other players. It can be bought with cash or earned by playing matches.</td>
|
135 |
-
</tr>
|
136 |
-
</table>
|
137 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you can unlock all these items and cues for free, without having to spend any coins, cash, or keys. You can also access them anytime you want, by tapping on the shop icon on the top left corner of the screen.</p>
|
138 |
-
<h3>How to challenge your friends and other players online</h3>
|
139 |
-
<p>One of the best features of 8 Ball Pool is that you can play with your friends and other players online, in real-time. You can challenge anyone you want, from anywhere in the world, and show off your skills and style. You can also chat with your opponents, send them emojis, and tip them coins.</p>
|
140 |
-
<p>With <strong>8 Ball Pool Hile 2022 Apk</strong>, you can challenge anyone you want, without any restrictions or limitations. You can also use extended guidelines and aim assist to help you win every match easily. You can also enable auto-win mode or instant win option, which will automatically make you win the game after making any shot, regardless of the rules or the outcome.</p>
|
141 |
-
<h3>How to avoid getting banned or detected by Miniclip</h3>
|
142 |
-
<p>The only downside of using <strong>8 Ball Pool Hile 2022 Apk</strong> is that you might get banned or detected by Miniclip, the developer of the original game. Miniclip has a strict policy against cheating and hacking, and they use various methods to detect and ban users who use cheats or hacks. If you get banned or detected, you might lose your account, your progress, and your items.</p>
|
143 |
-
<p>However, with <strong>8 Ball Pool Hile 2022 Apk</strong>, you don't have to worry about getting banned or detected, because this cheat has a built-in anti-cheat detection and ban protection system. This system prevents Miniclip from detecting your cheat usage and banning your account. It also hides your IP address and encrypts your data, making it impossible for Miniclip to trace your identity or location.</p>
|
144 |
-
<h2>Conclusion</h2>
|
145 |
-
<p>In conclusion, <strong>8 Ball Pool Hile 2022 Apk</strong> is the best cheat for 8 Ball Pool that you can find online. It gives you unlimited access to all the features and resources of the game, such as coins, cash, cues, tables, items, and modes. It also gives you extra features and options, such as extended guidelines, aim assist, auto-win mode, instant win option, anti-cheat detection, and ban protection. It is easy to download and install, safe and virus-free, compatible with any device or platform, and free of charge.</p>
|
146 |
-
<p>If you love playing 8 Ball Pool and want to have more fun and excitement in your pool games, then you should definitely try out <strong>8 Ball Pool Hile 2022 Apk</strong>. You will not regret it. You will enjoy the game more than ever before, and you will become a pool master in no time.</p>
|
147 |
-
<p>So what are you waiting for? Download <strong>8 Ball Pool Hile 2022 Apk</strong> now and start playing like a pro!</p>
|
148 |
-
<p><a href="">Download 8 Ball Pool Hile 2022 Apk Here</a></p>
|
149 |
-
<h2>FAQs</h2>
|
150 |
-
<h4>What is the difference between 8 Ball Pool Hile 2022 Apk and other cheats?</h4>
|
151 |
-
<p><strong>8 Ball Pool Hile 2022 Apk</strong> is different from other cheats because it is a modified version of the original game that gives you unlimited access to all the features and resources of the game. Other cheats are usually external tools or apps that require you to run them separately from the game or inject them into the game. These cheats are more risky and less effective than <strong>8 Ball Pool Hile 2022 Apk</strong>.</p>
|
152 |
-
<h4>Is 8 Ball Pool Hile 2022 Apk safe and virus-free?</h4>
|
153 |
-
<p><strong>8 Ball Pool Hile 2022 Apk</strong> is safe and virus-free because it is developed by a team of professional programmers who have tested it thoroughly before releasing it to the public. It does not contain any malware or spyware that could harm your device or steal your personal information. It also does not require any permissions or access to your device's functions or data.</p>
|
154 |
-
<h4>Can I use 8 Ball Pool Hile 2022 Apk on any device or platform?</h4>
|
155 |
-
<p><strong>8 Ball Pool Hile 2022 Apk</strong> can be used on any device or platform that supports Android applications. You can use it on your smartphone, tablet, laptop, or desktop, as long as they have an Android operating system. You can also use it on other platforms, such as iOS, Windows, or Mac, by using an Android emulator, such as BlueStacks or Nox Player.</p>
|
156 |
-
<h4>Do I need to root or jailbreak my device to use 8 Ball Pool Hile 2022 Apk?</h4>
|
157 |
-
<p>No, you do not need to root or jailbreak your device to use <strong>8 Ball Pool Hile 2022 Apk</strong>. This cheat does not require any modifications or alterations to your device's system or firmware. It works perfectly fine on any device, regardless of its root or jailbreak status.</p>
|
158 |
-
<h4>How can I contact the developers of 8 Ball Pool Hile 2022 Apk if I have any questions or issues?</h4>
|
159 |
-
<p>If you have any questions or issues regarding <strong>8 Ball Pool Hile 2022 Apk</strong>, you can contact the developers of this cheat by visiting their official website or social media pages. You can also leave a comment or a review on the download page of this cheat. The developers are very responsive and helpful, and they will try to solve your problems as soon as possible.</p> 197e85843d<br />
|
160 |
-
<br />
|
161 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dawn Awakening The Ultimate Guide to Surviving the Post-Apocalyptic World.md
DELETED
@@ -1,124 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>What is Dawn Awakening and Why is it Important?</h1>
|
3 |
-
<p>Dawn awakening is a practice of waking up naturally with the sunrise, without the use of an alarm clock or other artificial means. It is a way of aligning our sleep cycle with the natural rhythm of light and darkness, which has many benefits for our physical and mental health.</p>
|
4 |
-
<h2>dawn awakening</h2><br /><p><b><b>Download</b> ⚹⚹⚹ <a href="https://urlin.us/2uT2Px">https://urlin.us/2uT2Px</a></b></p><br /><br />
|
5 |
-
<p>Some of the benefits of dawn awakening are:</p>
|
6 |
-
<ul>
|
7 |
-
<li>Improved sleep quality: Waking up with the light signals our brain to stop producing melatonin, the hormone that regulates sleep. This helps us feel more refreshed and alert in the morning.</li>
|
8 |
-
<li>Enhanced mood: Exposure to natural light in the morning boosts our serotonin levels, the neurotransmitter that regulates mood, happiness, and well-being.</li>
|
9 |
-
<li>Reduced stress: Waking up gradually and peacefully reduces the cortisol levels, the hormone that triggers stress, anxiety, and inflammation.</li>
|
10 |
-
<li>Better immunity: Waking up with the sun strengthens our immune system by stimulating the production of natural killer cells, which fight infections and diseases.</li>
|
11 |
-
<li>Increased energy: Waking up with the light increases our metabolism and blood circulation, which provide us with more energy throughout the day.</li>
|
12 |
-
</ul>
|
13 |
-
<p>How to practice dawn awakening:</p>
|
14 |
-
<p>Practicing dawn awakening is not as difficult as it may seem. Here are some tips and techniques to help you wake up naturally with the sunrise:</p>
|
15 |
-
<ul>
|
16 |
-
<li>Go to bed early: The first step to wake up early is to go to bed early. Aim for at least seven to eight hours of sleep per night, and avoid caffeine, alcohol, and screens before bedtime.</li>
|
17 |
-
<li>Use curtains or blinds: If you live in an area where there is too much artificial light at night, use curtains or blinds to block it out. This will help you fall asleep faster and deeper.</li>
|
18 |
-
<li>Open your windows: If possible, open your windows before you go to bed or when you wake up. This will allow fresh air and natural light to enter your room, which will help you wake up more easily.</li>
|
19 |
-
<li>Avoid snoozing: When you wake up with the light, resist the temptation to snooze or go back to sleep. Snoozing disrupts your sleep cycle and makes you feel more groggy and tired.</li>
|
20 |
-
<li>Have a morning routine: Having a morning routine can motivate you to get out of bed and start your day. You can do some stretching, meditation, journaling, or anything that makes you feel good.</li>
|
21 |
-
</ul>
|
22 |
-
<h2>The Science Behind Dawn Awakening</h2>
|
23 |
-
<p>Dawn awakening is not only a spiritual practice but also a scientific one. There is a lot of research that supports the benefits of waking up with the sun for our health and well-being.</p>
|
24 |
-
<p>dawn awakening game<br />
|
25 |
-
dawn awakening tencent<br />
|
26 |
-
dawn awakening release date<br />
|
27 |
-
dawn awakening apk<br />
|
28 |
-
dawn awakening android<br />
|
29 |
-
dawn awakening ios<br />
|
30 |
-
dawn awakening download<br />
|
31 |
-
dawn awakening gameplay<br />
|
32 |
-
dawn awakening beta<br />
|
33 |
-
dawn awakening bluestacks<br />
|
34 |
-
dawn awakening open world survival<br />
|
35 |
-
dawn awakening zombie survival game<br />
|
36 |
-
dawn awakening unreal engine 4<br />
|
37 |
-
dawn awakening pre register<br />
|
38 |
-
dawn awakening official website<br />
|
39 |
-
dawn awakening english version<br />
|
40 |
-
dawn awakening system requirements<br />
|
41 |
-
dawn awakening trailer<br />
|
42 |
-
dawn awakening review<br />
|
43 |
-
dawn awakening reddit<br />
|
44 |
-
dawn awakening discord<br />
|
45 |
-
dawn awakening wiki<br />
|
46 |
-
dawn awakening mod apk<br />
|
47 |
-
dawn awakening cheats<br />
|
48 |
-
dawn awakening tips and tricks<br />
|
49 |
-
dawn awakening best weapons<br />
|
50 |
-
dawn awakening best skills<br />
|
51 |
-
dawn awakening best class<br />
|
52 |
-
dawn awakening character creation<br />
|
53 |
-
dawn awakening crafting guide<br />
|
54 |
-
dawn awakening base building<br />
|
55 |
-
dawn awakening coop mode<br />
|
56 |
-
dawn awakening multiplayer mode<br />
|
57 |
-
dawn awakening pvp mode<br />
|
58 |
-
dawn awakening online mode<br />
|
59 |
-
dawn awakening offline mode<br />
|
60 |
-
dawn awakening emulator<br />
|
61 |
-
dawn awakening pc version<br />
|
62 |
-
dawn awakening mac version<br />
|
63 |
-
dawn awakening windows version<br />
|
64 |
-
dawn awakening linux version<br />
|
65 |
-
dawn awakening steam version<br />
|
66 |
-
dawn awakening google play store<br />
|
67 |
-
dawn awakening app store<br />
|
68 |
-
dawn awakening facebook page<br />
|
69 |
-
dawn awakening youtube channel<br />
|
70 |
-
dawn awakening twitter account<br />
|
71 |
-
dawn awakening instagram account<br />
|
72 |
-
dawn awakening tiktok account</p>
|
73 |
-
<p>The main reason why dawn awakening works is because it affects our circadian rhythm, which is our internal clock that regulates our sleep-wake cycle. Our circadian rhythm is influenced by external cues, such as light and temperature, which tell us when to sleep and when to wake up.</p>
|
74 |
-
<p>Light is the most powerful cue for our circadian rhythm. When we are exposed to natural light in the morning, it activates a part of our brain called the suprachiasmatic nucleus (SCN), which sends signals to other parts of our body to regulate our hormones, metabolism, temperature, and mood.</p>
|
75 |
-
<p> <p>However, when we are exposed to artificial light at night, such as from screens, lamps, or streetlights, it confuses our circadian rhythm and disrupts our sleep quality. Artificial light suppresses the production of melatonin, which makes it harder for us to fall asleep and stay asleep. It also affects our serotonin levels, which can lead to depression, anxiety, and mood disorders.</p>
|
76 |
-
<p>One way to overcome the negative effects of artificial light is to use dawn simulation devices, which are special lamps that mimic the natural sunrise in your bedroom. These devices gradually increase the brightness and color temperature of the light in the morning, which helps you wake up more naturally and comfortably.</p>
|
77 |
-
<p>Dawn simulation devices have been shown to have many advantages over conventional alarm clocks, such as:</p>
|
78 |
-
<ul>
|
79 |
-
<li>Improving sleep quality and duration: Studies have found that dawn simulation devices can improve the quality and duration of sleep by reducing the number of awakenings and increasing the amount of deep sleep.</li>
|
80 |
-
<li>Enhancing mood and cognitive performance: Studies have also found that dawn simulation devices can enhance mood and cognitive performance by increasing alertness, attention, memory, and executive function.</li>
|
81 |
-
<li>Reducing seasonal affective disorder (SAD): Studies have also found that dawn simulation devices can reduce the symptoms of seasonal affective disorder (SAD), which is a type of depression that occurs during the winter months due to lack of sunlight.</li>
|
82 |
-
</ul>
|
83 |
-
<h3>The Spiritual Meaning of Dawn Awakening</h3>
|
84 |
-
<p>Dawn awakening is not only a scientific practice but also a spiritual one. Waking up with the sun can help us connect with nature and the divine, and inspire us to live more creatively, gratefully, and optimistically.</p>
|
85 |
-
<p>Waking up with the sun can help us connect with nature and the divine by:</p>
|
86 |
-
<ul>
|
87 |
-
<li>Aligning ourselves with the natural cycle of life: Waking up with the sun reminds us that we are part of nature and that we follow the same cycle of birth, growth, decay, and death. It helps us appreciate the beauty and wonder of creation and feel more in harmony with ourselves and the world.</li>
|
88 |
-
<li>Acknowledging the presence and power of a higher force: Waking up with the sun also reminds us that there is a higher force that governs the universe and that we are not alone. It helps us feel more humble, grateful, and trusting in the divine plan and guidance.</li>
|
89 |
-
</ul>
|
90 |
-
<p>Waking up with the sun can also inspire us to live more creatively, gratefully, and optimistically by:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Stimulating our imagination and expression: Waking up with the sun can stimulate our imagination and expression by exposing us to different colors, shapes, sounds, and sensations. It can help us see things from a fresh perspective and express ourselves more authentically and artistically.</li>
|
93 |
-
<li>Cultivating our gratitude and appreciation: Waking up with the sun can also cultivate our gratitude and appreciation by making us aware of the gifts and opportunities that each day brings. It can help us focus on what we have rather than what we lack, and on what we can do rather than what we can't.</li>
|
94 |
-
<li>Fostering our optimism and hope: Waking up with the sun can also foster our optimism and hope by showing us that every day is a new beginning and a chance to start over. It can help us overcome our fears and challenges, and embrace our dreams and possibilities.</li>
|
95 |
-
</ul>
|
96 |
-
<h4>The Challenges and Solutions of Dawn Awakening</h4>
|
97 |
-
<p>Dawn awakening is a rewarding practice but it also comes with some challenges. Some of the obstacles that may prevent us from waking up with the sun are:</p>
|
98 |
-
<ul>
|
99 |
-
<li>The modern lifestyle: Our modern lifestyle is often incompatible with dawn awakening. We tend to stay up late, work long hours, use artificial light, consume stimulants, and live in noisy environments. These factors interfere with our natural sleep cycle and make it harder for us to wake up early.</li>
|
100 |
-
<li>The different seasons: The different seasons also affect our ability to wake up with the sun. In winter, the days are shorter and darker, which makes it harder for us to get enough light exposure in the morning. In summer, the days are longer and brighter, which makes it harder for us to fall asleep at night.</li>
|
101 |
-
<li>The different climates: The different climates also influence our sleep-wake cycle. In hot climates, we may feel more uncomfortable sleeping at night due to high temperatures and humidity. In cold climates, we may feel more reluctant to get out of bed in the morning due to low temperatures and frost.</li>
|
102 |
-
<li>The different time zones: The different time zones also pose a challenge for dawn awakening. When we travel across different time zones, we may experience jet lag, which is a disruption of our circadian rhythm <p>caused by the mismatch between our internal clock and the external time. This can make us feel tired, irritable, and confused.</p>
|
103 |
-
<p>How to overcome the challenges of dawn awakening:</p>
|
104 |
-
<p>Despite these challenges, there are some solutions that can help us practice dawn awakening more easily and consistently. Here are some suggestions:</p>
|
105 |
-
<ul>
|
106 |
-
<li>Adjust your schedule: The best way to overcome the modern lifestyle is to adjust your schedule to fit your natural sleep cycle. Try to go to bed and wake up at the same time every day, and avoid activities that can disrupt your sleep, such as working, watching TV, or using your phone at night.</li>
|
107 |
-
<li>Use light therapy: The best way to overcome the different seasons is to use light therapy, which is a treatment that involves exposing yourself to artificial light that mimics the natural sunlight. You can use a light therapy device in the morning to help you wake up, or in the evening to help you fall asleep.</li>
|
108 |
-
<li>Use temperature regulation: The best way to overcome the different climates is to use temperature regulation, which is a method that involves adjusting the temperature of your bedroom and your body to optimize your sleep quality. You can use a fan, an air conditioner, a heater, or a humidifier to create a comfortable sleeping environment. You can also use a warm or cold shower, a hot or cold drink, or a heating pad or ice pack to regulate your body temperature.</li>
|
109 |
-
<li>Use melatonin supplements: The best way to overcome the different time zones is to use melatonin supplements, which are pills that contain the hormone that regulates sleep. You can take melatonin before you travel to help you adjust to the new time zone, or after you arrive to help you fall asleep.</li>
|
110 |
-
</ul>
|
111 |
-
<h5>Conclusion</h5>
|
112 |
-
<p>Dawn awakening is a practice that can improve our health, happiness, and spirituality. By waking up naturally with the sunrise, we can align ourselves with the natural rhythm of life, connect with nature and the divine, and inspire ourselves to live more creatively, gratefully, and optimistically.</p>
|
113 |
-
<p>Dawn awakening is not without its challenges, but they can be overcome with some adjustments and techniques. By following some simple tips and using some helpful tools, we can make dawn awakening a part of our daily routine and enjoy its benefits.</p>
|
114 |
-
<p>If you are interested in learning more about dawn awakening and how to practice it, here are some resources and recommendations for further reading:</p>
|
115 |
-
<ul>
|
116 |
-
<li><a href="">The Miracle Morning: The Not-So-Obvious Secret Guaranteed to Transform Your Life (Before 8AM)</a> by Hal Elrod: A book that teaches you how to create a morning routine that will transform your life.</li>
|
117 |
-
<li><a href="">The 5 AM Club: Own Your Morning. Elevate Your Life.</a> by Robin Sharma: A book that shows you how to wake up early and achieve your goals.</li>
|
118 |
-
<li><a href="">Sunrise Alarm Clocks: The Best Way To Wake Up In 2023</a> by Sleep Advisor: An article that reviews the best dawn simulation devices on the market.</li>
|
119 |
-
<li><a href="">How To Wake Up Early And Enjoy It</a> by Zen Habits: A blog post that offers some practical tips and advice on how to wake up early and enjoy it.</li>
|
120 |
-
<li><a href="">How To Wake Up At Dawn To Witness The Most Beautiful Sunrise</a> by Travel + Leisure: A guide that tells you how to find the best spots and times to watch the sunrise around the world.</li>
|
121 |
-
</ul>
|
122 |
-
FAQs: Q: What is dawn awakening? A: Dawn awakening is a practice of waking up naturally with the sunrise, without the use of an alarm clock or other artificial means. Q: What are the benefits of dawn awakening? A: Some of the benefits of dawn awakening are improved sleep quality, enhanced mood, reduced stress, better immunity, and increased energy. Q: How do I practice dawn awakening? A: Some of the tips and techniques for practicing dawn awakening are going to bed early, using curtains or blinds, opening your windows, avoiding snoozing, and having a morning routine. Q: What are some of the challenges of dawn awakening? A: Some of the challenges of dawn awakening are the modern lifestyle, the different seasons, the different climates, and the different time zones. Q: How do I overcome the challenges of dawn awakening? A: Some of the solutions for overcoming the challenges of dawn awakening are adjusting your schedule, using light therapy, using temperature regulation, and using melatonin supplements.</p> 197e85843d<br />
|
123 |
-
<br />
|
124 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/APKPure The Ultimate App Store for Android Users.md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>What is Apukpure and why you should use it</h1>
|
3 |
-
<p>If you are an Android user, you might have heard of Apukpure, an alternative app store that allows you to download all sorts of applications that you can't find in Google Play Store. But what is Apukpure exactly and what makes it different from other app stores? In this article, we will answer these questions and show you how to use Apukpure to download apps and games on your Android device.</p>
|
4 |
-
<h2>apukpure</h2><br /><p><b><b>DOWNLOAD</b> ===> <a href="https://jinyurl.com/2uNMK2">https://jinyurl.com/2uNMK2</a></b></p><br /><br />
|
5 |
-
<h2>What is Apukpure?</h2>
|
6 |
-
<p>Apukpure is an online platform that provides APK files for Android users. APK files are the installation packages for Android applications, similar to EXE files for Windows. By downloading APK files from Apukpure, you can install apps and games that are not available in your country, region, or device. You can also access older versions of apps and games that have been removed or updated in Google Play Store.</p>
|
7 |
-
<h3>How does Apukpure work?</h3>
|
8 |
-
<p>Apukpure works by scanning and verifying the APK files from various sources on the internet. It then uploads them to its own servers and provides a download link for users. Apukpure also has an app that you can install on your Android device, which acts as a browser and downloader for the APK files. You can use the app to search, download, and install apps and games from Apukpure.</p>
|
9 |
-
<h3>What are the benefits of Apukpure?</h3>
|
10 |
-
<p>There are many benefits of using Apukpure, such as:</p>
|
11 |
-
<ul>
|
12 |
-
<li>You can access apps and games that are not available in Google Play Store due to geo-restrictions, compatibility issues, or censorship.</li>
|
13 |
-
<li>You can download apps and games faster and easier than using Google Play Store.</li>
|
14 |
-
<li>You can update apps and games without waiting for Google Play Store to release them.</li>
|
15 |
-
<li>You can downgrade apps and games to older versions if you don't like the new updates.</li>
|
16 |
-
<li>You can discover new and interesting apps and games that are not featured in Google Play Store.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>How to download and install Apukpure on your Android device</h2>
|
19 |
-
<p>To use Apukpure, you need to download and install its app on your Android device. Here are the steps to do so:</p>
|
20 |
-
<h3>Step 1: Enable unknown sources</h3>
|
21 |
-
<p>Before you can install any APK file on your Android device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than Google Play Store. To enable unknown sources, follow these steps:</p>
|
22 |
-
<p>APKPure app download for Android<br />
|
23 |
-
APKPure apk file free download<br />
|
24 |
-
APKPure alternative app store<br />
|
25 |
-
APKPure games download<br />
|
26 |
-
APKPure app store review<br />
|
27 |
-
APKPure for PC Windows<br />
|
28 |
-
APKPure mod apk download<br />
|
29 |
-
APKPure app not working<br />
|
30 |
-
APKPure vs Aptoide<br />
|
31 |
-
APKPure app update<br />
|
32 |
-
APKPure app install<br />
|
33 |
-
APKPure app lock<br />
|
34 |
-
APKPure app manager<br />
|
35 |
-
APKPure app backup<br />
|
36 |
-
APKPure app uninstaller<br />
|
37 |
-
APKPure app downloader<br />
|
38 |
-
APKPure app categories<br />
|
39 |
-
APKPure app ratings<br />
|
40 |
-
APKPure app recommendations<br />
|
41 |
-
APKPure app search<br />
|
42 |
-
APKPure app history<br />
|
43 |
-
APKPure app settings<br />
|
44 |
-
APKPure app notifications<br />
|
45 |
-
APKPure app permissions<br />
|
46 |
-
APKPure app security<br />
|
47 |
-
APKPure app size<br />
|
48 |
-
APKPure app speed<br />
|
49 |
-
APKPure app language<br />
|
50 |
-
APKPure app region<br />
|
51 |
-
APKPure app support<br />
|
52 |
-
APKPure app feedback<br />
|
53 |
-
APKPure app features<br />
|
54 |
-
APKPure app benefits<br />
|
55 |
-
APKPure app disadvantages<br />
|
56 |
-
APKPure app pros and cons<br />
|
57 |
-
APKPure app comparison<br />
|
58 |
-
APKPure app alternatives<br />
|
59 |
-
APKPure app competitors<br />
|
60 |
-
APKPure app advantages<br />
|
61 |
-
APKPure app disadvantages</p>
|
62 |
-
<ol>
|
63 |
-
<li>Go to your device's settings.</li>
|
64 |
-
<li>Tap on security or privacy.</li>
|
65 |
-
<li>Find and toggle on unknown sources or allow installation from unknown sources.</li>
|
66 |
-
</ol>
|
67 |
-
<h3>Step 2: Download the Apukpure APK file</h3>
|
68 |
-
<p>Next, you need to download the Apukpure APK file from its official website. To do so, follow these steps:</p>
|
69 |
-
<ol>
|
70 |
-
<li>Open your browser and go to <a href="(^1^)">https://www.malavida.com/en/soft/apkpure/android/</a></li>
|
71 |
-
<li>Tap on the green download button.</li>
|
72 |
-
<li>Wait for the download to finish.</li>
|
73 |
-
</ol>
|
74 |
-
<h3>Step 3: Install the Apukpure app</h3>
|
75 |
-
<p> <p>Finally, you need to install the Apukpure app on your device. To do so, follow these steps:</p>
|
76 |
-
<ol>
|
77 |
-
<li>Locate the Apukpure APK file in your downloads folder or notification bar.</li>
|
78 |
-
<li>Tap on the file to open it.</li>
|
79 |
-
<li>Tap on install and wait for the installation to complete.</li>
|
80 |
-
<li>Tap on open to launch the Apukpure app.</li>
|
81 |
-
</ol>
|
82 |
-
<h2>How to use Apukpure to download apps and games</h2>
|
83 |
-
<p>Now that you have installed the Apukpure app, you can use it to download apps and games on your device. Here are the steps to do so:</p>
|
84 |
-
<h3>Step 1: Open the Apukpure app</h3>
|
85 |
-
<p>Open the Apukpure app from your app drawer or home screen. You will see a simple and user-friendly interface with various categories and tabs. You can browse the featured, popular, new, and updated apps and games on the home page. You can also use the menu button on the top left corner to access more options and settings.</p>
|
86 |
-
<h3>Step 2: Search for the app or game you want</h3>
|
87 |
-
<p>If you have a specific app or game in mind, you can use the search bar on the top right corner to find it. Just type in the name of the app or game and tap on the magnifying glass icon. You will see a list of results that match your query. You can also filter the results by category, rating, price, size, and more.</p>
|
88 |
-
<h3>Step 3: Download and install the app or game</h3>
|
89 |
-
<p>Once you have found the app or game you want, tap on it to open its details page. You will see a brief description, screenshots, ratings, reviews, and more information about the app or game. You will also see a green download button at the bottom of the page. Tap on it to start downloading the APK file. You will see a progress bar and a notification on your screen. When the download is finished, tap on install to install the app or game on your device. You can then open it from your app drawer or home screen.</p>
|
90 |
-
<h2>How to update apps and games with Apukpure</h2>
|
91 |
-
<p>One of the advantages of using Apukpure is that you can update apps and games without waiting for Google Play Store to release them. Here are the steps to do so:</p>
|
92 |
-
<h3>Step 1: Check for updates in the Apukpure app</h3>
|
93 |
-
<p>To check for updates, open the Apukpure app and tap on the menu button on the top left corner. Then tap on update. You will see a list of apps and games that have new versions available. You can also tap on check all to scan all your installed apps and games for updates.</p>
|
94 |
-
<h3>Step 2: Download and install the updates</h3>
|
95 |
-
<p>To download and install the updates, tap on update all or select the apps and games you want to update individually. Then tap on download. You will see a progress bar and a notification on your screen. When the download is finished, tap on install to install the updates on your device. You can then open them from your app drawer or home screen.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<p>In conclusion, Apukpure is an alternative app store that allows you to download apps and games that are not available in Google Play Store. It also lets you update apps and games faster and easier than using Google Play Store. To use Apukpure, you need to download and install its app on your Android device. Then you can use it to search, download, and install apps and games from its platform.</p>
|
98 |
-
<p>We hope this article has helped you understand what is Apukpure and how to use it. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
99 |
-
<h2>Frequently Asked Questions</h2>
|
100 |
-
<ul>
|
101 |
-
<li><b>Is Apukpure safe?</b></li>
|
102 |
-
<p>Apukpure claims that it scans and verifies all the APK files before uploading them to its servers. However, there is always a risk of downloading APK files from unknown sources as they may contain malware or viruses. Therefore, we recommend that you use a reliable antivirus software on your device and only download APK files from trusted sources.</p>
|
103 |
-
<li><b>Is Apukpure legal?</b></li>
|
104 |
-
<p>Apukpure does not host any pirated or illegal content on its platform. It only provides links to APK files that are freely available on the internet. However, some of these APK files may violate the terms and conditions of Google Play Store or other app developers. Therefore, we advise that you use Apukpure at your own discretion and respect the intellectual property rights of others.</p>
|
105 |
-
<li><b <p>Here are some more FAQs to complete the article:</p>
|
106 |
-
<ul>
|
107 |
-
<li><b>How to uninstall Apukpure?</b></li>
|
108 |
-
<p>If you want to uninstall Apukpure from your device, you can do so by following these steps:</p>
|
109 |
-
<ol>
|
110 |
-
<li>Go to your device's settings.</li>
|
111 |
-
<li>Tap on apps or applications.</li>
|
112 |
-
<li>Find and tap on Apukpure.</li>
|
113 |
-
<li>Tap on uninstall and confirm.</li>
|
114 |
-
</ol>
|
115 |
-
<li><b>How to contact Apukpure?</b></li>
|
116 |
-
<p>If you have any questions, suggestions, or complaints about Apukpure, you can contact them by using the following methods:</p>
|
117 |
-
<ul>
|
118 |
-
<li>Email: [email protected]</li>
|
119 |
-
<li>Facebook: https://www.facebook.com/apkpure</li>
|
120 |
-
<li>Twitter: https://twitter.com/apkpure</li>
|
121 |
-
</ul>
|
122 |
-
<li><b>What are some alternatives to Apukpure?</b></li>
|
123 |
-
<p>If you are looking for some alternatives to Apukpure, you can try these app stores:</p>
|
124 |
-
<ul>
|
125 |
-
<li>Aptoide: A decentralized app store that allows users to create and manage their own app stores.</li>
|
126 |
-
<li>Uptodown: A multi-platform app store that offers apps and games for Android, Windows, Mac, Linux, and more.</li>
|
127 |
-
<li>APKMirror: A website that hosts APK files for popular apps and games that are updated frequently.</li>
|
128 |
-
</ul></p> 401be4b1e0<br />
|
129 |
-
<br />
|
130 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Android Oyun Club Car Parking Son Srm The Most Popular Parking Game on Google Play.md
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Android Oyun Club Car Parking Son Sürüm: A Review</h1>
|
3 |
-
<p>If you are looking for a new and exciting game to play on your Android device, you might want to check out Android Oyun Club Car Parking Son Sürüm. This is the latest version of the popular Car Parking Multiplayer game, which is available for free on the Android Oyun Club platform. In this article, we will review what Android Oyun Club is, what Car Parking Multiplayer is, and what the latest version of the game offers. We will also provide you with the download link and instructions on how to install and play the game.</p>
|
4 |
-
<h2>What is Android Oyun Club?</h2>
|
5 |
-
<p>Android Oyun Club is a platform for sharing and downloading Android games. It is a community of gamers and developers who love playing and creating games for Android devices. On Android Oyun Club, you can find thousands of games in various genres, such as action, adventure, racing, simulation, puzzle, and more. You can also find games that are not available on Google Play Store, or that are paid on Google Play Store but free on Android Oyun Club. You can also interact with other users by leaving comments, ratings, and tips on the games.</p>
|
6 |
-
<h2>android oyun club car parking son sürüm</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://jinyurl.com/2uNL2I">https://jinyurl.com/2uNL2I</a></b></p><br /><br />
|
7 |
-
<h3>A platform for sharing and downloading Android games</h3>
|
8 |
-
<p>Android Oyun Club allows you to download and play any game you want for free. You can browse through the categories or search for your favorite game by name or keyword. You can also see the ratings, reviews, screenshots, and videos of the games before downloading them. You can also upload your own games or mods to share with other users. You can also request games that are not available on the platform, and the developers will try to find them for you.</p>
|
9 |
-
<h3>A community of gamers and developers</h3>
|
10 |
-
<p>Android Oyun Club is not just a platform for downloading games, but also a community of gamers and developers who love Android games. You can join the forums and chat rooms to discuss games, share tips, ask questions, or make friends with other users. You can also follow the news and updates about the latest games, events, contests, and giveaways on the platform. You can also support the developers by donating or buying their premium games.</p>
|
11 |
-
<p>android oyun club car parking multiplayer indir<br />
|
12 |
-
android oyun club car parking son sürüm apk<br />
|
13 |
-
android oyun club car parking mod menu<br />
|
14 |
-
android oyun club car parking hileli<br />
|
15 |
-
android oyun club car parking 3d<br />
|
16 |
-
android oyun club car parking pro<br />
|
17 |
-
android oyun club car parking yeni sürüm<br />
|
18 |
-
android oyun club car parking online<br />
|
19 |
-
android oyun club car parking simulator<br />
|
20 |
-
android oyun club car parking 2023<br />
|
21 |
-
android oyun club car parking hack<br />
|
22 |
-
android oyun club car parking premium<br />
|
23 |
-
android oyun club car parking güncel sürüm<br />
|
24 |
-
android oyun club car parking para hilesi<br />
|
25 |
-
android oyun club car parking real<br />
|
26 |
-
android oyun club car parking vip<br />
|
27 |
-
android oyun club car parking full sürüm<br />
|
28 |
-
android oyun club car parking mod apk indir<br />
|
29 |
-
android oyun club car parking update<br />
|
30 |
-
android oyun club car parking free download<br />
|
31 |
-
android oyun club car parking unlimited money<br />
|
32 |
-
android oyun club car parking latest version<br />
|
33 |
-
android oyun club car parking cheats<br />
|
34 |
-
android oyun club car parking cracked<br />
|
35 |
-
android oyun club car parking offline<br />
|
36 |
-
android oyun club car parking classic<br />
|
37 |
-
android oyun club car parking mod menu indir<br />
|
38 |
-
android oyun club car parking yeni araçlar<br />
|
39 |
-
android oyun club car parking beta sürümü<br />
|
40 |
-
android oyun club car parking drift mode<br />
|
41 |
-
android oyun club car parking extreme<br />
|
42 |
-
android oyun club car parking gold hilesi<br />
|
43 |
-
android oyun club car parking ios indir<br />
|
44 |
-
android oyun club car parking keyifli oyuncu<br />
|
45 |
-
android oyun club car parking lite sürümü<br />
|
46 |
-
android oyun club car parking modifiye hilesi<br />
|
47 |
-
android oyun club car parking no ads<br />
|
48 |
-
android oyun club car parking oyuncu tv<br />
|
49 |
-
android oyun club car parking premium apk indir<br />
|
50 |
-
android oyun club car parking quizlet answers</p>
|
51 |
-
<h2>What is Car Parking Multiplayer?</h2>
|
52 |
-
<p>Car Parking Multiplayer is one of the most popular games on Android Oyun Club. It is a realistic and fun parking game that challenges you to park your car in various scenarios. You can also enjoy a multiplayer open world mode where you can explore, race, chat, and trade with other players. You can also customize your car with different parts, colors, stickers, and accessories.</p>
|
53 |
-
<h3>A realistic and fun parking game</h3>
|
54 |
-
<p>Car Parking Multiplayer offers you 82 real-life parking and driving challenges. You can choose from 100 cars with real interiors and physics. You can park cars, trucks, buses, or any other vehicle you want. You have to follow the traffic rules, avoid obstacles, use indicators, mirrors, cameras, and sensors to park your car correctly. You can also adjust the difficulty level, camera angle, steering mode, weather condition, time of day, and more to suit your preference.</p>
|
55 |
-
<h3>A multiplayer open world mode</h3>
|
56 |
-
<p>Car Parking Multiplayer also lets you enjoy a multiplayer open world mode where you can free roam in a large map with real gas stations and car services. You can compete against real players in multiplayer racing or join them in cooperative missions. You can also chat with them using voice or text messages. You can also exchange cars with other players or buy and sell them in the market.</p>
|
57 |
-
<h3>A car customization feature</h3>
|
58 |
-
<p>Car Parking Multiplayer <p>Car Parking Multiplayer also lets you customize your car with different parts, colors, stickers, and accessories. You can change the engine, suspension, wheels, tires, brakes, exhaust, turbo, transmission, and more to improve the performance of your car. You can also paint your car with any color you want, or apply decals and vinyls to make it look unique. You can also add spoilers, bumpers, hoods, grills, lights, horns, and more to enhance the appearance of your car.</p>
|
59 |
-
<h2>What is the latest version of Car Parking Multiplayer?</h2>
|
60 |
-
<p>The latest version of Car Parking Multiplayer is 4.8.4.1, which was released on June 15, 2023. This version has some new features and improvements that make the game more enjoyable and realistic. Here are some of the highlights of the latest version:</p>
|
61 |
-
<h3>The new features and improvements</h3>
|
62 |
-
<ul>
|
63 |
-
<li>A new map with a city, a desert, and a highway.</li>
|
64 |
-
<li>A new garage with more space and options for car customization.</li>
|
65 |
-
<li>A new car wash system that lets you clean your car and earn money.</li>
|
66 |
-
<li>A new police mode that lets you chase or be chased by the cops.</li>
|
67 |
-
<li>A new chat system that lets you send emojis and stickers to other players.</li>
|
68 |
-
<li>A new radio system that lets you listen to music from your device or online stations.</li>
|
69 |
-
<li>A new weather system that changes the climate and the lighting of the map.</li>
|
70 |
-
<li>A new traffic system that adds more cars and pedestrians to the map.</li>
|
71 |
-
<li>A new damage system that shows the effects of collisions and accidents on your car.</li>
|
72 |
-
<li>A new physics system that makes the car handling more realistic and responsive.</li>
|
73 |
-
</ul>
|
74 |
-
<h3>The download link and instructions</h3>
|
75 |
-
<p>If you want to download and play the latest version of Car Parking Multiplayer, you can follow these steps:</p>
|
76 |
-
<ol>
|
77 |
-
<li>Go to [Android Oyun Club] and search for Car Parking Multiplayer.</li>
|
78 |
-
<li>Click on the download button and wait for the file to be downloaded.</li>
|
79 |
-
<li>Open the file manager on your device and locate the downloaded file.</li>
|
80 |
-
<li>Tap on the file and allow the installation of unknown sources if prompted.</li>
|
81 |
-
<li>Wait for the installation to be completed and launch the game.</li>
|
82 |
-
</ol>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Android Oyun Club Car Parking Son Sürüm is a great game for anyone who loves parking games or driving games. It offers a realistic and fun parking experience with a variety of cars, challenges, and modes. It also has a multiplayer open world mode where you can explore, race, chat, and trade with other players. You can also customize your car with different parts, colors, stickers, and accessories. The latest version of the game has some new features and improvements that make it even better. You can download it for free from Android Oyun Club and enjoy it on your Android device.</p>
|
85 |
-
<h3>Why you should try Android Oyun Club Car Parking Son Sürüm</h3>
|
86 |
-
<p>You should try Android Oyun Club Car Parking Son Sürüm because:</p>
|
87 |
-
<ul>
|
88 |
-
<li>It is free to download and play.</li>
|
89 |
-
<li>It is realistic and fun to play.</li>
|
90 |
-
<li>It has a lot of cars, challenges, and modes to choose from.</li>
|
91 |
-
<li>It has a multiplayer open world mode where you can interact with other players.</li>
|
92 |
-
<li>It has a car customization feature where you can make your car unique.</li>
|
93 |
-
<li>It has a new version with new features and improvements.</li>
|
94 |
-
</ul>
|
95 |
-
<h3>FAQs</h3>
|
96 |
-
<p>Here are some frequently asked questions about Android Oyun Club Car Parking Son Sürüm:</p>
|
97 |
-
<table border="1">
|
98 |
-
<tr><th>Question</th><th>Answer</th></tr>
|
99 |
-
<tr><td>Is Android Oyun Club safe to use?</td><td>Yes, Android Oyun Club is safe to use. It does not contain any viruses or malware. However, you should always be careful when downloading files from unknown sources and scan them with an antivirus before opening them.</td></tr>
|
100 |
-
<tr><td>Is Car Parking Multiplayer online or offline?</td><td>Car Parking Multiplayer can be played both online and offline. You can play the parking challenges offline without an internet connection. You can also play the multiplayer open world mode online with other players. However, you will need an internet connection to download the game and update it to the latest version.</td></tr>
|
101 |
-
<tr><td>How can I get more money in Car Parking Multiplayer?</td><td>You can get more money in Car Parking Multiplayer by completing the parking challenges, washing your car, selling or exchanging your car with other players, or buying premium cars with real money.</td></tr>
|
102 |
-
<tr><td>How can I play Car Parking Multiplayer with my friends?</td><td>You can play Car Parking Multiplayer with your friends by joining the same server and map. You can also create your own private server and invite your friends to join. You can also add your friends as contacts and chat with them in the game.</td></tr>
|
103 |
-
<tr><td>How can I update Car Parking Multiplayer to the latest version?</td><td>You can update Car Parking Multiplayer to the latest version by downloading it from Android Oyun Club. You can also check for updates in the game settings and download them from there. You should always update the game to the latest version to enjoy the new features and improvements.</td></tr>
|
104 |
-
</table>
|
105 |
-
<p>I hope this article has helped you learn more about Android Oyun Club Car Parking Son Sürüm. If you have any questions or feedback, please leave a comment below. Thank you for reading and happy parking!</p> 197e85843d<br />
|
106 |
-
<br />
|
107 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Animal Tycoon - Zoo Craft Game Mod Apk The Ultimate Idle Zoo Simulation.md
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Animal Tycoon - Zoo Craft Game Mod Apk: A Fun and Creative Way to Build Your Own Zoo</h1>
|
3 |
-
<p>Do you love animals and dream of running your own zoo? If so, you might want to check out Animal Tycoon - Zoo Craft Game, a simulation game that lets you build and manage a wildlife park full of exotic creatures. And if you want to make the game even more fun and easy, you can download the mod apk version of Animal Tycoon - Zoo Craft Game, which gives you unlimited money, gems, and access to all the animals and items in the game. In this article, we will tell you more about this game, why you should download the mod apk, how to install it, and some tips and tricks to play it.</p>
|
4 |
-
<h2>animal tycoon zoo craft game mod apk</h2><br /><p><b><b>Download Zip</b> ››› <a href="https://jinyurl.com/2uNLVh">https://jinyurl.com/2uNLVh</a></b></p><br /><br />
|
5 |
-
<h2>What is Animal Tycoon - Zoo Craft Game?</h2>
|
6 |
-
<p>Animal Tycoon - Zoo Craft Game is a simulation game developed by Mini Games Inc. It was released in March 2021 and has over 50,000 downloads on Google Play. The game is rated 4.1 out of 5 stars by the users.</p>
|
7 |
-
<h3>A simulation game that lets you create and manage a wildlife park</h3>
|
8 |
-
<p>In Animal Tycoon - Zoo Craft Game, you are the owner and zookeeper of a wildlife park. Your goal is to create a beautiful and profitable zoo that attracts visitors from all over the world. You can choose from hundreds of animals, habitats, decorations, and attractions to design your park according to your preferences. You can also breed new animals, feed them, play with them, and watch them interact with each other.</p>
|
9 |
-
<h3>A game that features hundreds of animals, habitats, decorations, and attractions</h3>
|
10 |
-
<p>Animal Tycoon - Zoo Craft Game has a huge variety of animals to choose from. You can find common animals like lions, tigers, elephants, giraffes, zebras, pandas, monkeys, bears, penguins, flamingos, etc. You can also find rare animals like unicorns, dragons, dinosaurs, phoenixes, etc. Each animal has its own personality, needs, and preferences. You can also customize their habitats with different types of fences, plants, rocks, water features, etc. You can also decorate your park with various items like statues, fountains, benches, lamps, signs, etc. You can also add attractions like roller coasters, ferris wheels, carousels, etc. to make your park more fun and exciting.</p>
|
11 |
-
<h3>A game that challenges you to satisfy your visitors and earn money</h3>
|
12 |
-
<p>Animal Tycoon - Zoo Craft Game is not just about building your zoo. You also have to manage it well. You have to make sure that your animals are happy and healthy. You have to provide them with food, water, toys, medicine, etc. You also have to keep your park clean and safe. You have to hire staff like cleaners, vets, security guards, etc. You also have to attract visitors to your park by setting ticket prices, advertising campaigns, etc. You have to satisfy their needs and wants by providing them with food, drinks, restrooms, souvenirs, etc. You have to earn money from your visitors and use it to expand and improve your park. You can also earn money from completing quests and achievements.</p>
|
13 |
-
<h2>Why download the mod apk version of Animal Tycoon - Zoo Craft Game?</h2>
|
14 |
-
<p>Animal Tycoon - Zoo Craft Game is a free game to download and play, but it also has some limitations and drawbacks. For example, you have to wait for a long time to collect money and gems, which are the main currencies in the game. You also have to watch ads or spend real money to get more money and gems. You also have to unlock the animals and items by reaching certain levels or paying with money and gems. If you want to enjoy the game without these restrictions and annoyances, you can download the mod apk version of Animal Tycoon - Zoo Craft Game, which offers you many benefits and advantages.</p>
|
15 |
-
<h3>To enjoy unlimited money and gems</h3>
|
16 |
-
<p>The mod apk version of Animal Tycoon - Zoo Craft Game gives you unlimited money and gems from the start. You don't have to wait or watch ads or spend real money to get more money and gems. You can use them to buy anything you want in the game, such as animals, habitats, decorations, attractions, etc. You can also use them to speed up the processes, such as breeding, building, upgrading, etc. You can also use them to skip the quests and achievements if you don't feel like doing them.</p>
|
17 |
-
<h3>To unlock all the animals and items</h3>
|
18 |
-
<p>The mod apk version of Animal Tycoon - Zoo Craft Game also unlocks all the animals and items in the game. You don't have to reach certain levels or pay with money and gems to unlock them. You can access them from the beginning and use them to create your dream zoo. You can also mix and match different animals and items to create unique combinations and designs.</p>
|
19 |
-
<p>animal tycoon zoo craft game hack apk<br />
|
20 |
-
animal tycoon zoo craft game unlimited money apk<br />
|
21 |
-
animal tycoon zoo craft game mod apk download<br />
|
22 |
-
animal tycoon zoo craft game latest version mod apk<br />
|
23 |
-
animal tycoon zoo craft game offline mod apk<br />
|
24 |
-
animal tycoon zoo craft game cheats apk<br />
|
25 |
-
animal tycoon zoo craft game premium mod apk<br />
|
26 |
-
animal tycoon zoo craft game free shopping mod apk<br />
|
27 |
-
animal tycoon zoo craft game unlocked mod apk<br />
|
28 |
-
animal tycoon zoo craft game pro mod apk<br />
|
29 |
-
animal tycoon zoo craft game full mod apk<br />
|
30 |
-
animal tycoon zoo craft game cracked mod apk<br />
|
31 |
-
animal tycoon zoo craft game mega mod apk<br />
|
32 |
-
animal tycoon zoo craft game vip mod apk<br />
|
33 |
-
animal tycoon zoo craft game no ads mod apk<br />
|
34 |
-
animal tycoon zoo craft game android mod apk<br />
|
35 |
-
animal tycoon zoo craft game ios mod apk<br />
|
36 |
-
animal tycoon zoo craft game pc mod apk<br />
|
37 |
-
animal tycoon zoo craft game online mod apk<br />
|
38 |
-
animal tycoon zoo craft game 3d mod apk<br />
|
39 |
-
animal tycoon zoo craft game simulation mod apk<br />
|
40 |
-
animal tycoon zoo craft game idle mod apk<br />
|
41 |
-
animal tycoon zoo craft game strategy mod apk<br />
|
42 |
-
animal tycoon zoo craft game management mod apk<br />
|
43 |
-
animal tycoon zoo craft game adventure mod apk<br />
|
44 |
-
animal tycoon zoo craft game fun mod apk<br />
|
45 |
-
animal tycoon zoo craft game cute mod apk<br />
|
46 |
-
animal tycoon zoo craft game realistic mod apk<br />
|
47 |
-
animal tycoon zoo craft game wild mod apk<br />
|
48 |
-
animal tycoon zoo craft game exotic mod apk<br />
|
49 |
-
animal tycoon zoo craft game rare mod apk<br />
|
50 |
-
animal tycoon zoo craft game endangered mod apk<br />
|
51 |
-
animal tycoon zoo craft game rescue mod apk<br />
|
52 |
-
animal tycoon zoo craft game breeding mod apk<br />
|
53 |
-
animal tycoon zoo craft game evolution mod apk<br />
|
54 |
-
animal tycoon zoo craft game genetics mod apk<br />
|
55 |
-
animal tycoon zoo craft game hybrid mod apk<br />
|
56 |
-
animal tycoon zoo craft game mutation mod apk<br />
|
57 |
-
animal tycoon zoo craft game customization mod apk<br />
|
58 |
-
animal tycoon zoo craft game decoration mod apk</p>
|
59 |
-
<h3>To remove ads and in-app purchases</h3>
|
60 |
-
<p>The mod apk version of Animal Tycoon - Zoo Craft Game also removes all the ads and in-app purchases in the game. You don't have to watch ads or spend real money to enjoy the game. You can play the game without any interruptions or distractions. You can also play the game offline without any internet connection.</p>
|
61 |
-
<h2>How to download and install Animal Tycoon - Zoo Craft Game mod apk?</h2>
|
62 |
-
<p>If you are interested in downloading and installing Animal Tycoon - Zoo Craft Game mod apk, you can follow these simple steps:</p>
|
63 |
-
<h3>Find a reliable source for the mod apk file</h3>
|
64 |
-
<p>The first step is to find a reliable source for the mod apk file of Animal Tycoon - Zoo Craft Game. There are many websites that offer mod apk files for various games, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Some of them may also provide fake or outdated mod apk files that don't work or cause problems in the game. Therefore, you should be careful and do some research before downloading any mod apk file from any website. You should check the reviews, ratings, comments, feedbacks, etc. of other users who have downloaded the mod apk file from that website. You should also scan the mod apk file with an antivirus or anti-malware program before installing it on your device.</p>
|
65 |
-
<h3>Enable unknown sources on your device settings</h3>
|
66 |
-
<p>The second step is to enable unknown sources on your device settings. This is because your device may not allow you to install apps from sources other than Google Play Store by default. To enable unknown sources, you can go to your device settings > security > unknown sources > toggle on. This will allow you to install apps from sources other than Google Play Store.</p>
|
67 |
-
<h3>Download and install the mod apk file</h3>
|
68 |
-
<p>The third step is to download and install the mod apk file of Animal Tycoon - Zoo Craft Game on your device. To do this, you can go to the website where you found the mod apk file and click on the download button. The mod apk file will be downloaded to your device storage. Then, you can go to your device storage > downloads > find the mod apk file > tap on it > install. The installation process may take a few seconds or minutes depending on your device performance.</p>
|
69 |
-
<h3>Launch the game and enjoy</h3>
|
70 |
-
<p>The final step is to launch the game and enjoy it with unlimited money, gems, animals, items, etc. To do this, you can go to your device home screen > find the game icon > tap on it > start playing. You will see that you have unlimited money and gems in your account. You will also see that all the animals and items are unlocked in the game. You can also enjoy the game without any ads or in-app purchases. You can also play the game offline without any internet connection.</p>
|
71 |
-
<h2>What are some tips and tricks to play Animal Tycoon - Zoo Craft Game?</h2>
|
72 |
-
<p>Animal Tycoon - Zoo Craft Game is a fun and creative game, but it can also be challenging and complex. If you want to play the game well and achieve your goals, you can follow these tips and tricks:</p>
|
73 |
-
<h3>Plan your park layout carefully</h3>
|
74 |
-
<p>One of the most important aspects of Animal Tycoon - Zoo Craft Game is the park layout. You have to plan your park layout carefully to make the best use of the space and resources. You have to consider the needs and preferences of your animals, visitors, and staff. You have to balance the aesthetics and functionality of your park. You have to create a park that is attractive, comfortable, convenient, and profitable. Here are some tips to plan your park layout:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Use different types of fences to separate different habitats and zones.</li>
|
77 |
-
<li>Use paths to connect different habitats and attractions.</li>
|
78 |
-
<li>Use signs to guide your visitors and staff.</li>
|
79 |
-
<li>Use plants, rocks, water features, etc. to decorate your habitats and park.</li>
|
80 |
-
<li>Use statues, fountains, benches, lamps, etc. to add more charm and beauty to your park.</li>
|
81 |
-
<li>Use roller coasters, ferris wheels, carousels, etc. to add more fun and excitement to your park.</li>
|
82 |
-
<li>Use food stalls, drink stands, restrooms, souvenir shops, etc. to provide services and amenities to your visitors.</li>
|
83 |
-
<li>Use a table to compare the pros and cons of different types of fences, paths, signs, plants, rocks, water features, statues, fountains, benches, lamps, roller coasters, ferris wheels, carousels, food stalls, drink stands, restrooms, souvenir shops, etc.</li>
|
84 |
-
</ul>
|
85 |
-
<table>
|
86 |
-
<tr>
|
87 |
-
<th>Type</th>
|
88 |
-
<th>Pros</th>
|
89 |
-
<th>Cons</th>
|
90 |
-
</tr>
|
91 |
-
<tr>
|
92 |
-
<td>Fences</td>
|
93 |
-
<td>- Different types of fences have different durability, cost, and appearance.<br>- Fences can keep your animals safe and secure.<br>- Fences can create different habitats and zones in your park.</td>
|
94 |
-
<td>- Fences can block the view of your animals and attractions.<br>- Fences can be damaged by animals or visitors.<br>- Fences can take up space in your park.</td>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>Paths</td>
|
98 |
-
<td>- Paths can connect different habitats and attractions in your park.<br>- Paths can make your park more accessible and convenient for your visitors and staff.<br>- Paths can enhance the look of your park with different colors and patterns.</td>
|
99 |
-
<td>- Paths can be expensive to build and maintain.<br>- Paths can be crowded by visitors and staff.<br>- Paths can limit the space for your animals and attractions.</td>
|
100 |
-
</tr>
|
101 |
-
<tr>
|
102 |
-
<td>Signs</td>
|
103 |
-
<td>- Signs can guide your visitors and staff to different habitats and attractions in your park.<br>- Signs can inform your visitors and staff about the names, facts, and rules of your animals and attractions.<br>- Signs can add more personality and style to your park with different fonts and designs.</td>
|
104 |
-
<td>- Signs can be costly to buy and install.<br>- Signs can be vandalized by visitors or staff.<br>- Signs can clutter your park with too many words and images.</td>
|
105 |
-
</tr>
|
106 |
-
<tr>
|
107 |
-
<td>Plants</td>
|
108 |
-
<td>- Plants can beautify your habitats and park with different colors and shapes.<br>- Plants can provide shade, oxygen, and food for your animals and visitors.<br>- Plants can attract more wildlife and biodiversity to your park.</td>
|
109 |
-
<td>- Plants can be expensive to buy and plant.<br>- Plants can require watering, pruning, fertilizing, etc.<br>- Plants can grow out of control or die if not cared for properly.</td>
|
110 |
-
</tr> <tr>
|
111 |
-
<td>Rocks</td>
|
112 |
-
<td>- Rocks can decorate your habitats and park with different textures and shapes.<br>- Rocks can provide shelter, hiding places, and basking spots for your animals.<br>- Rocks can create natural barriers and boundaries in your park.</td>
|
113 |
-
<td>- Rocks can be heavy and hard to move and place.<br>- Rocks can be sharp and dangerous for your animals and visitors.<br>- Rocks can erode or crack over time.</td>
|
114 |
-
</tr>
|
115 |
-
<tr>
|
116 |
-
<td>Water features</td>
|
117 |
-
<td>- Water features can beautify your habitats and park with different sounds and movements.<br>- Water features can provide water, humidity, and cooling for your animals and visitors.<br>- Water features can attract more aquatic and amphibious animals to your park.</td>
|
118 |
-
<td>- Water features can be costly to build and operate.<br>- Water features can require filtering, pumping, cleaning, etc.<br>- Water features can leak, overflow, or freeze if not maintained properly.</td>
|
119 |
-
</tr>
|
120 |
-
<tr>
|
121 |
-
<td>Statues</td>
|
122 |
-
<td>- Statues can beautify your habitats and park with different artistic and cultural expressions.<br>- Statues can represent your animals, attractions, or themes in your park.<br>- Statues can add more prestige and value to your park.</td>
|
123 |
-
<td>- Statues can be expensive to buy and install.<br>- Statues can be damaged by weather, animals, or visitors.<br>- Statues can take up space in your habitats and park.</td>
|
124 |
-
</tr>
|
125 |
-
<tr>
|
126 |
-
<td>Fountains</td>
|
127 |
-
<td>- Fountains can beautify your habitats and park with different water effects and lights.<br>- Fountains can provide water, humidity, and cooling for your animals and visitors.<br>- Fountains can create a relaxing and soothing atmosphere in your park.</td>
|
128 |
-
<td>- Fountains can be costly to build and operate.<br>- Fountains can require filtering, pumping, cleaning, etc.<br>- Fountains can leak, overflow, or freeze if not maintained properly.</td>
|
129 |
-
</tr>
|
130 |
-
<tr>
|
131 |
-
<td>Benches</td>
|
132 |
-
<td>- Benches can provide seating and resting places for your visitors and staff.<br>- Benches can enhance the comfort and convenience of your park.<br>- Benches can come in different styles and materials to suit your park theme.</td>
|
133 |
-
<td>- Benches can be expensive to buy and install.<br>- Benches can be damaged by weather, animals, or visitors.<br>- Benches can be occupied by unwanted guests or littered with trash.</td>
|
134 |
-
</tr>
|
135 |
-
<tr>
|
136 |
-
<td>Lamps</td>
|
137 |
-
<td>- Lamps can provide lighting and visibility for your habitats and park at night.<br>- Lamps can enhance the beauty and ambiance of your park at night.<br>- Lamps can come in different colors and shapes to suit your park theme.</td>
|
138 |
-
<td>- Lamps can be expensive to buy and install.<br>- Lamps can consume electricity and generate heat.<br>- Lamps can break or malfunction if not maintained properly.</td>
|
139 |
-
</tr>
|
140 |
-
<tr>
|
141 |
-
<td>Roller coasters</td>
|
142 |
-
<td>- Roller coasters can provide thrill and excitement for your visitors.<br>- Roller coasters can attract more visitors to your park.<br>- Roller coasters can come in different types, sizes, and designs to suit your park theme.</td>
|
143 |
-
<td>- Roller coasters can be very expensive to build and operate.<br>- Roller coasters can require safety inspections, maintenance, repairs, etc.<br>- Roller coasters can cause noise, pollution, or accidents if not managed properly.</td>
|
144 |
-
</tr> <tr>
|
145 |
-
<td>Ferris wheels</td>
|
146 |
-
<td>- Ferris wheels can provide a panoramic view of your habitats and park for your visitors.<br>- Ferris wheels can attract more visitors to your park.<br>- Ferris wheels can come in different heights, diameters, and designs to suit your park theme.</td>
|
147 |
-
<td>- Ferris wheels can be very expensive to build and operate.<br>- Ferris wheels can require safety inspections, maintenance, repairs, etc.<br>- Ferris wheels can cause noise, pollution, or accidents if not managed properly.</td>
|
148 |
-
</tr>
|
149 |
-
<tr>
|
150 |
-
<td>Carousels</td>
|
151 |
-
<td>- Carousels can provide a fun and nostalgic ride for your visitors.<br>- Carousels can attract more visitors to your park.<br>- Carousels can come in different themes, styles, and animals to suit your park theme.</td>
|
152 |
-
<td>- Carousels can be expensive to build and operate.<br>- Carousels can require safety inspections, maintenance, repairs, etc.<br>- Carousels can cause noise, pollution, or accidents if not managed properly.</td>
|
153 |
-
</tr>
|
154 |
-
<tr>
|
155 |
-
<td>Food stalls</td>
|
156 |
-
<td>- Food stalls can provide food and drinks for your visitors and staff.<br>- Food stalls can enhance the satisfaction and loyalty of your visitors and staff.<br>- Food stalls can come in different cuisines, menus, and prices to suit your park theme.</td>
|
157 |
-
<td>- Food stalls can be expensive to buy and install.<br>- Food stalls can require food safety inspections, hygiene standards, inventory management, etc.<br>- Food stalls can cause waste, litter, or pests if not cleaned properly.</td>
|
158 |
-
</tr>
|
159 |
-
<tr>
|
160 |
-
<td>Drink stands</td>
|
161 |
-
<td>- Drink stands can provide drinks for your visitors and staff.<br>- Drink stands can enhance the satisfaction and loyalty of your visitors and staff.<br>- Drink stands can come in different types, flavors, and prices to suit your park theme.</td>
|
162 |
-
<td>- Drink stands can be expensive to buy and install.<br>- Drink stands can require food safety inspections, hygiene standards, inventory management, etc.<br>- Drink stands can cause waste, litter, or pests if not cleaned properly.</td>
|
163 |
-
</tr>
|
164 |
-
<tr>
|
165 |
-
<td>Restrooms</td>
|
166 |
-
<td>- Restrooms can provide sanitary facilities for your visitors and staff.<br>- Restrooms can enhance the comfort and convenience of your visitors and staff.<br>- Restrooms can come in different sizes, locations, and designs to suit your park theme.</td>
|
167 |
-
<td>- Restrooms can be expensive to build and maintain.<br>- Restrooms can require plumbing, ventilation, cleaning, etc.<br>- Restrooms can cause odor, pollution, or vandalism if not managed properly.</td>
|
168 |
-
</tr>
|
169 |
-
<tr>
|
170 |
-
<td>Souvenir shops</td>
|
171 |
-
<td>- Souvenir shops can provide souvenirs for your visitors and staff.<br>- Souvenir shops can enhance the memory and loyalty of your visitors and staff.<br>- Souvenir shops can come in different types, items, and prices to suit your park theme.</td>
|
172 |
-
<td>- Souvenir shops can be expensive to buy and install.<br>- Souvenir shops can require inventory management, marketing, sales, etc.<br>- Souvenir shops can cause waste, litter, or theft if not managed properly.</td>
|
173 |
-
</tr>
|
174 |
-
</table>
|
175 |
-
<h2>Conclusion</h2>
|
176 |
-
<p>Animal Tycoon - Zoo Craft Game is a fun and creative game that lets you build and manage your own zoo. You can choose from hundreds of animals, habitats, decorations, and attractions to create your dream zoo. You can also download the mod apk version of Animal Tycoon - Zoo Craft Game to enjoy unlimited money, gems, animals, items, etc. You can also follow some tips and tricks to play the game well and achieve your goals. If you love animals and zoos, you should definitely try Animal Tycoon - Zoo Craft Game mod apk.</p>
|
177 |
-
<h2>FAQs</h2>
|
178 |
-
<p>Here are some frequently asked questions about Animal Tycoon - Zoo Craft Game mod apk:</p>
|
179 |
-
<h3>Q: Is Animal Tycoon - Zoo Craft Game mod apk safe to download and install?</h3>
|
180 |
-
<p>A: Yes, Animal Tycoon - Zoo Craft Game mod apk is safe to download and install if you find a reliable source for the mod apk file. You should also scan the mod apk file with an antivirus or anti-malware program before installing it on your device. You should also enable unknown sources on your device settings to allow the installation of apps from sources other than Google Play Store.</p>
|
181 |
-
<h3>Q: Is Animal Tycoon - Zoo Craft Game mod apk compatible with my device?</h3>
|
182 |
-
<p>A: Animal Tycoon - Zoo Craft Game mod apk is compatible with most Android devices that have Android 4.4 or higher. However, some devices may have different specifications or features that may affect the performance or compatibility of the game. You should check the compatibility of your device with the game before downloading and installing it. You can also contact the developer of the game for more information or support.</p>
|
183 |
-
<h3>Q: How can I update Animal Tycoon - Zoo Craft Game mod apk?</h3>
|
184 |
-
<p>A: Animal Tycoon - Zoo Craft Game mod apk may not update automatically like the original version of the game. You may have to download and install the latest version of the mod apk file manually whenever there is a new update available. You can check the website where you downloaded the mod apk file for any updates or notifications. You can also backup your game data before updating to avoid losing your progress or settings.</p>
|
185 |
-
<h3>Q: How can I uninstall Animal Tycoon - Zoo Craft Game mod apk?</h3>
|
186 |
-
<p>A: Animal Tycoon - Zoo Craft Game mod apk can be uninstalled like any other app on your device. You can go to your device settings > apps > find Animal Tycoon - Zoo Craft Game > tap on it > uninstall. You can also delete the mod apk file from your device storage if you don't need it anymore.</p>
|
187 |
-
<h3>Q: Can I play Animal Tycoon - Zoo Craft Game mod apk with my friends?</h3>
|
188 |
-
<p>A: Animal Tycoon - Zoo Craft Game mod apk does not have a multiplayer or online mode. You can only play the game solo on your device. However, you can still share your park creations and achievements with your friends through social media or screenshots. You can also compare your park ratings and rankings with other players around the world.</p> 197e85843d<br />
|
189 |
-
<br />
|
190 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Fifa Street 4 PC Download - Enjoy Street Soccer in High Resolution.md
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
|
2 |
-
<br> - H2: Requirements and Recommendations: What you need to run FIFA Street 4 on PC smoothly <br> - H2: Methods and Steps: How to download and install FIFA Street 4 on PC using different options <br> - H2: Tips and Tricks: How to optimize your FIFA Street 4 experience on PC <br> - Conclusion: A summary of the main points and a call to action | | H2: Requirements and Recommendations | - H3: Minimum and Recommended System Requirements: The hardware and software specifications for running FIFA Street 4 on PC <br> - H3: Best Emulators for FIFA Street 4: The pros and cons of different emulators that can run FIFA Street 4 on PC <br> - H3: Best Controllers for FIFA Street 4: The advantages and disadvantages of different controllers that can enhance your FIFA Street 4 gameplay on PC | | H2: Methods and Steps | - H3: Method 1: Download FIFA Street 4 from Steam: The easiest and most convenient way to get FIFA Street 4 on PC <br> - H3: Method 2: Download FIFA Street 4 from Origin: Another official and reliable way to get FIFA Street 4 on PC <br> - H3: Method 3: Download FIFA Street 4 from a Torrent Site: A risky but possible way to get FIFA Street 4 on PC for free <br> - H3: Method 4: Download FIFA Street 4 from a ROM Site: A similar but safer way to get FIFA Street 4 on PC for free <br> - H3: Method 5: Download FIFA Street 4 from a Mod Site: A creative and fun way to get FIFA Street 4 on PC with extra features | | H2: Tips and Tricks | - H3: How to Configure Your Emulator Settings for FIFA Street 4: How to adjust the graphics, audio, input, and performance settings of your emulator for optimal FIFA Street 4 gameplay <br> - H3: How to Customize Your Controller Settings for FIFA Street 4: How to map the buttons, sticks, triggers, and vibration of your controller for intuitive FIFA Street 4 controls <br> - H3: How to Access the Online Features of FIFA Street 4 on PC: How to connect to the EA servers, play online matches, join tournaments, and unlock rewards in FIFA Street 4 
on PC | Table 2: Article with HTML formatting <h1>Download FIFA Street 4 PC: How to Play the Ultimate Street Soccer Game on Your Computer</h1>
|
3 |
-
<p>If you are a fan of soccer games, you have probably heard of or played FIFA Street 4, also known as FIFA Street 2012. It is a spin-off of the popular FIFA series that focuses on street soccer, where you can showcase your skills, style, and creativity in various urban locations around the world. It features over 50 teams, over 35 locations, over 500 players, and a variety of modes, such as World Tour, Hit the Streets, Freestyle, Last Man Standing, Panna Rules, Futsal, and Custom Match.</p>
|
4 |
-
<h2>download fifa street 4 pc</h2><br /><p><b><b>Download</b> → <a href="https://jinyurl.com/2uNSRF">https://jinyurl.com/2uNSRF</a></b></p><br /><br />
|
5 |
-
<p>FIFA Street 4 was originally released for PlayStation 3 and Xbox 360 in March 2012. It received positive reviews from critics and players alike, who praised its gameplay, graphics, sound, customization, and online features. It was also a commercial success, selling over one million copies worldwide.</p>
|
6 |
-
<p>But what if you want to play FIFA Street 4 on your PC? Unfortunately, there is no official PC version of the game. However, there are some ways to download and install FIFA Street 4 on your computer using different methods. In this article, we will show you how to do that step by step. We will also give you some tips and tricks on how to optimize your FIFA Street 4 experience on PC.</p>
|
7 |
-
<h2>Requirements and Recommendations</h2>
|
8 |
-
<p>Before you download and install FIFA Street 4 on your PC, you need to make sure that your computer meets the minimum and recommended system requirements for running the game smoothly. You also need to choose the best emulator for playing FIFA Street 4 on your PC. And finally, you need to decide which controller you want to use for enjoying FIFA Street 4 gameplay.</p>
|
9 |
-
<h3>Minimum and Recommended System Requirements</h3>
|
10 |
-
<p>The minimum and recommended system requirements for running FIFA Street 4 on your PC are as follows: - Minimum System Requirements: - CPU: Intel Core 2 Duo 2.0 GHz or higher - RAM: 2 GB - VIDEO CARD: DirectX 9.0c/Shader3.0 or higher compatible, NVIDIA GeForce 8600 series or higher, ATI Radeon (TM) X 1900 or higher, VRAM :512MB or higher - Recommended System Requirements: - CPU: Intel Core i5 or higher - RAM: 4 GB or more - VIDEO CARD: DirectX 11 or higher compatible, NVIDIA GeForce GTX 1050 or higher, AMD Radeon RX 560 or higher, VRAM :2GB or more These system requirements are based on the ones for Street Fighter IV, which is a similar game in terms of graphics and gameplay. However, since FIFA Street 4 is not officially supported on PC, you may encounter some issues or errors depending on your hardware and software configuration. <h3>Best Emulators for FIFA Street 4</h3>
|
11 |
-
An emulator is a software that allows you to run games or applications that are designed for a different platform on your PC. For example, you can use an emulator to play PlayStation 3 or Xbox 360 games on your PC. There are many emulators available for different platforms, but not all of them are compatible with FIFA Street 4. Here are some of the best emulators that can run FIFA Street 4 on PC: - RPCS3: This is an open source emulator for PlayStation 3 games. It is one of the most advanced and stable emulators for PS3, and it can run many games at high resolution and frame rate. It also supports online features, custom settings, and controller mapping. You can download RPCS3 from its official website or from its GitHub page . You will also need a PS3 BIOS file and a FIFA Street 4 ISO file to run the game on RPCS3. - Xenia: This is an open source emulator for Xbox 360 games. It is still in development and has some limitations and bugs, but it can run some games at decent performance and quality. It also supports online features, custom settings, and controller mapping. You can download Xenia from its official website or from its GitHub page . You will also need an Xbox 360 BIOS file and a FIFA Street 4 ISO file to run the game on Xenia. - PCSX2: This is an open source emulator for PlayStation 2 games. It is one of the most popular and reliable emulators for PS2, and it can run many games at high resolution and frame rate. It also supports online features, custom settings, and controller mapping. You can download PCSX2 from its official website or from its GitHub page . You will also need a PS2 BIOS file and a FIFA Street ISO file to run the game on PCSX2. Each emulator has its own advantages and disadvantages, so you may want to try them out and see which one works best for you. You can also check out some YouTube videos that show how to download, install, and configure each emulator for FIFA Street 4. 
For example, you can watch this video for RPCS3, this video for Xenia, and this video for PCSX2. <h3>Best Controllers for FIFA Street 4</h3>
|
12 |
-
A controller is a device that allows you to control the game using buttons, sticks, triggers, and vibration. A controller can enhance your FIFA Street 4 gameplay by giving you more precision, comfort, and feedback. There are many controllers available for different platforms, but not all of them are compatible with FIFA Street 4 on PC. Here are some of the best controllers that can work with FIFA Street 4 on PC: - Xbox One Controller: This is the official controller for Xbox One consoles. It is one of the most widely used and supported controllers for PC gaming, as it has native compatibility with Windows 10 and many games and emulators. It has a sleek design, ergonomic grip, responsive buttons, analog sticks, triggers, bumpers, D-pad, and vibration motors. It also has a headphone jack, a micro USB port, and a wireless adapter. You can connect it to your PC via USB cable or Bluetooth. - DualShock 4 Controller: This is the official controller for PlayStation 4 consoles. It is another popular and versatile controller for PC gaming, as it has native compatibility with Steam and some games and emulators. It has a stylish design, comfortable grip, responsive buttons, analog sticks, triggers, bumpers, D-pad, and vibration motors. It also has a touchpad, a light bar, a speaker, a headphone jack, a micro USB port, and a wireless adapter. You can connect it to your PC via USB cable or Bluetooth. - Logitech F310 Controller: This is a wired controller for PC gaming. It is one of the most affordable and reliable controllers for PC gaming, as it has native compatibility with Windows and many games and emulators. It has a classic design, durable grip, responsive buttons, analog sticks, triggers, bumpers, D-pad, and vibration motors. It also has a mode switch, a back button, and a start button. You can connect it to your PC via USB cable. - 8BitDo SN30 Pro Controller: This is a wireless controller for PC gaming. 
It is one of the most retro and stylish controllers for PC gaming, as it has native compatibility with Windows, Android, macOS, Steam, Switch, and Raspberry Pi. It has a nostalgic design, comfortable grip, responsive buttons, analog sticks, triggers, bumpers, D-pad, and vibration motors. It also has a turbo function, a screenshot button, a home button, and a star button. You can connect it to your PC via USB cable or Bluetooth. Each controller has its own advantages and disadvantages, so you may want to try them out and see which one suits your preference and budget. You can also check out some YouTube videos that show how to connect and configure each controller for FIFA Street 4 on PC. For example, you can watch this video for Xbox One Controller, this video for DualShock 4 Controller, this video for Logitech F310 Controller, and this video for 8BitDo SN30 Pro Controller. <h2>Methods and Steps</h2>
|
13 |
-
<p>Now that you have checked the system requirements, chosen the emulator, and decided the controller for playing FIFA Street 4 on PC, you are ready to download and install the game on your computer. There are several methods to do that using different sources and options. In this section, we will show you how to download and install FIFA Street 4 on PC using five different methods:</p>
|
14 |
-
<p>How to download fifa street 4 pc free full version highly compressed<br />
|
15 |
-
Fifa street 4 pc performance test on rpcs3 ps3 emulator<br />
|
16 |
-
Fifa street 4 installer v.3.1 pc download from 4shared<br />
|
17 |
-
Fifa street 4 pc system requirements and installation guide<br />
|
18 |
-
Fifa street 4 pc gameplay and review<br />
|
19 |
-
Fifa street 4 pc cheats and tips<br />
|
20 |
-
Fifa street 4 pc best teams and players<br />
|
21 |
-
Fifa street 4 pc online multiplayer mode<br />
|
22 |
-
Fifa street 4 pc mods and patches<br />
|
23 |
-
Fifa street 4 pc vs ps3 vs xbox 360 comparison<br />
|
24 |
-
Fifa street 4 pc download torrent link<br />
|
25 |
-
Fifa street 4 pc crack and serial key<br />
|
26 |
-
Fifa street 4 pc controller support and settings<br />
|
27 |
-
Fifa street 4 pc graphics and sound quality<br />
|
28 |
-
Fifa street 4 pc features and modes<br />
|
29 |
-
Fifa street 4 pc download size and speed<br />
|
30 |
-
Fifa street 4 pc problems and solutions<br />
|
31 |
-
Fifa street 4 pc demo and trial version<br />
|
32 |
-
Fifa street 4 pc release date and price<br />
|
33 |
-
Fifa street 4 pc official website and forum<br />
|
34 |
-
Fifa street 4 pc screenshots and videos<br />
|
35 |
-
Fifa street 4 pc news and updates<br />
|
36 |
-
Fifa street 4 pc ratings and reviews<br />
|
37 |
-
Fifa street 4 pc download from steam or origin<br />
|
38 |
-
Fifa street 4 pc minimum and recommended specs<br />
|
39 |
-
Fifa street 4 pc world tour and tournaments<br />
|
40 |
-
Fifa street 4 pc legends and unlockables<br />
|
41 |
-
Fifa street 4 pc customizations and options<br />
|
42 |
-
Fifa street 4 pc tricks and skills<br />
|
43 |
-
Fifa street 4 pc fun and challenges</p>
|
44 |
-
- Method 1: Download FIFA Street 4 from Steam - Method 2: Download FIFA Street 4 from Origin - Method 3: Download FIFA Street 4 from a Torrent Site - Method 4: Download FIFA Street 4 from a ROM Site - Method 5: Download FIFA Street 4 from a Mod Site <p>Each method has its own pros and cons, so you may want to choose the one that works best for you. However, we recommend that you use the official and legal methods (Method 1 and Method 2) as much as possible to avoid any potential risks or issues.</p>
|
45 |
-
<h3>Method 1: Download FIFA Street 4 from Steam</h3>
|
46 |
-
<p>Steam is the most popular and convenient platform for downloading and playing PC games. It offers thousands of games across various genres and categories at reasonable prices. It also provides various features such as cloud saving, achievements, friends, reviews, and community. You can download FIFA Street 4 from Steam by following these steps: - Step 1: Create a Steam account or log in to your existing account. You can do this by visiting the Steam website or by downloading the Steam client and installing it on your PC. - Step 2: Search for FIFA Street 4 on the Steam store or click on this link to go directly to the game page. You will see the game details, screenshots, videos, reviews, and system requirements. - Step 3: Click on the "Add to Cart" button to purchase the game. You will need to pay $19.99 USD or equivalent in your currency. You can use various payment methods such as credit card, debit card, PayPal, Steam Wallet, or gift card. - Step 4: After completing the payment, click on the "Library" tab on the Steam client. You will see FIFA Street 4 in your list of games. Click on it to start downloading and installing the game on your PC. - Step 5: Once the download and installation are finished, click on the "Play" button to launch the game. You will need to log in to your EA account or create a new one to access the online features of the game. This method is the easiest and most convenient way to get FIFA Street 4 on PC. However, it also has some drawbacks, such as: - You need to have a stable internet connection and enough disk space to download and install the game. - You need to pay for the game and agree to the terms and conditions of Steam and EA. - You need to have a compatible emulator and controller to play the game on PC. - You may encounter some compatibility or performance issues depending on your system configuration and emulator settings. <h3>Method 2: Download FIFA Street 4 from Origin</h3>
|
47 |
-
Origin is another popular and reliable platform for downloading and playing PC games. It is owned by EA, the publisher of FIFA Street 4. It offers many EA games across various genres and categories at reasonable prices. It also provides various features such as cloud saving, achievements, friends, reviews, and community. You can download FIFA Street 4 from Origin by following these steps: - Step 1: Create an Origin account or log in to your existing account. You can do this by visiting the Origin website or by downloading the Origin client and installing it on your PC. - Step 2: Search for FIFA Street 4 on the Origin store or click on this link to go directly to the game page. You will see the game details, screenshots, videos, reviews, and system requirements. - Step 3: Click on the "Buy Now" button to purchase the game. You will need to pay $19.99 USD or equivalent in your currency. You can use various payment methods such as credit card, debit card, PayPal, Origin Wallet, or gift card. - Step 4: After completing the payment, click on the "My Game Library" tab on the Origin client. You will see FIFA Street 4 in your list of games. Click on it to start downloading and installing the game on your PC. - Step 5: Once the download and installation are finished, click on the "Play" button to launch the game. You will need to log in to your EA account or create a new one to access the online features of the game. This method is another official and reliable way to get FIFA Street 4 on PC. However, it also has some drawbacks, such as: - You need to have a stable internet connection and enough disk space to download and install the game. - You need to pay for the game and agree to the terms and conditions of Origin and EA. - You need to have a compatible emulator and controller to play the game on PC. - You may encounter some compatibility or performance issues depending on your system configuration and emulator settings. 
<h3>Method 3: Download FIFA Street 4 from a Torrent Site</h3>
|
48 |
-
A torrent site is a website that allows you to download files from other users using a peer-to-peer network. A torrent file is a small file that contains information about the files that you want to download, such as the name, size, type, and location. A torrent client is a software that allows you to open and download the files from the torrent file using the peer-to-peer network. There are many torrent sites and torrent clients available for different platforms, but not all of them are safe and legal. Here are some of the best torrent sites and torrent clients that can help you download FIFA Street 4 on PC: - Torrent Site: The Pirate Bay: This is one of the most popular and notorious torrent sites in the world. It offers millions of torrent files across various categories and genres, including games, movies, music, software, and more. It also has a simple and user-friendly interface, a search engine, a comment section, and a rating system. You can access The Pirate Bay by visiting its official website or by using a proxy or a VPN service. - Torrent Client: uTorrent: This is one of the most widely used and trusted torrent clients for PC gaming. It is a lightweight and powerful software that allows you to download and manage your torrent files easily and efficiently. It also supports various features such as magnet links, streaming, bandwidth control, encryption, remote access, and more. You can download uTorrent from its official website or from its GitHub page . You can download FIFA Street 4 from a torrent site by following these steps: - Step 1: Open your web browser and go to The Pirate Bay website or use a proxy or a VPN service to access it. - Step 2: Search for FIFA Street 4 on the search bar or click on this link to go directly to the game page. You will see the game details, screenshots, videos, comments, and ratings. - Step 3: Click on the "Get this torrent" button to download the torrent file of FIFA Street 4. 
You will need to choose a location to save the file on your PC. - Step 4: Open your torrent client and add the torrent file of FIFA Street 4. You will see the game files that you want to download, such as the ISO file, the crack file, the readme file, and more. - Step 5: Start downloading the game files from the peer-to-peer network. You will need to have a stable internet connection and enough disk space to download the game files. - Step 6: Once the download is finished, open the ISO file of FIFA Street 4 using a software such as WinRAR or Daemon Tools. You will see the game folder that contains the setup file, the crack file, the readme file, and more. - Step 7: Run the setup file of FIFA Street 4 and follow the instructions to install the game on your PC. You will need to choose a location to install the game on your PC. - Step 8: Copy the crack file of FIFA Street 4 from the ISO file and paste it into the game folder where you installed the game on your PC. This will overwrite the original game file and allow you to play the game without any restrictions. - Step 9: Run the game file of FIFA Street 4 from the game folder where you installed the game on your PC. You will need to log in to your EA account or create a new one to access the online features of the game. This method is a risky but possible way to get FIFA Street 4 on PC for free. However, it also has some drawbacks, such as: - You need to have a stable internet connection and enough disk space to download and install the game files. - You may encounter some legal or ethical issues for downloading and using pirated or cracked games. - You may expose your PC to viruses, malware, or spyware that can harm your system or data. - You may face some compatibility or performance issues depending on your system configuration and emulator settings. <h3>Method 4: Download FIFA Street 4 from a ROM Site</h3>
|
49 |
-
A ROM site is a website that allows you to download ROM files of games that are designed for a different platform. A ROM file is a file that contains the data of a game that can be read by an emulator. There are many ROM sites available for different platforms, but not all of them are safe and legal. Here are some of the best ROM sites that can help you download FIFA Street 4 on PC: - ROM Site: CoolROM: This is one of the most popular and trusted ROM sites in the world. It offers thousands of ROM files across various platforms and genres, including PlayStation 3, Xbox 360, PlayStation 2, and more. It also has a simple and user-friendly interface, a search engine, a comment section, and a rating system. You can access CoolROM by visiting its official website or by using a proxy or a VPN service. - ROM Site: EmuParadise: This is another popular and reliable ROM site in the world. It offers thousands of ROM files across various platforms and genres, including PlayStation 3, Xbox 360, PlayStation 2, and more. It also has a simple and user-friendly interface, a search engine, a comment section, and a rating system. You can access EmuParadise by visiting its official website or by using a proxy or a VPN service. You can download FIFA Street 4 from a ROM site by following these steps: - Step 1: Open your web browser and go to CoolROM or EmuParadise website or use a proxy or a VPN service to access it. - Step 2: Search for FIFA Street 4 on the search bar or click on this link for CoolROM or this link for EmuParadise to go directly to the game page. You will see the game details, screenshots, videos, comments, and ratings. - Step 3: Click on the "Download Now" button to download the ROM file of FIFA Street 4. You will need to choose a location to save the file on your PC. - Step 4: Open your emulator and add the ROM file of FIFA Street 4. You will see the game files that you want to play, such as the ISO file, the readme file, and more. 
- Step 5: Start playing the game from your emulator. You will need to log in to your EA account or create a new one to access the online features of the game. This method is a similar but safer way to get FIFA Street 4 on PC for free. However, it also has some drawbacks, such as: - You need to have a stable internet connection and enough disk space to download and play the game files. - You may encounter some legal or ethical issues for downloading and using ROM files of games that are not in the public domain or that you do not own. - You may expose your PC to viruses, malware, or spyware that can harm your system or data. - You may face some compatibility or performance issues depending on your system configuration and emulator settings. <h3>Method 5: Download FIFA Street 4 from a Mod Site</h3>
|
50 |
-
A mod site is a website that allows you to download mod files of games that are modified or enhanced by other users. A mod file is a file that contains the data of a game that can be changed by an emulator or a software. There are many mod sites available for different platforms, but not all of them are safe and legal. Here are some of the best mod sites that can help you download FIFA Street 4 on PC: - Mod Site: Nexus Mods: This is one of the most popular and trusted mod sites in the world. It offers thousands of mod files across various platforms and genres, including PC, PlayStation, Xbox, Nintendo, and more. It also has a simple and user-friendly interface, a search engine, a comment section, and a rating system. You can access Nexus Mods by visiting its official website or by using a proxy or a VPN service. - Mod Site: Mod DB: This is another popular and reliable mod site in the world. It offers thousands of mod files across various platforms and genres, including PC, PlayStation, Xbox, Nintendo, and more. It also has a simple and user-friendly interface, a search engine, a comment section, and a rating system. You can access Mod DB by visiting its official website or by using a proxy or a VPN service. You can download FIFA Street 4 from a mod site by following these steps: - Step 1: Open your web browser and go to Nexus Mods or Mod DB website or use a proxy or a VPN service to access it. - Step 2: Search for FIFA Street 4 on the search bar or click on this link for Nexus Mods or this link for Mod DB to go directly to the game page. You will see the game details, screenshots, videos, comments, and ratings. - Step 3: Click on the "Download" button to download the mod file of FIFA Street 4. You will need to choose a location to save the file on your PC. - Step 4: Open your emulator or software and add the mod file of FIFA Street 4. You will see the game files that you want to play, such as the ISO file, the readme file, and more. 
- Step 5: Start playing the game from your emulator or software. You will need to log in to your EA account or create a new one to access the online features of the game. This method is a creative and fun way to get FIFA Street 4 on PC with extra features. However, it also has some drawbacks, such as: - You need to have a stable internet connection and enough disk space to download and play the game files. - You may encounter some legal or ethical issues for downloading and using mod files of games that are not authorized or approved by the original developers or publishers. - You may expose your PC to viruses, malware, or spyware that can harm your system or data. - You may face some compatibility or performance issues depending on your system configuration and emulator or software settings. <h2>Tips and Tricks</h2>
|
51 |
-
<p>Now that you have downloaded and installed FIFA Street 4 on your PC using one of the methods above, you are ready to enjoy the ultimate street soccer game on your computer. However, you may want to optimize your FIFA Street 4 experience on PC by adjusting some settings and customizing some features. In this section, we will give you some tips and tricks on how to do that:</p>
|
52 |
-
- How to Configure Your Emulator Settings for FIFA Street 4 - How to Customize Your Controller Settings for FIFA Street 4 - How to Access the Online Features of FIFA Street 4 on PC <h3>How to Configure Your Emulator Settings for FIFA Street 4</h3>
|
53 |
-
<p>An emulator is a software that allows you to run games or applications that are designed for a different platform on your PC. For example, you can use an emulator to play PlayStation 3 or Xbox 360 games on your PC. However, an emulator may not run the game perfectly by default, and you may need to configure some settings to improve the graphics, audio, input, and performance of the game. Here are some steps on how to configure your emulator settings for FIFA Street 4:</p>
|
54 |
-
- Step 1: Open your emulator and go to the settings menu. You will see different options and tabs for various settings, such as graphics, audio, input, and performance. - Step 2: Adjust the graphics settings according to your preference and system capability. You can change the resolution, aspect ratio, frame rate, anti-aliasing, texture filtering, shaders, and more. You can also enable or disable fullscreen mode, vsync, and windowed mode. The higher the graphics settings, the better the game will look, but the more demanding it will be on your system. - Step 3: Adjust the audio settings according to your preference and system capability. You can change the volume, output device, sample rate, latency, and more. You can also enable or disable sound effects, music, voice, and subtitles. The higher the audio settings, the better the game will sound, but the more demanding it will be on your system. - Step 4: Adjust the input settings according to your preference and controller type. You can map the buttons, sticks, triggers, and vibration of your controller to match the game controls. You can also enable or disable analog mode, dead zone, sensitivity, and rumble. The more accurate the input settings, the better the game will respond, but the more complex it will be to set up. - Step 5: Adjust the performance settings according to your preference and system capability. You can change the emulation speed, CPU mode, GPU mode, cache size, and more. You can also enable or disable hacks, cheats, patches, and logs. The higher the performance settings, the faster the game will run, but the more unstable it will be on your system. You can also use some presets or profiles that are already configured for FIFA Street 4 by other users or developers. You can find them on the emulator website, forum, or wiki. You can also save your own settings and load them later. <h3>How to Customize Your Controller Settings for FIFA Street 4</h3>
|
55 |
-
<p>A controller is a device that allows you to control the game using buttons, sticks, triggers, and vibration. A controller can enhance your FIFA Street 4 gameplay by giving you more precision, comfort, and feedback. However, a controller may not work well with the game by default, and you may need to customize some settings to improve the mapping, sensitivity, and vibration of the controller. Here are some steps on how to customize your controller settings for FIFA Street 4:</p>
|
56 |
-
- Step 1: Open your emulator and go to the input menu. You will see different options and tabs for various input devices, such as keyboard, mouse, joystick, gamepad, and more. - Step 2: Choose the input device that you want to use for playing FIFA Street 4. You can use a keyboard and mouse, but we recommend using a controller for better gameplay. - Step 3: Configure the mapping of your controller according to your preference and game controls. You can assign the buttons, sticks, triggers, and vibration of your controller to match the actions, movements, skills, and feedback of the game. You can also enable or disable analog mode, dead zone, sensitivity, and rumble. - Step 4: Test your controller settings by playing a practice match or a tutorial in FIFA Street 4. You can check if the controller works properly and if you are comfortable with the settings. You can also adjust the settings as you play until you find the best configuration for you. - Step 5: Save your controller settings and load them later. You can also use some presets or profiles that are already configured for FIFA Street 4 by other users or developers. You can find them on the emulator website, forum, or wiki. <h3>How to Access the Online Features of FIFA Street 4 on PC</h3>
|
57 |
-
<p>One of the best features of FIFA Street 4 is its online mode, where you can connect to the EA servers, play online matches, join tournaments, and unlock rewards. However, accessing the online features of FIFA Street 4 on PC may not be easy or possible depending on your method and emulator. Here are some tips on how to access the online features of FIFA Street 4 on PC:</p>
|
58 |
-
- Tip 1: Use an official and legal method (Method 1 or Method 2) to download and install FIFA Street 4 on PC. This will ensure that you have a valid copy of the game and that you can log in to your EA account without any issues. - Tip 2: Use a compatible and stable emulator (RPCS3 or Xenia) to play FIFA Street 4 on PC. This will ensure that you can run the game smoothly and that you can connect to the EA servers without any errors. - Tip 3: Use a reliable and secure internet connection to play FIFA Street 4 on PC. This will ensure that you can play online matches without any lag or disconnection. - Tip 4: Use a VPN service to play FIFA Street 4 on PC. This will ensure that you can bypass any regional or network restrictions that may prevent you from accessing the online features of the game. - Tip 5: Use a mod file or a patch file to play FIFA Street 4 on PC. This will ensure that you can unlock some extra features or fix some bugs that may affect the online mode of the game. <h2>Conclusion</h2>
|
59 |
-
<p>FIFA Street 4 is one of the best soccer games ever made. It offers a unique and exciting street soccer experience that showcases your skills, style, and creativity in various urban locations around the world. It features over 50 teams, over 35 locations, over 500 players, and a variety of modes, such as World Tour, Hit the Streets, Freestyle, Last Man Standing, Panna Rules, Futsal, and Custom Match.</p>
|
60 |
-
<p>However, if you want to play FIFA Street 4 on your PC, you may face some challenges, as there is no official PC version of the game. But don't worry, we have got you covered. In this article, we have shown you how to download and install FIFA Street 4 on PC using five different methods:</p>
|
61 |
-
- Method 1: Download FIFA Street 4 from Steam - Method 2: Download FIFA Street 4 from Origin - Method 3: Download FIFA Street 4 from a Torrent Site - Method 4: Download FIFA Street 4 from a ROM Site - Method 5: Download FIFA Street 4 from a Mod Site <p>We have also given you some tips and tricks on how to optimize your FIFA Street 4 experience on PC by configuring your emulator settings, customizing your controller settings, and accessing the online features of the game.</p>
|
62 |
-
<p>We hope that this article has helped you to play FIFA Street 4 on PC and enjoy the ultimate street soccer game on your computer. If you have any questions or feedback, please feel free to leave a comment below. And if you liked this article, please share it with your friends and family who are also fans of soccer games.</p>
|
63 |
-
<h2>FAQs</h2>
|
64 |
-
<p>Here are some of the frequently asked questions about FIFA Street 4 on PC:</p>
|
65 |
-
<h3>Q: Is FIFA Street 4 available for PC?</h3>
|
66 |
-
<p>A: No, FIFA Street 4 is not officially available for PC. It was only released for PlayStation 3 and Xbox 360 in March 2012. However, you can use some methods and emulators to play FIFA Street 4 on PC.</p>
|
67 |
-
<h3>Q: What is the best method to download and install FIFA Street 4 on PC?</h3>
|
68 |
-
<p>A: The best method to download and install FIFA Street 4 on PC depends on your preference and situation. However, we recommend that you use the official and legal methods (Method 1 and Method 2) as much as possible to avoid any potential risks or issues.</p>
|
69 |
-
<h3>Q: What is the best emulator to play FIFA Street 4 on PC?</h3>
|
70 |
-
<p>A: The best emulator to play FIFA Street 4 on PC depends on your system configuration and performance. However, we recommend that you use RPCS3 or Xenia as they are the most compatible and stable emulators for PlayStation 3 and Xbox 360 games.</p>
|
71 |
-
<h3>Q: What is the best controller to play FIFA Street 4 on PC?</h3>
|
72 |
-
<p>A: The best controller to play FIFA Street 4 on PC depends on your preference and budget. However, we recommend that you use Xbox One Controller or DualShock 4 Controller as they are the most widely used and supported controllers for PC gaming.</p>
|
73 |
-
<h3>Q: How can I access the online features of FIFA Street 4 on PC?</h3>
|
74 |
-
<p>A: You can access the online features of FIFA Street 4 on PC by using an official and legal method (Method 1 or Method 2) to download and install the game, using a compatible and stable emulator (RPCS3 or Xenia) to play the game, using a reliable and secure internet connection to connect to the EA servers, using a VPN service to bypass any regional or network restrictions, and using a mod file or a patch file to unlock some extra features or fix some bugs.</p><br />
|
75 |
-
<br />
|
76 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/3B-Group/ConvRe-Leaderboard/app.py
DELETED
@@ -1,237 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import pandas as pd
|
3 |
-
|
4 |
-
from src.css_html import custom_css
|
5 |
-
from src.utils import (
|
6 |
-
AutoEvalColumn,
|
7 |
-
fields,
|
8 |
-
make_clickable_names,
|
9 |
-
make_plot_data
|
10 |
-
)
|
11 |
-
from src.demo import (
|
12 |
-
generate,
|
13 |
-
random_examples,
|
14 |
-
return_ground_truth,
|
15 |
-
)
|
16 |
-
|
17 |
-
|
18 |
-
# Default Llama-2-style system prompt used by the demo model unless the user
# overrides it in the "Additional Inputs" accordion.
DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
# Hard upper bound for the "Max new tokens" slider.
MAX_MAX_NEW_TOKENS = 1024
# Initial value for the "Max new tokens" slider.
DEFAULT_MAX_NEW_TOKENS = 512


# Leaderboard data: one row per evaluated model.
df = pd.read_csv("data/eval_board.csv")

# Column metadata derived from the AutoEvalColumn schema.
COLS = [c.name for c in fields(AutoEvalColumn)]
TYPES = [c.type for c in fields(AutoEvalColumn)]
# "Lite" variants: only the columns that are shown by default.
COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
|
29 |
-
|
30 |
-
|
31 |
-
def add_new_eval(
|
32 |
-
model: str,
|
33 |
-
re2text_easy_precision: str,
|
34 |
-
re2text_hard_precision: str,
|
35 |
-
text2re_easy_precision: str,
|
36 |
-
text2re_hard_precision: str,
|
37 |
-
links: str,
|
38 |
-
):
|
39 |
-
print("adding new eval")
|
40 |
-
|
41 |
-
eval_entry = {
|
42 |
-
"model": model,
|
43 |
-
"re2text_easy": re2text_easy_precision,
|
44 |
-
"re2text_hard": re2text_hard_precision,
|
45 |
-
"text2re_easy": text2re_easy_precision,
|
46 |
-
"text2re_hard": text2re_hard_precision,
|
47 |
-
"link": links
|
48 |
-
}
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
def select_columns(df, columns):
    """Return a view of ``df`` limited to the model column plus ``columns``.

    Column order follows the module-level ``COLS`` list, so the display
    ordering stays stable regardless of the order in ``columns``.
    """
    mandatory = [AutoEvalColumn.model.name]
    requested = [name for name in COLS if name in df.columns and name in columns]
    return df[mandatory + requested]
|
61 |
-
|
62 |
-
|
63 |
-
# Keep the raw model name around, then turn the displayed name into a link.
df["pure_name"] = df['Models']
df = make_clickable_names(df)
demo = gr.Blocks(css=custom_css)

with demo:
    # Page title.
    with gr.Row():
        gr.Markdown(
            """<div align= "center">
<h1>🤖 ConvRe 🤯 <span style='color: #e6b800;'> Leaderboard</span></h1>
</div>

""",
            elem_classes="markdown-text",
        )

    # Benchmark description shown under the title.
    gr.Markdown("""🤖**ConvRe**🤯 is the benchmark proposed in our EMNLP 2023 main conference paper: [An Investigation of LLMs’ Inefficacy in Understanding Converse Relations](https://arxiv.org/abs/2310.05163).
It aims to evaluate LLMs' ability on understanding converse relations.
Converse relation is defined as the opposite of semantic relation while keeping the surface form of the triple unchanged.
For example, the triple `(x, has part, y)` is interpreted as "x has a part called y" in normal relation, while "y has a part called x" in converse relation 🔁.

The experiments in our paper suggested that LLMs often resort to shortcut learning (or superficial correlations) and still face challenges on our 🤖ConvRe🤯 benchmark even for powerful models like GPT-4.
""", elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Tab 1: the leaderboard table with a column picker.
        with gr.TabItem("🔢 Data", id=0):
            with gr.Accordion("➡️ See All Columns", open=False):
                shown_columns = gr.CheckboxGroup(
                    choices=[
                        c for c in COLS if c not in [AutoEvalColumn.model.name]
                    ],
                    value=[
                        c for c in COLS_LITE if c not in [AutoEvalColumn.model.name]
                    ],
                    label="",
                    elem_id="column-select",
                    interactive=True
                )
            leaderboard_df_re2text = gr.components.Dataframe(
                value=df[
                    [
                        AutoEvalColumn.model.name,
                    ] + shown_columns.value
                ],
                headers=[
                    AutoEvalColumn.model.name,
                ] + shown_columns.value,
                datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
            )

            # Hidden full table: stable data source for re-filtering when the
            # visible column selection changes.
            hidden_leaderboard_df_re2text = gr.components.DataFrame(
                value=df,
                headers=COLS,
                datatype=["str" for _ in range(len(COLS))],
                visible=False,
            )

            shown_columns.change(
                select_columns,
                [hidden_leaderboard_df_re2text, shown_columns],
                leaderboard_df_re2text
            )

        # Tab 2: accuracy line plots for both task directions.
        with gr.TabItem("📊 Plot", id=1):
            with gr.Row():
                with gr.Column():
                    gr.LinePlot(
                        make_plot_data(df, task="Re2Text"),
                        x="Setting",
                        y="Accuracy",
                        color="Symbol",
                        title="Re2Text",
                        y_lim=[0, 100],
                        x_label_angle=0,
                        height=400,
                        width=500,
                    )

                with gr.Column():
                    gr.LinePlot(
                        make_plot_data(df, task="Text2Re"),
                        x="Setting",
                        y="Accuracy",
                        color="Symbol",
                        title="Text2Re",
                        y_lim=[0, 100],
                        x_label_angle=0,
                        height=400,
                        width=500,
                    )

        # Tab 3: submission placeholder.
        # NOTE(review): "Comming" is a typo in the displayed text ("Coming").
        with gr.TabItem("Submit results 🚀", id=3):
            gr.Markdown("""<div align= "center">
<h1>Comming Soon ❤️</span></h1>
</div>

""")

    # Live demo section: query Llama-2-Chat-7B on benchmark examples.
    with gr.Column():
        gr.Markdown(
            """<div style="text-align: center;"><h1> 🤖ConvRe🤯 Demo (Llama-2-Chat-7B🦙) </h1></div>\
<br>\
""",
            elem_classes="markdown-text",
        )

        output_box = gr.Textbox(lines=10, max_lines=10, label="Llama-2-Chat-7B Answer", interactive=False)

        input_box = gr.Textbox(lines=12, max_lines=12, label="User Input")

        ground_truth_display = gr.Textbox("", lines=1, max_lines=1, label="😊Correct Answer😊", interactive=False)

    with gr.Column():

        # Generation controls, collapsed by default.
        with gr.Accordion("Additional Inputs", open=False):
            sys_prompt = gr.Textbox(label="System prompt", value=DEFAULT_SYSTEM_PROMPT, lines=6)

            max_new_tokens=gr.Slider(
                label="Max new tokens",
                minimum=1,
                maximum=MAX_MAX_NEW_TOKENS,
                step=1,
                value=DEFAULT_MAX_NEW_TOKENS,
            )

            temperature = gr.Slider(
                label="Temperature",
                minimum=0.1,
                maximum=4.0,
                step=0.1,
                value=0.1,
            )


        # Four buttons, one per benchmark setting; each fills the input box
        # with a random example. The hidden gr.Text carries the setting name.
        with gr.Row():
            re2text_easy_btn = gr.Button("Random Re2Text Easy Example 😄")
            re2text_easy_btn.click(
                fn=random_examples,
                inputs=gr.Text("re2text-easy", visible=False),
                outputs = input_box,
            )

            re2text_hard_btn = gr.Button("Random Re2Text Hard Example 🤯")
            re2text_hard_btn.click(
                fn=random_examples,
                inputs=gr.Text("re2text-hard", visible=False),
                outputs=input_box,
            )

            text2re_easy_btn = gr.Button("Random Text2Re Easy Example 😄")
            text2re_easy_btn.click(
                fn=random_examples,
                inputs=gr.Text("text2re-easy", visible=False),
                outputs = input_box,
            )

            text2re_hard_btn = gr.Button("Random Text2Re Hard Example 🤯")
            text2re_hard_btn.click(
                fn=random_examples,
                inputs=gr.Text("text2re-hard", visible=False),
                outputs = input_box,
            )

        with gr.Row():
            gr.ClearButton([input_box, output_box])
            submit_btn = gr.Button("Submit🏃")
            submit_btn.click(generate, inputs=[input_box, sys_prompt, temperature, max_new_tokens], outputs=[output_box])

            answer_btn = gr.Button("Answer🤔")
            answer_btn.click(return_ground_truth, inputs=[], outputs=[ground_truth_display])


# NOTE(review): `enable_queue=` was deprecated and later removed in newer
# gradio releases (queueing is enabled by `.queue()` itself) -- confirm the
# pinned gradio version still accepts it.
demo.queue(max_size=32).launch(enable_queue=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker_verification_dataset.py
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
from speaker_encoder.data_objects.random_cycler import RandomCycler
|
2 |
-
from speaker_encoder.data_objects.speaker_batch import SpeakerBatch
|
3 |
-
from speaker_encoder.data_objects.speaker import Speaker
|
4 |
-
from speaker_encoder.params_data import partials_n_frames
|
5 |
-
from torch.utils.data import Dataset, DataLoader
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
# TODO: improve with a pool of speakers for data efficiency
|
9 |
-
|
10 |
-
class SpeakerVerificationDataset(Dataset):
    """Effectively infinite dataset that serves speakers in random cycling order.

    Each item is a ``Speaker`` drawn from a ``RandomCycler``; the integer index
    passed by the DataLoader is ignored.
    """

    def __init__(self, datasets_root: Path):
        self.root = datasets_root
        directories = [entry for entry in self.root.glob("*") if entry.is_dir()]
        if not directories:
            raise Exception("No speakers found. Make sure you are pointing to the directory "
                            "containing all preprocessed speaker directories.")
        self.speakers = [Speaker(directory) for directory in directories]
        self.speaker_cycler = RandomCycler(self.speakers)

    def __len__(self):
        # Arbitrarily huge length: sampling never exhausts the dataset.
        return int(1e10)

    def __getitem__(self, index):
        # The requested index is irrelevant; the cycler decides the order.
        return next(self.speaker_cycler)

    def get_logs(self):
        """Concatenate the contents of every *.txt log under the dataset root."""
        pieces = []
        for log_fpath in self.root.glob("*.txt"):
            with log_fpath.open("r") as log_file:
                pieces.append(log_file.read())
        return "".join(pieces)
|
32 |
-
|
33 |
-
|
34 |
-
class SpeakerVerificationDataLoader(DataLoader):
    """DataLoader whose batches are ``SpeakerBatch`` objects.

    One "item" of the underlying dataset is a whole speaker, so
    ``speakers_per_batch`` plays the role of ``batch_size``.
    """

    def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None,
                 batch_sampler=None, num_workers=0, pin_memory=False, timeout=0,
                 worker_init_fn=None):
        # Remembered for collate(): how many partial utterances to draw per speaker.
        self.utterances_per_speaker = utterances_per_speaker

        loader_kwargs = dict(
            dataset=dataset,
            batch_size=speakers_per_batch,
            shuffle=False,          # ordering comes from the dataset's random cycler
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=num_workers,
            collate_fn=self.collate,
            pin_memory=pin_memory,
            drop_last=False,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        super().__init__(**loader_kwargs)

    def collate(self, speakers):
        # Fold the sampled speakers into a single SpeakerBatch of fixed-size
        # partial utterances (partials_n_frames frames each).
        return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames)
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/LazyImport.py
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
from importlib.util import find_spec, LazyLoader, module_from_spec
|
2 |
-
from sys import modules
|
3 |
-
|
4 |
-
def lazyload(name):
    """Import module *name* lazily: it is only executed on first attribute access.

    Returns the module (the cached one if it was already imported). Follows the
    lazy-import recipe from the importlib documentation.

    Fixes vs. the original:
    - ``spec.loader`` is replaced with the ``LazyLoader`` *before* the module is
      created, as the importlib recipe requires; otherwise the module object is
      built from the eager loader's spec.
    - a missing module raises ``ModuleNotFoundError`` instead of crashing with
      ``AttributeError`` on a ``None`` spec.
    """
    if name in modules:
        # Already imported (lazily or not) -- reuse the cached module.
        return modules[name]
    spec = find_spec(name)
    if spec is None:
        raise ModuleNotFoundError(f"No module named {name!r}")
    # Wrap the real loader so execution is deferred to first attribute access.
    loader = LazyLoader(spec.loader)
    spec.loader = loader
    module = module_from_spec(spec)
    modules[name] = module
    loader.exec_module(module)
    return module
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Hobbyist/Hoyo-RVC/docs/faq.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
## Q1:ffmpeg error/utf8 error.
|
2 |
-
|
3 |
-
大概率不是ffmpeg问题,而是音频路径问题;<br>
|
4 |
-
ffmpeg读取路径带空格、()等特殊符号,可能出现ffmpeg error;训练集音频带中文路径,在写入filelist.txt的时候可能出现utf8 error;<br>
|
5 |
-
|
6 |
-
## Q2:一键训练结束没有索引
|
7 |
-
|
8 |
-
显示"Training is done. The program is closed."则模型训练成功,后续紧邻的报错是假的;<br>
|
9 |
-
|
10 |
-
一键训练结束完成没有added开头的索引文件,可能是因为训练集太大卡住了添加索引的步骤;已通过批处理add索引解决内存add索引对内存需求过大的问题。临时可尝试再次点击"训练索引"按钮。<br>
|
11 |
-
|
12 |
-
## Q3:训练结束推理没看到训练集的音色
|
13 |
-
点刷新音色再看看,如果还没有看看训练有没有报错,控制台和webui的截图,logs/实验名下的log,都可以发给开发者看看。<br>
|
14 |
-
|
15 |
-
## Q4:如何分享模型
|
16 |
-
rvc_root/logs/实验名 下面存储的pth不是用来分享模型用来推理的,而是为了存储实验状态供复现,以及继续训练用的。用来分享的模型应该是weights文件夹下大小为60+MB的pth文件;<br>
|
17 |
-
后续将把weights/exp_name.pth和logs/exp_name/added_xxx.index合并打包成weights/exp_name.zip省去填写index的步骤,那么zip文件用来分享,不要分享pth文件,除非是想换机器继续训练;<br>
|
18 |
-
如果你把logs文件夹下的几百MB的pth文件复制/分享到weights文件夹下强行用于推理,可能会出现f0,tgt_sr等各种key不存在的报错。你需要用ckpt选项卡最下面,手工或自动(本地logs下如果能找到相关信息则会自动)选择是否携带音高、目标音频采样率的选项后进行ckpt小模型提取(输入路径填G开头的那个),提取完在weights文件夹下会出现60+MB的pth文件,刷新音色后可以选择使用。<br>
|
19 |
-
|
20 |
-
## Q5:Connection Error.
|
21 |
-
也许你关闭了控制台(黑色窗口)。<br>
|
22 |
-
|
23 |
-
## Q6:WebUI弹出Expecting value: line 1 column 1 (char 0).
|
24 |
-
请关闭系统局域网代理/全局代理。<br>
|
25 |
-
|
26 |
-
这个不仅是客户端的代理,也包括服务端的代理(例如你使用autodl设置了http_proxy和https_proxy学术加速,使用时也需要unset关掉)<br>
|
27 |
-
|
28 |
-
## Q7:不用WebUI如何通过命令训练推理
|
29 |
-
训练脚本:<br>
|
30 |
-
可先跑通WebUI,消息窗内会显示数据集处理和训练用命令行;<br>
|
31 |
-
|
32 |
-
推理脚本:<br>
|
33 |
-
https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/myinfer.py<br>
|
34 |
-
|
35 |
-
例子:<br>
|
36 |
-
|
37 |
-
runtime\python.exe myinfer.py 0 "E:\codes\py39\RVC-beta\todo-songs\1111.wav" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "test.wav" "weights/mi-test.pth" 0.6 cuda:0 True<br>
|
38 |
-
|
39 |
-
f0up_key=sys.argv[1]<br>
|
40 |
-
input_path=sys.argv[2]<br>
|
41 |
-
index_path=sys.argv[3]<br>
|
42 |
-
f0method=sys.argv[4]#harvest or pm<br>
|
43 |
-
opt_path=sys.argv[5]<br>
|
44 |
-
model_path=sys.argv[6]<br>
|
45 |
-
index_rate=float(sys.argv[7])<br>
|
46 |
-
device=sys.argv[8]<br>
|
47 |
-
is_half=bool(sys.argv[9])<br>
|
48 |
-
|
49 |
-
## Q8:Cuda error/Cuda out of memory.
|
50 |
-
小概率是cuda配置问题、设备不支持;大概率是显存不够(out of memory);<br>
|
51 |
-
|
52 |
-
训练的话缩小batch size(如果缩小到1还不够只能更换显卡训练),推理的话酌情缩小config.py结尾的x_pad,x_query,x_center,x_max。4G以下显存(例如1060(3G)和各种2G显卡)可以直接放弃,4G显存显卡还有救。<br>
|
53 |
-
|
54 |
-
## Q9:total_epoch调多少比较好
|
55 |
-
|
56 |
-
如果训练集音质差底噪大,20~30足够了,调太高,底模音质无法带高你的低音质训练集<br>
|
57 |
-
如果训练集音质高底噪低时长多,可以调高,200是ok的(训练速度很快,既然你有条件准备高音质训练集,显卡想必条件也不错,肯定不在乎多一些训练时间)<br>
|
58 |
-
|
59 |
-
## Q10:需要多少训练集时长
|
60 |
-
推荐10min至50min<br>
|
61 |
-
保证音质高底噪低的情况下,如果有个人特色的音色统一,则多多益善<br>
|
62 |
-
高水平的训练集(精简+音色有特色),5min至10min也是ok的,仓库作者本人就经常这么玩<br>
|
63 |
-
也有人拿1min至2min的数据来训练并且训练成功的,但是成功经验是其他人不可复现的,不太具备参考价值。这要求训练集音色特色非常明显(比如说高频气声较明显的萝莉少女音),且音质高;<br>
|
64 |
-
1min以下时长数据目前没见有人尝试(成功)过。不建议进行这种鬼畜行为。<br>
|
65 |
-
|
66 |
-
## Q11:index rate干嘛用的,怎么调(科普)
|
67 |
-
如果底模和推理源的音质高于训练集的音质,他们可以带高推理结果的音质,但代价可能是音色往底模/推理源的音色靠,这种现象叫做"音色泄露";<br>
|
68 |
-
index rate用来削减/解决音色泄露问题。调到1,则理论上不存在推理源的音色泄露问题,但音质更倾向于训练集。如果训练集音质比推理源低,则index rate调高可能降低音质。调到0,则不具备利用检索混合来保护训练集音色的效果;<br>
|
69 |
-
如果训练集优质时长多,可调高total_epoch,此时模型本身不太会引用推理源和底模的音色,很少存在"音色泄露"问题,此时index_rate不重要,你甚至可以不建立/分享index索引文件。<br>
|
70 |
-
|
71 |
-
## Q11:推理怎么选gpu
|
72 |
-
config.py文件里device cuda:后面选择卡号;<br>
|
73 |
-
卡号和显卡的映射关系,在训练选项卡的显卡信息栏里能看到。<br>
|
74 |
-
|
75 |
-
## Q12:如何推理训练中间保存的pth
|
76 |
-
通过ckpt选项卡最下面提取小模型。<br>
|
77 |
-
|
78 |
-
|
79 |
-
## Q13:如何中断和继续训练
|
80 |
-
现阶段只能关闭WebUI控制台双击go-web.bat重启程序。网页参数也要刷新重新填写;<br>
|
81 |
-
继续训练:相同网页参数点训练模型,就会接着上次的checkpoint继续训练。<br>
|
82 |
-
|
83 |
-
## Q14:训练时出现文件页面/内存error
|
84 |
-
进程开太多了,内存炸了。你可能可以通过如下方式解决<br>
|
85 |
-
1、"提取音高和处理数据使用的CPU进程数" 酌情拉低;<br>
|
86 |
-
2、训练集音频手工切一下,不要太长。<br>
|
87 |
-
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_meshes.py
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import pytest
|
3 |
-
import trimesh
|
4 |
-
|
5 |
-
from pyrender import (Mesh, Primitive)
|
6 |
-
|
7 |
-
|
8 |
-
def test_meshes():
    """Smoke-test pyrender Mesh/Primitive construction, bounds/centroid/extents
    math, instancing via pose arrays, and color/texture handling for the
    supported trimesh input flavors."""

    # Invalid constructions must fail fast.
    with pytest.raises(TypeError):
        x = Mesh()
    with pytest.raises(TypeError):
        x = Primitive()
    with pytest.raises(ValueError):
        x = Primitive([], mode=10)

    # Basics
    x = Mesh([])
    assert x.name is None
    assert x.is_visible
    assert x.weights is None

    x.name = 'str'

    # From Trimesh: a unit box centered at the origin.
    x = Mesh.from_trimesh(trimesh.creation.box())
    assert isinstance(x, Mesh)
    assert len(x.primitives) == 1
    assert x.is_visible
    assert np.allclose(x.bounds, np.array([
        [-0.5, -0.5, -0.5],
        [0.5, 0.5, 0.5]
    ]))
    assert np.allclose(x.centroid, np.zeros(3))
    assert np.allclose(x.extents, np.ones(3))
    assert np.allclose(x.scale, np.sqrt(3))  # diagonal of the unit cube
    assert not x.is_transparent

    # Test some primitive functions: attribute setters validate shapes/types.
    x = x.primitives[0]
    with pytest.raises(ValueError):
        x.normals = np.zeros(10)
    with pytest.raises(ValueError):
        x.tangents = np.zeros(10)
    with pytest.raises(ValueError):
        x.texcoord_0 = np.zeros(10)
    with pytest.raises(ValueError):
        x.texcoord_1 = np.zeros(10)
    with pytest.raises(TypeError):
        x.material = np.zeros(10)
    assert x.targets is None
    assert np.allclose(x.bounds, np.array([
        [-0.5, -0.5, -0.5],
        [0.5, 0.5, 0.5]
    ]))
    assert np.allclose(x.centroid, np.zeros(3))
    assert np.allclose(x.extents, np.ones(3))
    assert np.allclose(x.scale, np.sqrt(3))
    # Alpha 0 in the base color factor makes the primitive transparent.
    x.material.baseColorFactor = np.array([0.0, 0.0, 0.0, 0.0])
    assert x.is_transparent

    # From two trimeshes: bounds are the union of both primitives.
    x = Mesh.from_trimesh([trimesh.creation.box(),
                           trimesh.creation.cylinder(radius=0.1, height=2.0)],
                          smooth=False)
    assert isinstance(x, Mesh)
    assert len(x.primitives) == 2
    assert x.is_visible
    assert np.allclose(x.bounds, np.array([
        [-0.5, -0.5, -1.0],
        [0.5, 0.5, 1.0]
    ]))
    assert np.allclose(x.centroid, np.zeros(3))
    assert np.allclose(x.extents, [1.0, 1.0, 2.0])
    assert np.allclose(x.scale, np.sqrt(6))
    assert not x.is_transparent

    # From bad data
    with pytest.raises(TypeError):
        x = Mesh.from_trimesh(None)

    # With instancing: five copies of the box translated along +x, so the
    # union bound stretches to x = 4.5.
    poses = np.tile(np.eye(4), (5,1,1))
    poses[:,0,3] = np.array([0,1,2,3,4])
    x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses)
    assert np.allclose(x.bounds, np.array([
        [-0.5, -0.5, -0.5],
        [4.5, 0.5, 0.5]
    ]))
    # A single 4x4 pose is accepted; a 3x3 matrix is rejected.
    poses = np.eye(4)
    x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses)
    poses = np.eye(3)
    with pytest.raises(ValueError):
        x = Mesh.from_trimesh(trimesh.creation.box(), poses=poses)

    # From textured meshes (fixture with a base color texture).
    fm = trimesh.load('tests/data/fuze.obj')
    x = Mesh.from_trimesh(fm)
    assert isinstance(x, Mesh)
    assert len(x.primitives) == 1
    assert x.is_visible
    assert not x.is_transparent
    assert x.primitives[0].material.baseColorTexture is not None

    # Face colors are incompatible with smooth shading -> ValueError.
    x = Mesh.from_trimesh(fm, smooth=False)
    fm.visual = fm.visual.to_color()
    fm.visual.face_colors = np.array([1.0, 0.0, 0.0, 1.0])
    x = Mesh.from_trimesh(fm, smooth=False)
    with pytest.raises(ValueError):
        x = Mesh.from_trimesh(fm, smooth=True)

    # Vertex colors with alpha < 1 make the mesh transparent.
    fm.visual.vertex_colors = np.array([1.0, 0.0, 0.0, 0.5])
    x = Mesh.from_trimesh(fm, smooth=False)
    x = Mesh.from_trimesh(fm, smooth=True)
    assert x.primitives[0].color_0 is not None
    assert x.is_transparent

    # glTF fixture carrying a full PBR texture set.
    bm = trimesh.load('tests/data/WaterBottle.glb').dump()[0]
    x = Mesh.from_trimesh(bm)
    assert x.primitives[0].material.baseColorTexture is not None
    assert x.primitives[0].material.emissiveTexture is not None
    assert x.primitives[0].material.metallicRoughnessTexture is not None

    # From point cloud
    x = Mesh.from_points(fm.vertices)

# def test_duck():
#     bm = trimesh.load('tests/data/Duck.glb').dump()[0]
#     x = Mesh.from_trimesh(bm)
#     assert x.primitives[0].material.baseColorTexture is not None
#     pixel = x.primitives[0].material.baseColorTexture.source[100, 100]
#     yellowish = np.array([1.0, 0.7411765, 0.0, 1.0])
#     assert np.allclose(pixel, yellowish)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhay1210/prompt-generator_V1/app.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# BART model fine-tuned on the awesome-chatgpt-prompts dataset: given a short
# persona string (e.g. "photographer") it generates a full ChatGPT prompt.
# NOTE(review): from_tf=True converts TensorFlow weights at load time --
# confirm the checkpoint actually ships TF weights.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True)

def generate(prompt):
    """Generate a ChatGPT prompt (up to 150 new tokens) for the given persona."""
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]

# Gradio UI: single persona textbox in, generated prompt out.
input_component = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer")
output_component = gr.Textbox(label = "Prompt")
examples = [["photographer"], ["Linux Admin"]]
description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). Simply enter a persona that you want the prompt to be generated based on."
gr.Interface(generate, inputs = input_component, outputs=output_component, examples=examples, title = "👨 ChatGPT Prompt Generator 👨", description=description).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abubakari/Sales_Prediction/app.py
DELETED
@@ -1,166 +0,0 @@
|
|
1 |
-
import pandas as pd
import streamlit as st
import numpy as np
from matplotlib import pyplot as plt
import pickle
import sklearn
import joblib
from PIL import Image
import base64


# Pre-fitted preprocessing artifacts and the trained model, exported with joblib.
num_imputer = joblib.load('numerical_imputer.joblib')
cat_imputer = joblib.load('categorical_imputer.joblib')
encoder = joblib.load('encoder.joblib')
scaler = joblib.load('scaler.joblib')
dt_model = joblib.load('Final_model.joblib')

# Add a title and subtitle
st.write("<center><h1>Sales Prediction App</h1></center>", unsafe_allow_html=True)

#image = Image.open("grocery_shopping_woman.png")

# Display the image
#st.image(image, width=600)

# Load the image
image = Image.open("grocery_shopping_woman.png")

# Set up the layout: image centered in the middle column.
col1, col2, col3 = st.columns([1, 3, 3])
col2.image(image, width=600)


#st.image("https://www.example.com/logo.png", width=200)
# Add a subtitle or description
st.write("This app uses machine learning to predict sales based on certain input parameters. Simply enter the required information and click 'Predict' to get a sales prediction!")

st.subheader("Enter the details to predict sales")

# Add some text
#st.write("Enter some data for Prediction.")

# Create the input fields. Keys of input_data become DataFrame columns, so
# they must match the feature names the imputers/encoder/scaler were fit on.
input_data = {}
col1,col2 = st.columns(2)
with col1:
    input_data['store_nbr'] = st.slider("store_nbr",0,54)
    input_data['products'] = st.selectbox("products", ['AUTOMOTIVE', 'CLEANING', 'BEAUTY', 'FOODS', 'STATIONERY',
       'CELEBRATION', 'GROCERY', 'HARDWARE', 'HOME', 'LADIESWEAR',
       'LAWN AND GARDEN', 'CLOTHING', 'LIQUOR,WINE,BEER', 'PET SUPPLIES'])
    input_data['onpromotion'] =st.number_input("onpromotion",step=1)
    input_data['state'] = st.selectbox("state", ['Pichincha', 'Cotopaxi', 'Chimborazo', 'Imbabura',
       'Santo Domingo de los Tsachilas', 'Bolivar', 'Pastaza',
       'Tungurahua', 'Guayas', 'Santa Elena', 'Los Rios', 'Azuay', 'Loja',
       'El Oro', 'Esmeraldas', 'Manabi'])
    input_data['store_type'] = st.selectbox("store_type",['D', 'C', 'B', 'E', 'A'])
    input_data['cluster'] = st.number_input("cluster",step=1)

with col2:
    input_data['dcoilwtico'] = st.number_input("dcoilwtico",step=1)
    input_data['year'] = st.number_input("year",step=1)
    input_data['month'] = st.slider("month",1,12)
    input_data['day'] = st.slider("day",1,31)
    input_data['dayofweek'] = st.number_input("dayofweek,0=Sun and 6=Sat",step=1)
    input_data['end_month'] = st.selectbox("end_month",['True','False'])


# Define CSS style for the download button
# Define the custom CSS
predict_button_css = """
<style>
.predict-button {
    background-color: #C4C4C4;
    color: gray;
    padding: 0.75rem 2rem;
    border-radius: 0.5rem;
    border: none;
    font-size: 1.1rem;
    font-weight: bold;
    text-align: center;
    margin-top: 2rem;
}
</style>
"""

download_button_css = """
<style>
.download-button {
    background-color: #C4C4C4;
    color: white;
    padding: 0.75rem 2rem;
    border-radius: 0.5rem;
    border: none;
    font-size: 1.1rem;
    font-weight: bold;
    text-align: center;
    margin-top: 1rem;
}
</style>
"""

# Display the custom CSS
st.markdown(predict_button_css + download_button_css, unsafe_allow_html=True)


# Create a button to make a prediction

if st.button("Predict", key="predict_button", help="Click to make a prediction."):
    # Convert the input data to a pandas DataFrame
    input_df = pd.DataFrame([input_data])


    # Selecting categorical and numerical columns separately
    cat_columns = [col for col in input_df.columns if input_df[col].dtype == 'object']
    num_columns = [col for col in input_df.columns if input_df[col].dtype != 'object']


    # Apply the imputers
    input_df_imputed_cat = cat_imputer.transform(input_df[cat_columns])
    input_df_imputed_num = num_imputer.transform(input_df[num_columns])


    # Encode the categorical columns.
    # NOTE(review): get_feature_names() was renamed get_feature_names_out()
    # in scikit-learn >= 1.0 -- confirm the pinned sklearn version.
    input_encoded_df = pd.DataFrame(encoder.transform(input_df_imputed_cat).toarray(),
                                   columns=encoder.get_feature_names(cat_columns))

    # Scale the numerical columns
    input_df_scaled = scaler.transform(input_df_imputed_num)
    input_scaled_df = pd.DataFrame(input_df_scaled , columns = num_columns)

    #joining the cat encoded and num scaled
    final_df = pd.concat([input_encoded_df, input_scaled_df], axis=1)

    # Make a prediction
    prediction = dt_model.predict(final_df)[0]


    # Display the prediction
    st.write(f"The predicted sales are: {prediction}.")
    # Persist the raw inputs so the download button below can serve them.
    input_df.to_csv("data.csv", index=False)
    st.table(input_df)

# Define custom CSS
css = """
table {
    background-color: #f2f2f2;
    color: #333333;
}
"""

# Set custom CSS
st.write(f'<style>{css}</style>', unsafe_allow_html=True)
|
153 |
-
|
154 |
-
|
155 |
-
# Add the download button
|
156 |
-
def download_csv():
|
157 |
-
with open("data.csv", "r") as f:
|
158 |
-
csv = f.read()
|
159 |
-
b64 = base64.b64encode(csv.encode()).decode()
|
160 |
-
button = f'<button class="download-button"><a href="data:file/csv;base64,{b64}" download="data.csv">Download Data CSV</a></button>'
|
161 |
-
return button
|
162 |
-
|
163 |
-
st.markdown(
|
164 |
-
f'<div style="text-align: center">{download_csv()}</div>',
|
165 |
-
unsafe_allow_html=True
|
166 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adr740/SmartHadithFR/app.py
DELETED
@@ -1,41 +0,0 @@
|
|
1 |
-
|
2 |
-
import gradio as gr
|
3 |
-
from functools import partial
|
4 |
-
from get_similar_hadiths import search_hadiths
|
5 |
-
import pandas as pd
|
6 |
-
|
7 |
-
title = "Smart Hadith (Version Française) -> [English version here](https://huggingface.co/spaces/Adr740/Hadith_AI_Explorer)"
|
8 |
-
desc = "Bienvenue dans Smart Hadith. Smart Hadith est un outil de recherche sémantique de hadith utilisant l'intelligence artificelle. Contact suggestions/questions: [[email protected]](mailto:[email protected])"
|
9 |
-
|
10 |
-
# "This is a tool that helps you find quickly relevant hadiths on a topic or a problem you have. Just type in plain English what you are looking for in the box below.\n\n"
|
11 |
-
warning = "\n\n**AVERTISSEMENT (seulement environ 3000 hadiths sont présents)**\nCET OUTIL EST DESTINÉ À DES FINS DE RÉFÉRENCE AFIN DE FACILITER LA RECHERCHE SUR LES HADITHS (PAROLES ET ACTES PROPHÉTIQUES), IL N'EST PAS DESTINÉ À ÊTRE UTILISÉ COMME OUTIL DE GUIDANCE OU DANS TOUT AUTRE BUT. LES UTILISATEURS SONT RESPONSABLES DE CONDUIRE LEURS PROPRES RECHERCHES ET DE DEMANDER DES CONSEILS AUX SAVANTS RELIGIEUX.\nVEUILLEZ NOTER QUE LE CONTENU AFFICHÉ PAR CET OUTIL N'EST PAS GARANTI COMME ÉTANT PRÉCIS, COMPLET OU À JOUR, ET N'EST PAS DESTINÉ À ÊTRE UTILISÉ COMME SOURCE RELIGIEUSE UNIQUE.\nLES DÉVELOPPEURS DE CET OUTIL NE SERONT PAS TENUS RESPONSABLES DE TOUTE DÉCISION OU UTILISATION FAITE PAR LES UTILISATEURS DE CET OUTIL."
|
12 |
-
disclaimer = "\n## DISCLAIMER\n\nTHIS TOOL IS INTENDED FOR REFERENCE PURPOSES ONLY AND IS NOT INTENDED TO BE TAKEN AS RELIGIOUS ADVICE. THE HADITHS DISPLAYED BY THIS TOOL ARE NOT INTENDED TO BE USED AS A SOLE SOURCE OF RELIGIOUS GUIDANCE. USERS ARE RESPONSIBLE FOR CONDUCTING THEIR OWN RESEARCH AND SEEKING GUIDANCE FROM RELIGIOUS SCHOLARS.\n\nPLEASE NOTE THAT THE CONTENT DISPLAYED BY THIS TOOL IS NOT GUARANTEED TO BE ACCURATE, COMPLETE, OR UP-TO-DATE.\n\nTHE DEVELOPERS OF THIS TOOL WILL NOT BE HELD RESPONSIBLE FOR ANY DECISIONS MADE BY THE USERS OF THIS TOOL THAT ARE BASED ON THE CONTENT DISPLAYED BY THIS TOOL.\n\nHadiths gathered from this repository: https:\/\/www.kaggle.com\/datasets\/fahd09\/hadith-dataset"
|
13 |
-
def iter_grid(n_rows, n_cols):
|
14 |
-
for _ in range(n_rows):
|
15 |
-
with gr.Row():
|
16 |
-
for _ in range(n_cols):
|
17 |
-
with gr.Column():
|
18 |
-
yield
|
19 |
-
with gr.Blocks(title=title) as demo:
|
20 |
-
gr.Markdown(f"## {title}")
|
21 |
-
gr.Markdown(desc+warning)
|
22 |
-
# gr.Markdown(warning)
|
23 |
-
with gr.Row():
|
24 |
-
with gr.Column(scale=4):
|
25 |
-
text_area = gr.Textbox(placeholder="Écrivez ici... Exemple: 'Hadiths sur le bon comportement et la nourriture'", lines=3, label="Décrivez avec vos mots ce que vous recherchez (mot-clé, sujet etc...)")
|
26 |
-
with gr.Column(scale=1):
|
27 |
-
number_to_display = gr.Number(value=10,label = "Nombre de hadiths à afficher")
|
28 |
-
submit_button = gr.Button(value="Trouver des hadiths")
|
29 |
-
pass
|
30 |
-
|
31 |
-
fn = partial(search_hadiths)
|
32 |
-
|
33 |
-
with gr.Accordion("Tous les résultats:"):
|
34 |
-
ll = gr.Markdown("Vide")
|
35 |
-
|
36 |
-
|
37 |
-
submit_button.click(fn=fn, inputs=[text_area,number_to_display], outputs=[ll])
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
demo.launch( enable_queue=True,max_threads=40)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/ResolveChildrenWidth.js
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
var ResolveChildrenWidth = function (parentWidth) {
|
2 |
-
// Resolve width of sizer children
|
3 |
-
var child, childWidth;
|
4 |
-
var colWidth;
|
5 |
-
for (var i in this.sizerChildren) {
|
6 |
-
child = this.sizerChildren[i];
|
7 |
-
if (child && child.isRexSizer && !child.ignoreLayout) {
|
8 |
-
colWidth = this.getColumnWidth(parseInt(i) % this.columnCount);
|
9 |
-
childWidth = this.getExpandedChildWidth(child, colWidth);
|
10 |
-
childWidth = child.resolveWidth(childWidth);
|
11 |
-
child.resolveChildrenWidth(childWidth);
|
12 |
-
}
|
13 |
-
}
|
14 |
-
}
|
15 |
-
|
16 |
-
export default ResolveChildrenWidth;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Press.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import { Press } from '../../../plugins/gestures.js';
|
2 |
-
export default Press;
|
|
|
|
|
|
spaces/Ajay07pandey/Netfilx_Movie_Recommendation_System/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Netfilx Movie Recommendation System
|
3 |
-
emoji: 👀
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: blue
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.27.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alycer/VITS-Umamusume-voice-synthesizer/utils.py
DELETED
@@ -1,226 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import glob
|
3 |
-
import sys
|
4 |
-
import argparse
|
5 |
-
import logging
|
6 |
-
import json
|
7 |
-
import subprocess
|
8 |
-
import numpy as np
|
9 |
-
from scipy.io.wavfile import read
|
10 |
-
import torch
|
11 |
-
|
12 |
-
MATPLOTLIB_FLAG = False
|
13 |
-
|
14 |
-
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
|
15 |
-
logger = logging
|
16 |
-
|
17 |
-
|
18 |
-
def load_checkpoint(checkpoint_path, model, optimizer=None):
|
19 |
-
assert os.path.isfile(checkpoint_path)
|
20 |
-
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
|
21 |
-
iteration = checkpoint_dict['iteration']
|
22 |
-
learning_rate = checkpoint_dict['learning_rate']
|
23 |
-
if optimizer is not None:
|
24 |
-
optimizer.load_state_dict(checkpoint_dict['optimizer'])
|
25 |
-
saved_state_dict = checkpoint_dict['model']
|
26 |
-
if hasattr(model, 'module'):
|
27 |
-
state_dict = model.module.state_dict()
|
28 |
-
else:
|
29 |
-
state_dict = model.state_dict()
|
30 |
-
new_state_dict = {}
|
31 |
-
for k, v in state_dict.items():
|
32 |
-
try:
|
33 |
-
new_state_dict[k] = saved_state_dict[k]
|
34 |
-
except:
|
35 |
-
logger.info("%s is not in the checkpoint" % k)
|
36 |
-
new_state_dict[k] = v
|
37 |
-
if hasattr(model, 'module'):
|
38 |
-
model.module.load_state_dict(new_state_dict)
|
39 |
-
else:
|
40 |
-
model.load_state_dict(new_state_dict)
|
41 |
-
logger.info("Loaded checkpoint '{}' (iteration {})".format(
|
42 |
-
checkpoint_path, iteration))
|
43 |
-
return model, optimizer, learning_rate, iteration
|
44 |
-
|
45 |
-
|
46 |
-
def plot_spectrogram_to_numpy(spectrogram):
|
47 |
-
global MATPLOTLIB_FLAG
|
48 |
-
if not MATPLOTLIB_FLAG:
|
49 |
-
import matplotlib
|
50 |
-
matplotlib.use("Agg")
|
51 |
-
MATPLOTLIB_FLAG = True
|
52 |
-
mpl_logger = logging.getLogger('matplotlib')
|
53 |
-
mpl_logger.setLevel(logging.WARNING)
|
54 |
-
import matplotlib.pylab as plt
|
55 |
-
import numpy as np
|
56 |
-
|
57 |
-
fig, ax = plt.subplots(figsize=(10, 2))
|
58 |
-
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
|
59 |
-
interpolation='none')
|
60 |
-
plt.colorbar(im, ax=ax)
|
61 |
-
plt.xlabel("Frames")
|
62 |
-
plt.ylabel("Channels")
|
63 |
-
plt.tight_layout()
|
64 |
-
|
65 |
-
fig.canvas.draw()
|
66 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
|
67 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
68 |
-
plt.close()
|
69 |
-
return data
|
70 |
-
|
71 |
-
|
72 |
-
def plot_alignment_to_numpy(alignment, info=None):
|
73 |
-
global MATPLOTLIB_FLAG
|
74 |
-
if not MATPLOTLIB_FLAG:
|
75 |
-
import matplotlib
|
76 |
-
matplotlib.use("Agg")
|
77 |
-
MATPLOTLIB_FLAG = True
|
78 |
-
mpl_logger = logging.getLogger('matplotlib')
|
79 |
-
mpl_logger.setLevel(logging.WARNING)
|
80 |
-
import matplotlib.pylab as plt
|
81 |
-
import numpy as np
|
82 |
-
|
83 |
-
fig, ax = plt.subplots(figsize=(6, 4))
|
84 |
-
im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
|
85 |
-
interpolation='none')
|
86 |
-
fig.colorbar(im, ax=ax)
|
87 |
-
xlabel = 'Decoder timestep'
|
88 |
-
if info is not None:
|
89 |
-
xlabel += '\n\n' + info
|
90 |
-
plt.xlabel(xlabel)
|
91 |
-
plt.ylabel('Encoder timestep')
|
92 |
-
plt.tight_layout()
|
93 |
-
|
94 |
-
fig.canvas.draw()
|
95 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
|
96 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
97 |
-
plt.close()
|
98 |
-
return data
|
99 |
-
|
100 |
-
|
101 |
-
def load_wav_to_torch(full_path):
|
102 |
-
sampling_rate, data = read(full_path)
|
103 |
-
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
|
104 |
-
|
105 |
-
|
106 |
-
def load_filepaths_and_text(filename, split="|"):
|
107 |
-
with open(filename, encoding='utf-8') as f:
|
108 |
-
filepaths_and_text = [line.strip().split(split) for line in f]
|
109 |
-
return filepaths_and_text
|
110 |
-
|
111 |
-
|
112 |
-
def get_hparams(init=True):
|
113 |
-
parser = argparse.ArgumentParser()
|
114 |
-
parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
|
115 |
-
help='JSON file for configuration')
|
116 |
-
parser.add_argument('-m', '--model', type=str, required=True,
|
117 |
-
help='Model name')
|
118 |
-
|
119 |
-
args = parser.parse_args()
|
120 |
-
model_dir = os.path.join("./logs", args.model)
|
121 |
-
|
122 |
-
if not os.path.exists(model_dir):
|
123 |
-
os.makedirs(model_dir)
|
124 |
-
|
125 |
-
config_path = args.config
|
126 |
-
config_save_path = os.path.join(model_dir, "config.json")
|
127 |
-
if init:
|
128 |
-
with open(config_path, "r") as f:
|
129 |
-
data = f.read()
|
130 |
-
with open(config_save_path, "w") as f:
|
131 |
-
f.write(data)
|
132 |
-
else:
|
133 |
-
with open(config_save_path, "r") as f:
|
134 |
-
data = f.read()
|
135 |
-
config = json.loads(data)
|
136 |
-
|
137 |
-
hparams = HParams(**config)
|
138 |
-
hparams.model_dir = model_dir
|
139 |
-
return hparams
|
140 |
-
|
141 |
-
|
142 |
-
def get_hparams_from_dir(model_dir):
|
143 |
-
config_save_path = os.path.join(model_dir, "config.json")
|
144 |
-
with open(config_save_path, "r") as f:
|
145 |
-
data = f.read()
|
146 |
-
config = json.loads(data)
|
147 |
-
|
148 |
-
hparams = HParams(**config)
|
149 |
-
hparams.model_dir = model_dir
|
150 |
-
return hparams
|
151 |
-
|
152 |
-
|
153 |
-
def get_hparams_from_file(config_path):
|
154 |
-
with open(config_path, "r", encoding="utf-8") as f:
|
155 |
-
data = f.read()
|
156 |
-
config = json.loads(data)
|
157 |
-
|
158 |
-
hparams = HParams(**config)
|
159 |
-
return hparams
|
160 |
-
|
161 |
-
|
162 |
-
def check_git_hash(model_dir):
|
163 |
-
source_dir = os.path.dirname(os.path.realpath(__file__))
|
164 |
-
if not os.path.exists(os.path.join(source_dir, ".git")):
|
165 |
-
logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
|
166 |
-
source_dir
|
167 |
-
))
|
168 |
-
return
|
169 |
-
|
170 |
-
cur_hash = subprocess.getoutput("git rev-parse HEAD")
|
171 |
-
|
172 |
-
path = os.path.join(model_dir, "githash")
|
173 |
-
if os.path.exists(path):
|
174 |
-
saved_hash = open(path).read()
|
175 |
-
if saved_hash != cur_hash:
|
176 |
-
logger.warn("git hash values are different. {}(saved) != {}(current)".format(
|
177 |
-
saved_hash[:8], cur_hash[:8]))
|
178 |
-
else:
|
179 |
-
open(path, "w").write(cur_hash)
|
180 |
-
|
181 |
-
|
182 |
-
def get_logger(model_dir, filename="train.log"):
|
183 |
-
global logger
|
184 |
-
logger = logging.getLogger(os.path.basename(model_dir))
|
185 |
-
logger.setLevel(logging.DEBUG)
|
186 |
-
|
187 |
-
formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
|
188 |
-
if not os.path.exists(model_dir):
|
189 |
-
os.makedirs(model_dir)
|
190 |
-
h = logging.FileHandler(os.path.join(model_dir, filename))
|
191 |
-
h.setLevel(logging.DEBUG)
|
192 |
-
h.setFormatter(formatter)
|
193 |
-
logger.addHandler(h)
|
194 |
-
return logger
|
195 |
-
|
196 |
-
|
197 |
-
class HParams():
|
198 |
-
def __init__(self, **kwargs):
|
199 |
-
for k, v in kwargs.items():
|
200 |
-
if type(v) == dict:
|
201 |
-
v = HParams(**v)
|
202 |
-
self[k] = v
|
203 |
-
|
204 |
-
def keys(self):
|
205 |
-
return self.__dict__.keys()
|
206 |
-
|
207 |
-
def items(self):
|
208 |
-
return self.__dict__.items()
|
209 |
-
|
210 |
-
def values(self):
|
211 |
-
return self.__dict__.values()
|
212 |
-
|
213 |
-
def __len__(self):
|
214 |
-
return len(self.__dict__)
|
215 |
-
|
216 |
-
def __getitem__(self, key):
|
217 |
-
return getattr(self, key)
|
218 |
-
|
219 |
-
def __setitem__(self, key, value):
|
220 |
-
return setattr(self, key, value)
|
221 |
-
|
222 |
-
def __contains__(self, key):
|
223 |
-
return key in self.__dict__
|
224 |
-
|
225 |
-
def __repr__(self):
|
226 |
-
return self.__dict__.__repr__()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/cppipc/waiter.h
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include <utility>
|
4 |
-
#include <string>
|
5 |
-
#include <mutex>
|
6 |
-
#include <atomic>
|
7 |
-
|
8 |
-
#include "libipc/def.h"
|
9 |
-
#include "libipc/mutex.h"
|
10 |
-
#include "libipc/condition.h"
|
11 |
-
#include "libipc/platform/detail.h"
|
12 |
-
|
13 |
-
namespace ipc {
|
14 |
-
namespace detail {
|
15 |
-
|
16 |
-
class waiter {
|
17 |
-
ipc::sync::condition cond_;
|
18 |
-
ipc::sync::mutex lock_;
|
19 |
-
std::atomic<bool> quit_ {false};
|
20 |
-
|
21 |
-
public:
|
22 |
-
static void init();
|
23 |
-
|
24 |
-
waiter() = default;
|
25 |
-
waiter(char const *name) {
|
26 |
-
open(name);
|
27 |
-
}
|
28 |
-
|
29 |
-
~waiter() {
|
30 |
-
close();
|
31 |
-
}
|
32 |
-
|
33 |
-
bool valid() const noexcept {
|
34 |
-
return cond_.valid() && lock_.valid();
|
35 |
-
}
|
36 |
-
|
37 |
-
bool open(char const *name) noexcept {
|
38 |
-
quit_.store(false, std::memory_order_relaxed);
|
39 |
-
if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
|
40 |
-
return false;
|
41 |
-
}
|
42 |
-
if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
|
43 |
-
cond_.close();
|
44 |
-
return false;
|
45 |
-
}
|
46 |
-
return valid();
|
47 |
-
}
|
48 |
-
|
49 |
-
void close() noexcept {
|
50 |
-
cond_.close();
|
51 |
-
lock_.close();
|
52 |
-
}
|
53 |
-
|
54 |
-
template <typename F>
|
55 |
-
bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
|
56 |
-
IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
|
57 |
-
while ([this, &pred] {
|
58 |
-
return !quit_.load(std::memory_order_relaxed)
|
59 |
-
&& std::forward<F>(pred)();
|
60 |
-
}()) {
|
61 |
-
if (!cond_.wait(lock_, tm)) return false;
|
62 |
-
}
|
63 |
-
return true;
|
64 |
-
}
|
65 |
-
|
66 |
-
bool notify() noexcept {
|
67 |
-
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
|
68 |
-
return cond_.notify(lock_);
|
69 |
-
}
|
70 |
-
|
71 |
-
bool broadcast() noexcept {
|
72 |
-
std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
|
73 |
-
return cond_.broadcast(lock_);
|
74 |
-
}
|
75 |
-
|
76 |
-
bool quit_waiting() {
|
77 |
-
quit_.store(true, std::memory_order_release);
|
78 |
-
return broadcast();
|
79 |
-
}
|
80 |
-
};
|
81 |
-
|
82 |
-
} // namespace detail
|
83 |
-
} // namespace ipc
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/training/coaches/__init__.py
DELETED
File without changes
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/iadb.py
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
from typing import List, Optional, Tuple, Union
|
2 |
-
|
3 |
-
import torch
|
4 |
-
|
5 |
-
from diffusers import DiffusionPipeline
|
6 |
-
from diffusers.configuration_utils import ConfigMixin
|
7 |
-
from diffusers.pipeline_utils import ImagePipelineOutput
|
8 |
-
from diffusers.schedulers.scheduling_utils import SchedulerMixin
|
9 |
-
|
10 |
-
|
11 |
-
class IADBScheduler(SchedulerMixin, ConfigMixin):
|
12 |
-
"""
|
13 |
-
IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.
|
14 |
-
|
15 |
-
For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
|
16 |
-
"""
|
17 |
-
|
18 |
-
def step(
|
19 |
-
self,
|
20 |
-
model_output: torch.FloatTensor,
|
21 |
-
timestep: int,
|
22 |
-
x_alpha: torch.FloatTensor,
|
23 |
-
) -> torch.FloatTensor:
|
24 |
-
"""
|
25 |
-
Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
|
26 |
-
process from the learned model outputs (most often the predicted noise).
|
27 |
-
|
28 |
-
Args:
|
29 |
-
model_output (`torch.FloatTensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
|
30 |
-
timestep (`float`): current timestep in the diffusion chain.
|
31 |
-
x_alpha (`torch.FloatTensor`): x_alpha sample for the current timestep
|
32 |
-
|
33 |
-
Returns:
|
34 |
-
`torch.FloatTensor`: the sample at the previous timestep
|
35 |
-
|
36 |
-
"""
|
37 |
-
if self.num_inference_steps is None:
|
38 |
-
raise ValueError(
|
39 |
-
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
40 |
-
)
|
41 |
-
|
42 |
-
alpha = timestep / self.num_inference_steps
|
43 |
-
alpha_next = (timestep + 1) / self.num_inference_steps
|
44 |
-
|
45 |
-
d = model_output
|
46 |
-
|
47 |
-
x_alpha = x_alpha + (alpha_next - alpha) * d
|
48 |
-
|
49 |
-
return x_alpha
|
50 |
-
|
51 |
-
def set_timesteps(self, num_inference_steps: int):
|
52 |
-
self.num_inference_steps = num_inference_steps
|
53 |
-
|
54 |
-
def add_noise(
|
55 |
-
self,
|
56 |
-
original_samples: torch.FloatTensor,
|
57 |
-
noise: torch.FloatTensor,
|
58 |
-
alpha: torch.FloatTensor,
|
59 |
-
) -> torch.FloatTensor:
|
60 |
-
return original_samples * alpha + noise * (1 - alpha)
|
61 |
-
|
62 |
-
def __len__(self):
|
63 |
-
return self.config.num_train_timesteps
|
64 |
-
|
65 |
-
|
66 |
-
class IADBPipeline(DiffusionPipeline):
|
67 |
-
r"""
|
68 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
69 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
70 |
-
|
71 |
-
Parameters:
|
72 |
-
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
|
73 |
-
scheduler ([`SchedulerMixin`]):
|
74 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
|
75 |
-
[`DDPMScheduler`], or [`DDIMScheduler`].
|
76 |
-
"""
|
77 |
-
|
78 |
-
def __init__(self, unet, scheduler):
|
79 |
-
super().__init__()
|
80 |
-
|
81 |
-
self.register_modules(unet=unet, scheduler=scheduler)
|
82 |
-
|
83 |
-
@torch.no_grad()
|
84 |
-
def __call__(
|
85 |
-
self,
|
86 |
-
batch_size: int = 1,
|
87 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
88 |
-
num_inference_steps: int = 50,
|
89 |
-
output_type: Optional[str] = "pil",
|
90 |
-
return_dict: bool = True,
|
91 |
-
) -> Union[ImagePipelineOutput, Tuple]:
|
92 |
-
r"""
|
93 |
-
Args:
|
94 |
-
batch_size (`int`, *optional*, defaults to 1):
|
95 |
-
The number of images to generate.
|
96 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
97 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
98 |
-
expense of slower inference.
|
99 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
100 |
-
The output format of the generate image. Choose between
|
101 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
102 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
103 |
-
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
|
104 |
-
|
105 |
-
Returns:
|
106 |
-
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
|
107 |
-
True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
|
108 |
-
"""
|
109 |
-
|
110 |
-
# Sample gaussian noise to begin loop
|
111 |
-
if isinstance(self.unet.config.sample_size, int):
|
112 |
-
image_shape = (
|
113 |
-
batch_size,
|
114 |
-
self.unet.config.in_channels,
|
115 |
-
self.unet.config.sample_size,
|
116 |
-
self.unet.config.sample_size,
|
117 |
-
)
|
118 |
-
else:
|
119 |
-
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
|
120 |
-
|
121 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
122 |
-
raise ValueError(
|
123 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
124 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
125 |
-
)
|
126 |
-
|
127 |
-
image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
|
128 |
-
|
129 |
-
# set step values
|
130 |
-
self.scheduler.set_timesteps(num_inference_steps)
|
131 |
-
x_alpha = image.clone()
|
132 |
-
for t in self.progress_bar(range(num_inference_steps)):
|
133 |
-
alpha = t / num_inference_steps
|
134 |
-
|
135 |
-
# 1. predict noise model_output
|
136 |
-
model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample
|
137 |
-
|
138 |
-
# 2. step
|
139 |
-
x_alpha = self.scheduler.step(model_output, t, x_alpha)
|
140 |
-
|
141 |
-
image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
|
142 |
-
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
143 |
-
if output_type == "pil":
|
144 |
-
image = self.numpy_to_pil(image)
|
145 |
-
|
146 |
-
if not return_dict:
|
147 |
-
return (image,)
|
148 |
-
|
149 |
-
return ImagePipelineOutput(images=image)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/vae_flax.py
DELETED
@@ -1,869 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
# JAX implementation of VQGAN from taming-transformers https://github.com/CompVis/taming-transformers
|
16 |
-
|
17 |
-
import math
|
18 |
-
from functools import partial
|
19 |
-
from typing import Tuple
|
20 |
-
|
21 |
-
import flax
|
22 |
-
import flax.linen as nn
|
23 |
-
import jax
|
24 |
-
import jax.numpy as jnp
|
25 |
-
from flax.core.frozen_dict import FrozenDict
|
26 |
-
|
27 |
-
from ..configuration_utils import ConfigMixin, flax_register_to_config
|
28 |
-
from ..utils import BaseOutput
|
29 |
-
from .modeling_flax_utils import FlaxModelMixin
|
30 |
-
|
31 |
-
|
32 |
-
@flax.struct.dataclass
|
33 |
-
class FlaxDecoderOutput(BaseOutput):
|
34 |
-
"""
|
35 |
-
Output of decoding method.
|
36 |
-
|
37 |
-
Args:
|
38 |
-
sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
|
39 |
-
The decoded output sample from the last layer of the model.
|
40 |
-
dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
|
41 |
-
The `dtype` of the parameters.
|
42 |
-
"""
|
43 |
-
|
44 |
-
sample: jnp.ndarray
|
45 |
-
|
46 |
-
|
47 |
-
@flax.struct.dataclass
|
48 |
-
class FlaxAutoencoderKLOutput(BaseOutput):
|
49 |
-
"""
|
50 |
-
Output of AutoencoderKL encoding method.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
latent_dist (`FlaxDiagonalGaussianDistribution`):
|
54 |
-
Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`.
|
55 |
-
`FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution.
|
56 |
-
"""
|
57 |
-
|
58 |
-
latent_dist: "FlaxDiagonalGaussianDistribution"
|
59 |
-
|
60 |
-
|
61 |
-
class FlaxUpsample2D(nn.Module):
|
62 |
-
"""
|
63 |
-
Flax implementation of 2D Upsample layer
|
64 |
-
|
65 |
-
Args:
|
66 |
-
in_channels (`int`):
|
67 |
-
Input channels
|
68 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
69 |
-
Parameters `dtype`
|
70 |
-
"""
|
71 |
-
|
72 |
-
in_channels: int
|
73 |
-
dtype: jnp.dtype = jnp.float32
|
74 |
-
|
75 |
-
def setup(self):
|
76 |
-
self.conv = nn.Conv(
|
77 |
-
self.in_channels,
|
78 |
-
kernel_size=(3, 3),
|
79 |
-
strides=(1, 1),
|
80 |
-
padding=((1, 1), (1, 1)),
|
81 |
-
dtype=self.dtype,
|
82 |
-
)
|
83 |
-
|
84 |
-
def __call__(self, hidden_states):
|
85 |
-
batch, height, width, channels = hidden_states.shape
|
86 |
-
hidden_states = jax.image.resize(
|
87 |
-
hidden_states,
|
88 |
-
shape=(batch, height * 2, width * 2, channels),
|
89 |
-
method="nearest",
|
90 |
-
)
|
91 |
-
hidden_states = self.conv(hidden_states)
|
92 |
-
return hidden_states
|
93 |
-
|
94 |
-
|
95 |
-
class FlaxDownsample2D(nn.Module):
|
96 |
-
"""
|
97 |
-
Flax implementation of 2D Downsample layer
|
98 |
-
|
99 |
-
Args:
|
100 |
-
in_channels (`int`):
|
101 |
-
Input channels
|
102 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
103 |
-
Parameters `dtype`
|
104 |
-
"""
|
105 |
-
|
106 |
-
in_channels: int
|
107 |
-
dtype: jnp.dtype = jnp.float32
|
108 |
-
|
109 |
-
def setup(self):
|
110 |
-
self.conv = nn.Conv(
|
111 |
-
self.in_channels,
|
112 |
-
kernel_size=(3, 3),
|
113 |
-
strides=(2, 2),
|
114 |
-
padding="VALID",
|
115 |
-
dtype=self.dtype,
|
116 |
-
)
|
117 |
-
|
118 |
-
def __call__(self, hidden_states):
|
119 |
-
pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
|
120 |
-
hidden_states = jnp.pad(hidden_states, pad_width=pad)
|
121 |
-
hidden_states = self.conv(hidden_states)
|
122 |
-
return hidden_states
|
123 |
-
|
124 |
-
|
125 |
-
class FlaxResnetBlock2D(nn.Module):
|
126 |
-
"""
|
127 |
-
Flax implementation of 2D Resnet Block.
|
128 |
-
|
129 |
-
Args:
|
130 |
-
in_channels (`int`):
|
131 |
-
Input channels
|
132 |
-
out_channels (`int`):
|
133 |
-
Output channels
|
134 |
-
dropout (:obj:`float`, *optional*, defaults to 0.0):
|
135 |
-
Dropout rate
|
136 |
-
groups (:obj:`int`, *optional*, defaults to `32`):
|
137 |
-
The number of groups to use for group norm.
|
138 |
-
use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`):
|
139 |
-
Whether to use `nin_shortcut`. This activates a new layer inside ResNet block
|
140 |
-
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
|
141 |
-
Parameters `dtype`
|
142 |
-
"""
|
143 |
-
|
144 |
-
in_channels: int
|
145 |
-
out_channels: int = None
|
146 |
-
dropout: float = 0.0
|
147 |
-
groups: int = 32
|
148 |
-
use_nin_shortcut: bool = None
|
149 |
-
dtype: jnp.dtype = jnp.float32
|
150 |
-
|
151 |
-
def setup(self):
|
152 |
-
out_channels = self.in_channels if self.out_channels is None else self.out_channels
|
153 |
-
|
154 |
-
self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
|
155 |
-
self.conv1 = nn.Conv(
|
156 |
-
out_channels,
|
157 |
-
kernel_size=(3, 3),
|
158 |
-
strides=(1, 1),
|
159 |
-
padding=((1, 1), (1, 1)),
|
160 |
-
dtype=self.dtype,
|
161 |
-
)
|
162 |
-
|
163 |
-
self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6)
|
164 |
-
self.dropout_layer = nn.Dropout(self.dropout)
|
165 |
-
self.conv2 = nn.Conv(
|
166 |
-
out_channels,
|
167 |
-
kernel_size=(3, 3),
|
168 |
-
strides=(1, 1),
|
169 |
-
padding=((1, 1), (1, 1)),
|
170 |
-
dtype=self.dtype,
|
171 |
-
)
|
172 |
-
|
173 |
-
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
|
174 |
-
|
175 |
-
self.conv_shortcut = None
|
176 |
-
if use_nin_shortcut:
|
177 |
-
self.conv_shortcut = nn.Conv(
|
178 |
-
out_channels,
|
179 |
-
kernel_size=(1, 1),
|
180 |
-
strides=(1, 1),
|
181 |
-
padding="VALID",
|
182 |
-
dtype=self.dtype,
|
183 |
-
)
|
184 |
-
|
185 |
-
def __call__(self, hidden_states, deterministic=True):
    """Apply two norm -> swish -> conv stages and add the (optionally projected) residual."""
    shortcut = hidden_states

    # First stage.
    out = self.conv1(nn.swish(self.norm1(hidden_states)))

    # Second stage, with dropout between the activation and the convolution.
    out = nn.swish(self.norm2(out))
    out = self.dropout_layer(out, deterministic)
    out = self.conv2(out)

    # Match residual channels to the main path when a projection was built.
    if self.conv_shortcut is not None:
        shortcut = self.conv_shortcut(shortcut)

    return out + shortcut
|
200 |
-
|
201 |
-
|
202 |
-
class FlaxAttentionBlock(nn.Module):
    r"""
    Multi-head self-attention block operating on channels-last feature maps,
    used inside the diffusion-based VAE.

    Parameters:
        channels (:obj:`int`):
            Number of input (and output) channels.
        num_head_channels (:obj:`int`, *optional*, defaults to `None`):
            Channels per attention head; a single head is used when `None`.
        num_groups (:obj:`int`, *optional*, defaults to `32`):
            Group count for the group norm applied before attention.
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`

    """
    channels: int
    num_head_channels: int = None
    num_groups: int = 32
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # One head unless a per-head channel count was supplied.
        if self.num_head_channels is not None:
            self.num_heads = self.channels // self.num_head_channels
        else:
            self.num_heads = 1

        make_dense = partial(nn.Dense, self.channels, dtype=self.dtype)

        self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6)
        self.query, self.key, self.value = make_dense(), make_dense(), make_dense()
        self.proj_attn = make_dense()

    def transpose_for_scores(self, projection):
        """Split the channel axis into heads and move heads before tokens."""
        # (B, T, H * D) -> (B, T, H, D)
        split_shape = projection.shape[:-1] + (self.num_heads, -1)
        per_head = projection.reshape(split_shape)
        # (B, T, H, D) -> (B, H, T, D)
        return jnp.transpose(per_head, (0, 2, 1, 3))

    def __call__(self, hidden_states):
        shortcut = hidden_states
        batch, height, width, channels = hidden_states.shape

        hidden_states = self.group_norm(hidden_states)

        # Flatten the spatial grid into a token sequence.
        hidden_states = hidden_states.reshape((batch, height * width, channels))

        # Project then split into heads.
        query = self.transpose_for_scores(self.query(hidden_states))
        key = self.transpose_for_scores(self.key(hidden_states))
        value = self.transpose_for_scores(self.value(hidden_states))

        # Scale each operand by d**-0.25 so the product carries the usual d**-0.5.
        scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads))
        attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale)
        attn_weights = nn.softmax(attn_weights, axis=-1)

        # Weighted sum of values.
        hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights)

        # (B, H, T, D) -> (B, T, H, D) -> (B, T, H * D)
        hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3))
        hidden_states = hidden_states.reshape(hidden_states.shape[:-2] + (self.channels,))

        hidden_states = self.proj_attn(hidden_states)
        hidden_states = hidden_states.reshape((batch, height, width, channels))
        return hidden_states + shortcut
|
272 |
-
|
273 |
-
|
274 |
-
class FlaxDownEncoderBlock2D(nn.Module):
    r"""
    Encoder stage for the diffusion-based VAE: a stack of ResNet blocks,
    optionally followed by a 2x spatial downsample.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        out_channels (:obj:`int`):
            Output channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of ResNet blocks in this stage
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            Group count for the ResNet blocks' group norms
        add_downsample (:obj:`bool`, *optional*, defaults to `True`):
            Whether to append a downsampling layer
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Only the first block consumes `in_channels`; later blocks run
        # entirely at `out_channels`.
        self.resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels if i == 0 else self.out_channels,
                out_channels=self.out_channels,
                dropout=self.dropout,
                groups=self.resnet_groups,
                dtype=self.dtype,
            )
            for i in range(self.num_layers)
        ]

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        for res_block in self.resnets:
            hidden_states = res_block(hidden_states, deterministic=deterministic)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)

        return hidden_states
|
328 |
-
|
329 |
-
|
330 |
-
class FlaxUpDecoderBlock2D(nn.Module):
    r"""
    Decoder stage for the diffusion-based VAE: a stack of ResNet blocks,
    optionally followed by a 2x spatial upsample.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        out_channels (:obj:`int`):
            Output channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of ResNet blocks in this stage
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            Group count for the ResNet blocks' group norms
        add_upsample (:obj:`bool`, *optional*, defaults to `True`):
            Whether to append an upsampling layer
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Only the first block consumes `in_channels`; later blocks run
        # entirely at `out_channels`.
        self.resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels if i == 0 else self.out_channels,
                out_channels=self.out_channels,
                dropout=self.dropout,
                groups=self.resnet_groups,
                dtype=self.dtype,
            )
            for i in range(self.num_layers)
        ]

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, deterministic=True):
        for res_block in self.resnets:
            hidden_states = res_block(hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
|
384 |
-
|
385 |
-
|
386 |
-
class FlaxUNetMidBlock2D(nn.Module):
    r"""
    Flax UNet mid block: a leading ResNet block followed by `num_layers`
    (attention, ResNet) pairs, all at a constant channel width.

    Parameters:
        in_channels (:obj:`int`):
            Input channels
        dropout (:obj:`float`, *optional*, defaults to 0.0):
            Dropout rate
        num_layers (:obj:`int`, *optional*, defaults to 1):
            Number of (attention, ResNet) pairs after the first ResNet block
        resnet_groups (:obj:`int`, *optional*, defaults to `32`):
            Group count for the ResNet and attention group norms
        num_attention_heads (:obj:`int`, *optional*, defaults to `1`):
            Number of attention heads for each attention block
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    resnet_groups: int = 32
    num_attention_heads: int = 1
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Derive a group count from the channel width when none was supplied.
        resnet_groups = min(self.in_channels // 4, 32) if self.resnet_groups is None else self.resnet_groups

        # All ResNet blocks here are identity-width.
        resnet_kwargs = dict(
            in_channels=self.in_channels,
            out_channels=self.in_channels,
            dropout=self.dropout,
            groups=resnet_groups,
            dtype=self.dtype,
        )

        # One leading resnet, then an (attention, resnet) pair per layer.
        resnets = [FlaxResnetBlock2D(**resnet_kwargs)]
        attentions = []

        for _ in range(self.num_layers):
            attentions.append(
                FlaxAttentionBlock(
                    channels=self.in_channels,
                    num_head_channels=self.num_attention_heads,
                    num_groups=resnet_groups,
                    dtype=self.dtype,
                )
            )
            resnets.append(FlaxResnetBlock2D(**resnet_kwargs))

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, deterministic=deterministic)
        for attn, res_block in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states)
            hidden_states = res_block(hidden_states, deterministic=deterministic)

        return hidden_states
|
455 |
-
|
456 |
-
|
457 |
-
class FlaxEncoder(nn.Module):
    r"""
    Flax implementation of the VAE encoder: an input convolution, a tower of
    downsampling stages, a mid block with attention, and an output projection.

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:
    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        in_channels (:obj:`int`, *optional*, defaults to 3):
            Input channels
        out_channels (:obj:`int`, *optional*, defaults to 3):
            Output channels
        down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
            DownEncoder block type
        block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`):
            Tuple containing the number of output channels for each block
        layers_per_block (:obj:`int`, *optional*, defaults to `2`):
            Number of Resnet layer for each block
        norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
            norm num group
        act_fn (:obj:`str`, *optional*, defaults to `silu`):
            Activation function
        double_z (:obj:`bool`, *optional*, defaults to `False`):
            Whether to double the last output channels
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            Parameters `dtype`
    """
    in_channels: int = 3
    out_channels: int = 3
    down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    act_fn: str = "silu"
    double_z: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        block_out_channels = self.block_out_channels

        # Input projection to the first feature width.
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Downsampling tower: one stage per entry in `down_block_types`.
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, _ in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            # The last stage keeps the spatial resolution.
            is_final_block = i == len(block_out_channels) - 1

            down_blocks.append(
                FlaxDownEncoderBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    num_layers=self.layers_per_block,
                    resnet_groups=self.norm_num_groups,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            )
        self.down_blocks = down_blocks

        # Mid block at the widest feature width.
        self.mid_block = FlaxUNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_groups=self.norm_num_groups,
            num_attention_heads=None,
            dtype=self.dtype,
        )

        # Output head; `double_z` doubles the channels (mean + logvar latents).
        conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels
        self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
        self.conv_out = nn.Conv(
            conv_out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, sample, deterministic: bool = True):
        """Encode a channels-last batch into latent features."""
        hidden = self.conv_in(sample)

        for down_block in self.down_blocks:
            hidden = down_block(hidden, deterministic=deterministic)

        hidden = self.mid_block(hidden, deterministic=deterministic)

        hidden = self.conv_norm_out(hidden)
        hidden = nn.swish(hidden)
        return self.conv_out(hidden)
|
567 |
-
|
568 |
-
|
569 |
-
class FlaxDecoder(nn.Module):
    r"""
    Flax Implementation of VAE Decoder.

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:
    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        in_channels (:obj:`int`, *optional*, defaults to 3):
            Input channels
        out_channels (:obj:`int`, *optional*, defaults to 3):
            Output channels
        up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
            UpDecoder block type
        block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`):
            Tuple containing the number of output channels for each block
        layers_per_block (:obj:`int`, *optional*, defaults to `2`):
            Number of Resnet layer for each block
        norm_num_groups (:obj:`int`, *optional*, defaults to `32`):
            norm num group
        act_fn (:obj:`str`, *optional*, defaults to `silu`):
            Activation function
        dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):
            parameters `dtype`
    """
    in_channels: int = 3
    out_channels: int = 3
    up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
    # Annotation fixed: the default is a tuple, matching FlaxEncoder's field.
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    act_fn: str = "silu"
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        block_out_channels = self.block_out_channels

        # z to block_in: project the latent input to the widest feature width.
        self.conv_in = nn.Conv(
            block_out_channels[-1],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # middle
        self.mid_block = FlaxUNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_groups=self.norm_num_groups,
            num_attention_heads=None,
            dtype=self.dtype,
        )

        # Upsampling tower mirrors the encoder: widest channels first.
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        up_blocks = []
        for i, _ in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            # The last stage keeps the spatial resolution.
            is_final_block = i == len(block_out_channels) - 1

            up_block = FlaxUpDecoderBlock2D(
                in_channels=prev_output_channel,
                out_channels=output_channel,
                num_layers=self.layers_per_block + 1,
                resnet_groups=self.norm_num_groups,
                add_upsample=not is_final_block,
                dtype=self.dtype,
            )
            up_blocks.append(up_block)

        self.up_blocks = up_blocks

        # end
        self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, sample, deterministic: bool = True):
        """Decode latent features back to a channels-last image batch."""
        # z to block_in
        sample = self.conv_in(sample)

        # middle
        sample = self.mid_block(sample, deterministic=deterministic)

        # upsampling
        for block in self.up_blocks:
            sample = block(sample, deterministic=deterministic)

        sample = self.conv_norm_out(sample)
        sample = nn.swish(sample)
        sample = self.conv_out(sample)

        return sample
|
681 |
-
|
682 |
-
|
683 |
-
class FlaxDiagonalGaussianDistribution(object):
    """Diagonal Gaussian over channels-last latents, parameterized by stacked mean/logvar."""

    def __init__(self, parameters, deterministic=False):
        # Channels-last layout: mean and log-variance are concatenated on the last axis.
        mean, logvar = jnp.split(parameters, 2, axis=-1)
        self.mean = mean
        # Clamp log-variance to a numerically safe range.
        self.logvar = jnp.clip(logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = jnp.exp(0.5 * self.logvar)
        self.var = jnp.exp(self.logvar)
        if deterministic:
            # Collapse to a point mass at the mean.
            self.var = self.std = jnp.zeros_like(self.mean)

    def sample(self, key):
        """Draw one reparameterized sample with the given PRNG key."""
        noise = jax.random.normal(key, self.mean.shape)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL divergence to `other`, or to the standard normal when `other` is None."""
        if self.deterministic:
            return jnp.array([0.0])

        if other is None:
            return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3])

        return 0.5 * jnp.sum(
            jnp.square(self.mean - other.mean) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            axis=[1, 2, 3],
        )

    def nll(self, sample, axis=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this distribution."""
        if self.deterministic:
            return jnp.array([0.0])

        logtwopi = jnp.log(2.0 * jnp.pi)
        return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis)

    def mode(self):
        """Most likely value (the mean)."""
        return self.mean
|
718 |
-
|
719 |
-
|
720 |
-
@flax_register_to_config
class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    Flax implementation of a VAE model with KL loss for decoding latent representations.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it's generic methods
    implemented for all models (such as downloading or saving).

    This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matter related to its
    general usage and behavior.

    Inherent JAX features such as the following are supported:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        in_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3):
            Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[str]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`):
            Number of ResNet layer for each block.
        act_fn (`str`, *optional*, defaults to `silu`):
            The activation function to use.
        latent_channels (`int`, *optional*, defaults to `4`):
            Number of channels in the latent space.
        norm_num_groups (`int`, *optional*, defaults to `32`):
            The number of groups for normalization.
        sample_size (`int`, *optional*, defaults to 32):
            Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            The `dtype` of the parameters.
    """
    in_channels: int = 3
    out_channels: int = 3
    down_block_types: Tuple[str] = ("DownEncoderBlock2D",)
    up_block_types: Tuple[str] = ("UpDecoderBlock2D",)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 1
    act_fn: str = "silu"
    latent_channels: int = 4
    norm_num_groups: int = 32
    sample_size: int = 32
    scaling_factor: float = 0.18215
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Encoder emits 2 * latent_channels (mean + logvar) because double_z=True.
        self.encoder = FlaxEncoder(
            in_channels=self.config.in_channels,
            out_channels=self.config.latent_channels,
            down_block_types=self.config.down_block_types,
            block_out_channels=self.config.block_out_channels,
            layers_per_block=self.config.layers_per_block,
            act_fn=self.config.act_fn,
            norm_num_groups=self.config.norm_num_groups,
            double_z=True,
            dtype=self.dtype,
        )
        self.decoder = FlaxDecoder(
            in_channels=self.config.latent_channels,
            out_channels=self.config.out_channels,
            up_block_types=self.config.up_block_types,
            block_out_channels=self.config.block_out_channels,
            layers_per_block=self.config.layers_per_block,
            norm_num_groups=self.config.norm_num_groups,
            act_fn=self.config.act_fn,
            dtype=self.dtype,
        )
        # 1x1 convolutions mapping encoder features to/from distribution parameters.
        self.quant_conv = nn.Conv(
            2 * self.config.latent_channels,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding="VALID",
            dtype=self.dtype,
        )
        self.post_quant_conv = nn.Conv(
            self.config.latent_channels,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding="VALID",
            dtype=self.dtype,
        )

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize and return the model parameters from a single PRNG key."""
        # init input tensors (NCHW; `encode` transposes to channels-last internally)
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)

        params_rng, dropout_rng, gaussian_rng = jax.random.split(rng, 3)
        rngs = {"params": params_rng, "dropout": dropout_rng, "gaussian": gaussian_rng}

        return self.init(rngs, sample)["params"]

    def encode(self, sample, deterministic: bool = True, return_dict: bool = True):
        """Encode an NCHW batch into a latent posterior distribution."""
        # NCHW -> NHWC: Flax convolutions expect channels-last.
        sample = jnp.transpose(sample, (0, 2, 3, 1))

        hidden_states = self.encoder(sample, deterministic=deterministic)
        moments = self.quant_conv(hidden_states)
        posterior = FlaxDiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return FlaxAutoencoderKLOutput(latent_dist=posterior)

    def decode(self, latents, deterministic: bool = True, return_dict: bool = True):
        """Decode latents into an NCHW image batch."""
        # Accept NCHW latents too; transpose to channels-last when needed.
        if latents.shape[-1] != self.config.latent_channels:
            latents = jnp.transpose(latents, (0, 2, 3, 1))

        hidden_states = self.post_quant_conv(latents)
        hidden_states = self.decoder(hidden_states, deterministic=deterministic)

        # NHWC -> NCHW for the caller.
        hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2))

        if not return_dict:
            return (hidden_states,)

        return FlaxDecoderOutput(sample=hidden_states)

    def __call__(self, sample, sample_posterior=False, deterministic: bool = True, return_dict: bool = True):
        """Encode, optionally sample the posterior, decode, and return the reconstruction."""
        # Fix: always request dict outputs from the inner calls so the attribute
        # accesses below are valid; previously, passing return_dict=False through
        # made `posterior.latent_dist` / `.sample` raise on the returned tuples.
        posterior = self.encode(sample, deterministic=deterministic, return_dict=True)
        if sample_posterior:
            rng = self.make_rng("gaussian")
            hidden_states = posterior.latent_dist.sample(rng)
        else:
            hidden_states = posterior.latent_dist.mode()

        sample = self.decode(hidden_states, return_dict=True).sample

        if not return_dict:
            return (sample,)

        return FlaxDecoderOutput(sample=sample)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py
DELETED
@@ -1,561 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
from typing import Callable, List, Optional, Union
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import PIL
|
20 |
-
import torch
|
21 |
-
from transformers import CLIPImageProcessor, CLIPTokenizer
|
22 |
-
|
23 |
-
from ...configuration_utils import FrozenDict
|
24 |
-
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
25 |
-
from ...utils import PIL_INTERPOLATION, deprecate, logging
|
26 |
-
from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel
|
27 |
-
from ..pipeline_utils import DiffusionPipeline
|
28 |
-
from . import StableDiffusionPipelineOutput
|
29 |
-
|
30 |
-
|
31 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
32 |
-
|
33 |
-
|
34 |
-
NUM_UNET_INPUT_CHANNELS = 9
|
35 |
-
NUM_LATENT_CHANNELS = 4
|
36 |
-
|
37 |
-
|
38 |
-
def prepare_mask_and_masked_image(image, mask, latents_shape):
|
39 |
-
image = np.array(image.convert("RGB").resize((latents_shape[1] * 8, latents_shape[0] * 8)))
|
40 |
-
image = image[None].transpose(0, 3, 1, 2)
|
41 |
-
image = image.astype(np.float32) / 127.5 - 1.0
|
42 |
-
|
43 |
-
image_mask = np.array(mask.convert("L").resize((latents_shape[1] * 8, latents_shape[0] * 8)))
|
44 |
-
masked_image = image * (image_mask < 127.5)
|
45 |
-
|
46 |
-
mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION["nearest"])
|
47 |
-
mask = np.array(mask.convert("L"))
|
48 |
-
mask = mask.astype(np.float32) / 255.0
|
49 |
-
mask = mask[None, None]
|
50 |
-
mask[mask < 0.5] = 0
|
51 |
-
mask[mask >= 0.5] = 1
|
52 |
-
|
53 |
-
return mask, masked_image
|
54 |
-
|
55 |
-
|
56 |
-
class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline):
|
57 |
-
r"""
|
58 |
-
Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
|
59 |
-
|
60 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
61 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
62 |
-
|
63 |
-
Args:
|
64 |
-
vae ([`AutoencoderKL`]):
|
65 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
66 |
-
text_encoder ([`CLIPTextModel`]):
|
67 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
68 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
69 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
70 |
-
tokenizer (`CLIPTokenizer`):
|
71 |
-
Tokenizer of class
|
72 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
73 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
74 |
-
scheduler ([`SchedulerMixin`]):
|
75 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
76 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
77 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
78 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
79 |
-
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
80 |
-
feature_extractor ([`CLIPImageProcessor`]):
|
81 |
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
82 |
-
"""
|
83 |
-
vae_encoder: OnnxRuntimeModel
|
84 |
-
vae_decoder: OnnxRuntimeModel
|
85 |
-
text_encoder: OnnxRuntimeModel
|
86 |
-
tokenizer: CLIPTokenizer
|
87 |
-
unet: OnnxRuntimeModel
|
88 |
-
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
|
89 |
-
safety_checker: OnnxRuntimeModel
|
90 |
-
feature_extractor: CLIPImageProcessor
|
91 |
-
|
92 |
-
_optional_components = ["safety_checker", "feature_extractor"]
|
93 |
-
_is_onnx = True
|
94 |
-
|
95 |
-
def __init__(
|
96 |
-
self,
|
97 |
-
vae_encoder: OnnxRuntimeModel,
|
98 |
-
vae_decoder: OnnxRuntimeModel,
|
99 |
-
text_encoder: OnnxRuntimeModel,
|
100 |
-
tokenizer: CLIPTokenizer,
|
101 |
-
unet: OnnxRuntimeModel,
|
102 |
-
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
103 |
-
safety_checker: OnnxRuntimeModel,
|
104 |
-
feature_extractor: CLIPImageProcessor,
|
105 |
-
requires_safety_checker: bool = True,
|
106 |
-
):
|
107 |
-
super().__init__()
|
108 |
-
logger.info("`OnnxStableDiffusionInpaintPipeline` is experimental and will very likely change in the future.")
|
109 |
-
|
110 |
-
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
111 |
-
deprecation_message = (
|
112 |
-
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
113 |
-
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
114 |
-
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
115 |
-
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
116 |
-
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
117 |
-
" file"
|
118 |
-
)
|
119 |
-
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
120 |
-
new_config = dict(scheduler.config)
|
121 |
-
new_config["steps_offset"] = 1
|
122 |
-
scheduler._internal_dict = FrozenDict(new_config)
|
123 |
-
|
124 |
-
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
|
125 |
-
deprecation_message = (
|
126 |
-
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
127 |
-
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
128 |
-
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
129 |
-
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
130 |
-
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
131 |
-
)
|
132 |
-
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
133 |
-
new_config = dict(scheduler.config)
|
134 |
-
new_config["clip_sample"] = False
|
135 |
-
scheduler._internal_dict = FrozenDict(new_config)
|
136 |
-
|
137 |
-
if safety_checker is None and requires_safety_checker:
|
138 |
-
logger.warning(
|
139 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
140 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
141 |
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
142 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
143 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
144 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
145 |
-
)
|
146 |
-
|
147 |
-
if safety_checker is not None and feature_extractor is None:
|
148 |
-
raise ValueError(
|
149 |
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
150 |
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
151 |
-
)
|
152 |
-
|
153 |
-
self.register_modules(
|
154 |
-
vae_encoder=vae_encoder,
|
155 |
-
vae_decoder=vae_decoder,
|
156 |
-
text_encoder=text_encoder,
|
157 |
-
tokenizer=tokenizer,
|
158 |
-
unet=unet,
|
159 |
-
scheduler=scheduler,
|
160 |
-
safety_checker=safety_checker,
|
161 |
-
feature_extractor=feature_extractor,
|
162 |
-
)
|
163 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
164 |
-
|
165 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt
|
166 |
-
def _encode_prompt(
|
167 |
-
self,
|
168 |
-
prompt: Union[str, List[str]],
|
169 |
-
num_images_per_prompt: Optional[int],
|
170 |
-
do_classifier_free_guidance: bool,
|
171 |
-
negative_prompt: Optional[str],
|
172 |
-
prompt_embeds: Optional[np.ndarray] = None,
|
173 |
-
negative_prompt_embeds: Optional[np.ndarray] = None,
|
174 |
-
):
|
175 |
-
r"""
|
176 |
-
Encodes the prompt into text encoder hidden states.
|
177 |
-
|
178 |
-
Args:
|
179 |
-
prompt (`str` or `List[str]`):
|
180 |
-
prompt to be encoded
|
181 |
-
num_images_per_prompt (`int`):
|
182 |
-
number of images that should be generated per prompt
|
183 |
-
do_classifier_free_guidance (`bool`):
|
184 |
-
whether to use classifier free guidance or not
|
185 |
-
negative_prompt (`str` or `List[str]`):
|
186 |
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
187 |
-
if `guidance_scale` is less than `1`).
|
188 |
-
prompt_embeds (`np.ndarray`, *optional*):
|
189 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
190 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
191 |
-
negative_prompt_embeds (`np.ndarray`, *optional*):
|
192 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
193 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
194 |
-
argument.
|
195 |
-
"""
|
196 |
-
if prompt is not None and isinstance(prompt, str):
|
197 |
-
batch_size = 1
|
198 |
-
elif prompt is not None and isinstance(prompt, list):
|
199 |
-
batch_size = len(prompt)
|
200 |
-
else:
|
201 |
-
batch_size = prompt_embeds.shape[0]
|
202 |
-
|
203 |
-
if prompt_embeds is None:
|
204 |
-
# get prompt text embeddings
|
205 |
-
text_inputs = self.tokenizer(
|
206 |
-
prompt,
|
207 |
-
padding="max_length",
|
208 |
-
max_length=self.tokenizer.model_max_length,
|
209 |
-
truncation=True,
|
210 |
-
return_tensors="np",
|
211 |
-
)
|
212 |
-
text_input_ids = text_inputs.input_ids
|
213 |
-
untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
|
214 |
-
|
215 |
-
if not np.array_equal(text_input_ids, untruncated_ids):
|
216 |
-
removed_text = self.tokenizer.batch_decode(
|
217 |
-
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
218 |
-
)
|
219 |
-
logger.warning(
|
220 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
221 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
222 |
-
)
|
223 |
-
|
224 |
-
prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
|
225 |
-
|
226 |
-
prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
|
227 |
-
|
228 |
-
# get unconditional embeddings for classifier free guidance
|
229 |
-
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
230 |
-
uncond_tokens: List[str]
|
231 |
-
if negative_prompt is None:
|
232 |
-
uncond_tokens = [""] * batch_size
|
233 |
-
elif type(prompt) is not type(negative_prompt):
|
234 |
-
raise TypeError(
|
235 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
236 |
-
f" {type(prompt)}."
|
237 |
-
)
|
238 |
-
elif isinstance(negative_prompt, str):
|
239 |
-
uncond_tokens = [negative_prompt] * batch_size
|
240 |
-
elif batch_size != len(negative_prompt):
|
241 |
-
raise ValueError(
|
242 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
243 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
244 |
-
" the batch size of `prompt`."
|
245 |
-
)
|
246 |
-
else:
|
247 |
-
uncond_tokens = negative_prompt
|
248 |
-
|
249 |
-
max_length = prompt_embeds.shape[1]
|
250 |
-
uncond_input = self.tokenizer(
|
251 |
-
uncond_tokens,
|
252 |
-
padding="max_length",
|
253 |
-
max_length=max_length,
|
254 |
-
truncation=True,
|
255 |
-
return_tensors="np",
|
256 |
-
)
|
257 |
-
negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
|
258 |
-
|
259 |
-
if do_classifier_free_guidance:
|
260 |
-
negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
|
261 |
-
|
262 |
-
# For classifier free guidance, we need to do two forward passes.
|
263 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
264 |
-
# to avoid doing two forward passes
|
265 |
-
prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
|
266 |
-
|
267 |
-
return prompt_embeds
|
268 |
-
|
269 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline.check_inputs
|
270 |
-
def check_inputs(
|
271 |
-
self,
|
272 |
-
prompt: Union[str, List[str]],
|
273 |
-
height: Optional[int],
|
274 |
-
width: Optional[int],
|
275 |
-
callback_steps: int,
|
276 |
-
negative_prompt: Optional[str] = None,
|
277 |
-
prompt_embeds: Optional[np.ndarray] = None,
|
278 |
-
negative_prompt_embeds: Optional[np.ndarray] = None,
|
279 |
-
):
|
280 |
-
if height % 8 != 0 or width % 8 != 0:
|
281 |
-
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
282 |
-
|
283 |
-
if (callback_steps is None) or (
|
284 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
285 |
-
):
|
286 |
-
raise ValueError(
|
287 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
288 |
-
f" {type(callback_steps)}."
|
289 |
-
)
|
290 |
-
|
291 |
-
if prompt is not None and prompt_embeds is not None:
|
292 |
-
raise ValueError(
|
293 |
-
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
294 |
-
" only forward one of the two."
|
295 |
-
)
|
296 |
-
elif prompt is None and prompt_embeds is None:
|
297 |
-
raise ValueError(
|
298 |
-
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
299 |
-
)
|
300 |
-
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
301 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
302 |
-
|
303 |
-
if negative_prompt is not None and negative_prompt_embeds is not None:
|
304 |
-
raise ValueError(
|
305 |
-
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
306 |
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
307 |
-
)
|
308 |
-
|
309 |
-
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
310 |
-
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
311 |
-
raise ValueError(
|
312 |
-
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
313 |
-
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
314 |
-
f" {negative_prompt_embeds.shape}."
|
315 |
-
)
|
316 |
-
|
317 |
-
@torch.no_grad()
|
318 |
-
def __call__(
|
319 |
-
self,
|
320 |
-
prompt: Union[str, List[str]],
|
321 |
-
image: PIL.Image.Image,
|
322 |
-
mask_image: PIL.Image.Image,
|
323 |
-
height: Optional[int] = 512,
|
324 |
-
width: Optional[int] = 512,
|
325 |
-
num_inference_steps: int = 50,
|
326 |
-
guidance_scale: float = 7.5,
|
327 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
328 |
-
num_images_per_prompt: Optional[int] = 1,
|
329 |
-
eta: float = 0.0,
|
330 |
-
generator: Optional[np.random.RandomState] = None,
|
331 |
-
latents: Optional[np.ndarray] = None,
|
332 |
-
prompt_embeds: Optional[np.ndarray] = None,
|
333 |
-
negative_prompt_embeds: Optional[np.ndarray] = None,
|
334 |
-
output_type: Optional[str] = "pil",
|
335 |
-
return_dict: bool = True,
|
336 |
-
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
337 |
-
callback_steps: int = 1,
|
338 |
-
):
|
339 |
-
r"""
|
340 |
-
Function invoked when calling the pipeline for generation.
|
341 |
-
|
342 |
-
Args:
|
343 |
-
prompt (`str` or `List[str]`):
|
344 |
-
The prompt or prompts to guide the image generation.
|
345 |
-
image (`PIL.Image.Image`):
|
346 |
-
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
347 |
-
be masked out with `mask_image` and repainted according to `prompt`.
|
348 |
-
mask_image (`PIL.Image.Image`):
|
349 |
-
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
350 |
-
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
351 |
-
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
352 |
-
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
353 |
-
height (`int`, *optional*, defaults to 512):
|
354 |
-
The height in pixels of the generated image.
|
355 |
-
width (`int`, *optional*, defaults to 512):
|
356 |
-
The width in pixels of the generated image.
|
357 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
358 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
359 |
-
expense of slower inference.
|
360 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
361 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
362 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
363 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
364 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
365 |
-
usually at the expense of lower image quality.
|
366 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
367 |
-
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
368 |
-
if `guidance_scale` is less than `1`).
|
369 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
370 |
-
The number of images to generate per prompt.
|
371 |
-
eta (`float`, *optional*, defaults to 0.0):
|
372 |
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
373 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
374 |
-
generator (`np.random.RandomState`, *optional*):
|
375 |
-
A np.random.RandomState to make generation deterministic.
|
376 |
-
latents (`np.ndarray`, *optional*):
|
377 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
378 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
379 |
-
tensor will ge generated by sampling using the supplied random `generator`.
|
380 |
-
prompt_embeds (`np.ndarray`, *optional*):
|
381 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
382 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
383 |
-
negative_prompt_embeds (`np.ndarray`, *optional*):
|
384 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
385 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
386 |
-
argument.
|
387 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
388 |
-
The output format of the generate image. Choose between
|
389 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
390 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
391 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
392 |
-
plain tuple.
|
393 |
-
callback (`Callable`, *optional*):
|
394 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
395 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
396 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
397 |
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
398 |
-
called at every step.
|
399 |
-
|
400 |
-
Returns:
|
401 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
402 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
403 |
-
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
404 |
-
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
405 |
-
(nsfw) content, according to the `safety_checker`.
|
406 |
-
"""
|
407 |
-
|
408 |
-
# check inputs. Raise error if not correct
|
409 |
-
self.check_inputs(
|
410 |
-
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
411 |
-
)
|
412 |
-
|
413 |
-
# define call parameters
|
414 |
-
if prompt is not None and isinstance(prompt, str):
|
415 |
-
batch_size = 1
|
416 |
-
elif prompt is not None and isinstance(prompt, list):
|
417 |
-
batch_size = len(prompt)
|
418 |
-
else:
|
419 |
-
batch_size = prompt_embeds.shape[0]
|
420 |
-
|
421 |
-
if generator is None:
|
422 |
-
generator = np.random
|
423 |
-
|
424 |
-
# set timesteps
|
425 |
-
self.scheduler.set_timesteps(num_inference_steps)
|
426 |
-
|
427 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
428 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
429 |
-
# corresponds to doing no classifier free guidance.
|
430 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
431 |
-
|
432 |
-
prompt_embeds = self._encode_prompt(
|
433 |
-
prompt,
|
434 |
-
num_images_per_prompt,
|
435 |
-
do_classifier_free_guidance,
|
436 |
-
negative_prompt,
|
437 |
-
prompt_embeds=prompt_embeds,
|
438 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
439 |
-
)
|
440 |
-
|
441 |
-
num_channels_latents = NUM_LATENT_CHANNELS
|
442 |
-
latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
|
443 |
-
latents_dtype = prompt_embeds.dtype
|
444 |
-
if latents is None:
|
445 |
-
latents = generator.randn(*latents_shape).astype(latents_dtype)
|
446 |
-
else:
|
447 |
-
if latents.shape != latents_shape:
|
448 |
-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
449 |
-
|
450 |
-
# prepare mask and masked_image
|
451 |
-
mask, masked_image = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:])
|
452 |
-
mask = mask.astype(latents.dtype)
|
453 |
-
masked_image = masked_image.astype(latents.dtype)
|
454 |
-
|
455 |
-
masked_image_latents = self.vae_encoder(sample=masked_image)[0]
|
456 |
-
masked_image_latents = 0.18215 * masked_image_latents
|
457 |
-
|
458 |
-
# duplicate mask and masked_image_latents for each generation per prompt
|
459 |
-
mask = mask.repeat(batch_size * num_images_per_prompt, 0)
|
460 |
-
masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0)
|
461 |
-
|
462 |
-
mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask
|
463 |
-
masked_image_latents = (
|
464 |
-
np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
465 |
-
)
|
466 |
-
|
467 |
-
num_channels_mask = mask.shape[1]
|
468 |
-
num_channels_masked_image = masked_image_latents.shape[1]
|
469 |
-
|
470 |
-
unet_input_channels = NUM_UNET_INPUT_CHANNELS
|
471 |
-
if num_channels_latents + num_channels_mask + num_channels_masked_image != unet_input_channels:
|
472 |
-
raise ValueError(
|
473 |
-
"Incorrect configuration settings! The config of `pipeline.unet` expects"
|
474 |
-
f" {unet_input_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
475 |
-
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
476 |
-
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
|
477 |
-
" `pipeline.unet` or your `mask_image` or `image` input."
|
478 |
-
)
|
479 |
-
|
480 |
-
# set timesteps
|
481 |
-
self.scheduler.set_timesteps(num_inference_steps)
|
482 |
-
|
483 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
484 |
-
latents = latents * np.float64(self.scheduler.init_noise_sigma)
|
485 |
-
|
486 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
487 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
488 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
489 |
-
# and should be between [0, 1]
|
490 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
491 |
-
extra_step_kwargs = {}
|
492 |
-
if accepts_eta:
|
493 |
-
extra_step_kwargs["eta"] = eta
|
494 |
-
|
495 |
-
timestep_dtype = next(
|
496 |
-
(input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
|
497 |
-
)
|
498 |
-
timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
|
499 |
-
|
500 |
-
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
|
501 |
-
# expand the latents if we are doing classifier free guidance
|
502 |
-
latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
|
503 |
-
# concat latents, mask, masked_image_latnets in the channel dimension
|
504 |
-
latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
|
505 |
-
latent_model_input = latent_model_input.cpu().numpy()
|
506 |
-
latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1)
|
507 |
-
|
508 |
-
# predict the noise residual
|
509 |
-
timestep = np.array([t], dtype=timestep_dtype)
|
510 |
-
noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[
|
511 |
-
0
|
512 |
-
]
|
513 |
-
|
514 |
-
# perform guidance
|
515 |
-
if do_classifier_free_guidance:
|
516 |
-
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
|
517 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
518 |
-
|
519 |
-
# compute the previous noisy sample x_t -> x_t-1
|
520 |
-
scheduler_output = self.scheduler.step(
|
521 |
-
torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
|
522 |
-
)
|
523 |
-
latents = scheduler_output.prev_sample.numpy()
|
524 |
-
|
525 |
-
# call the callback, if provided
|
526 |
-
if callback is not None and i % callback_steps == 0:
|
527 |
-
callback(i, t, latents)
|
528 |
-
|
529 |
-
latents = 1 / 0.18215 * latents
|
530 |
-
# image = self.vae_decoder(latent_sample=latents)[0]
|
531 |
-
# it seems likes there is a strange result for using half-precision vae decoder if batchsize>1
|
532 |
-
image = np.concatenate(
|
533 |
-
[self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
|
534 |
-
)
|
535 |
-
|
536 |
-
image = np.clip(image / 2 + 0.5, 0, 1)
|
537 |
-
image = image.transpose((0, 2, 3, 1))
|
538 |
-
|
539 |
-
if self.safety_checker is not None:
|
540 |
-
safety_checker_input = self.feature_extractor(
|
541 |
-
self.numpy_to_pil(image), return_tensors="np"
|
542 |
-
).pixel_values.astype(image.dtype)
|
543 |
-
# safety_checker does not support batched inputs yet
|
544 |
-
images, has_nsfw_concept = [], []
|
545 |
-
for i in range(image.shape[0]):
|
546 |
-
image_i, has_nsfw_concept_i = self.safety_checker(
|
547 |
-
clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
|
548 |
-
)
|
549 |
-
images.append(image_i)
|
550 |
-
has_nsfw_concept.append(has_nsfw_concept_i[0])
|
551 |
-
image = np.concatenate(images)
|
552 |
-
else:
|
553 |
-
has_nsfw_concept = None
|
554 |
-
|
555 |
-
if output_type == "pil":
|
556 |
-
image = self.numpy_to_pil(image)
|
557 |
-
|
558 |
-
if not return_dict:
|
559 |
-
return (image, has_nsfw_concept)
|
560 |
-
|
561 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting.py
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import random
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
import torch
|
20 |
-
|
21 |
-
from diffusers import IFInpaintingPipeline
|
22 |
-
from diffusers.utils import floats_tensor
|
23 |
-
from diffusers.utils.import_utils import is_xformers_available
|
24 |
-
from diffusers.utils.testing_utils import skip_mps, torch_device
|
25 |
-
|
26 |
-
from ..pipeline_params import (
|
27 |
-
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
|
28 |
-
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
|
29 |
-
)
|
30 |
-
from ..test_pipelines_common import PipelineTesterMixin
|
31 |
-
from . import IFPipelineTesterMixin
|
32 |
-
|
33 |
-
|
34 |
-
@skip_mps
|
35 |
-
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
|
36 |
-
pipeline_class = IFInpaintingPipeline
|
37 |
-
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
|
38 |
-
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
|
39 |
-
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
|
40 |
-
|
41 |
-
def get_dummy_components(self):
|
42 |
-
return self._get_dummy_components()
|
43 |
-
|
44 |
-
def get_dummy_inputs(self, device, seed=0):
|
45 |
-
if str(device).startswith("mps"):
|
46 |
-
generator = torch.manual_seed(seed)
|
47 |
-
else:
|
48 |
-
generator = torch.Generator(device=device).manual_seed(seed)
|
49 |
-
|
50 |
-
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
|
51 |
-
mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
|
52 |
-
|
53 |
-
inputs = {
|
54 |
-
"prompt": "A painting of a squirrel eating a burger",
|
55 |
-
"image": image,
|
56 |
-
"mask_image": mask_image,
|
57 |
-
"generator": generator,
|
58 |
-
"num_inference_steps": 2,
|
59 |
-
"output_type": "numpy",
|
60 |
-
}
|
61 |
-
|
62 |
-
return inputs
|
63 |
-
|
64 |
-
@unittest.skipIf(
|
65 |
-
torch_device != "cuda" or not is_xformers_available(),
|
66 |
-
reason="XFormers attention is only available with CUDA and `xformers` installed",
|
67 |
-
)
|
68 |
-
def test_xformers_attention_forwardGenerator_pass(self):
|
69 |
-
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
|
70 |
-
|
71 |
-
def test_save_load_optional_components(self):
|
72 |
-
self._test_save_load_optional_components()
|
73 |
-
|
74 |
-
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
|
75 |
-
def test_save_load_float16(self):
|
76 |
-
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
|
77 |
-
super().test_save_load_float16(expected_max_diff=1e-1)
|
78 |
-
|
79 |
-
def test_attention_slicing_forward_pass(self):
|
80 |
-
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
|
81 |
-
|
82 |
-
def test_save_load_local(self):
|
83 |
-
self._test_save_load_local()
|
84 |
-
|
85 |
-
def test_inference_batch_single_identical(self):
|
86 |
-
self._test_inference_batch_single_identical(
|
87 |
-
expected_max_diff=1e-2,
|
88 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/overwrite_expected_slice.py
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import argparse
|
16 |
-
from collections import defaultdict
|
17 |
-
|
18 |
-
|
19 |
-
def overwrite_file(file, class_name, test_name, correct_line, done_test):
|
20 |
-
_id = f"{file}_{class_name}_{test_name}"
|
21 |
-
done_test[_id] += 1
|
22 |
-
|
23 |
-
with open(file, "r") as f:
|
24 |
-
lines = f.readlines()
|
25 |
-
|
26 |
-
class_regex = f"class {class_name}("
|
27 |
-
test_regex = f"{4 * ' '}def {test_name}("
|
28 |
-
line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
|
29 |
-
another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
|
30 |
-
in_class = False
|
31 |
-
in_func = False
|
32 |
-
in_line = False
|
33 |
-
insert_line = False
|
34 |
-
count = 0
|
35 |
-
spaces = 0
|
36 |
-
|
37 |
-
new_lines = []
|
38 |
-
for line in lines:
|
39 |
-
if line.startswith(class_regex):
|
40 |
-
in_class = True
|
41 |
-
elif in_class and line.startswith(test_regex):
|
42 |
-
in_func = True
|
43 |
-
elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
|
44 |
-
spaces = len(line.split(correct_line.split()[0])[0])
|
45 |
-
count += 1
|
46 |
-
|
47 |
-
if count == done_test[_id]:
|
48 |
-
in_line = True
|
49 |
-
|
50 |
-
if in_class and in_func and in_line:
|
51 |
-
if ")" not in line:
|
52 |
-
continue
|
53 |
-
else:
|
54 |
-
insert_line = True
|
55 |
-
|
56 |
-
if in_class and in_func and in_line and insert_line:
|
57 |
-
new_lines.append(f"{spaces * ' '}{correct_line}")
|
58 |
-
in_class = in_func = in_line = insert_line = False
|
59 |
-
else:
|
60 |
-
new_lines.append(line)
|
61 |
-
|
62 |
-
with open(file, "w") as f:
|
63 |
-
for line in new_lines:
|
64 |
-
f.write(line)
|
65 |
-
|
66 |
-
|
67 |
-
def main(correct, fail=None):
|
68 |
-
if fail is not None:
|
69 |
-
with open(fail, "r") as f:
|
70 |
-
test_failures = {l.strip() for l in f.readlines()}
|
71 |
-
else:
|
72 |
-
test_failures = None
|
73 |
-
|
74 |
-
with open(correct, "r") as f:
|
75 |
-
correct_lines = f.readlines()
|
76 |
-
|
77 |
-
done_tests = defaultdict(int)
|
78 |
-
for line in correct_lines:
|
79 |
-
file, class_name, test_name, correct_line = line.split(";")
|
80 |
-
if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
|
81 |
-
overwrite_file(file, class_name, test_name, correct_line, done_tests)
|
82 |
-
|
83 |
-
|
84 |
-
if __name__ == "__main__":
|
85 |
-
parser = argparse.ArgumentParser()
|
86 |
-
parser.add_argument("--correct_filename", help="filename of tests with expected result")
|
87 |
-
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
|
88 |
-
args = parser.parse_args()
|
89 |
-
|
90 |
-
main(args.correct_filename, args.fail_filename)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/base.py
DELETED
@@ -1,166 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import numbers
|
3 |
-
from abc import ABCMeta, abstractmethod
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
import torch
|
7 |
-
|
8 |
-
from ..hook import Hook
|
9 |
-
|
10 |
-
|
11 |
-
class LoggerHook(Hook):
|
12 |
-
"""Base class for logger hooks.
|
13 |
-
|
14 |
-
Args:
|
15 |
-
interval (int): Logging interval (every k iterations).
|
16 |
-
ignore_last (bool): Ignore the log of last iterations in each epoch
|
17 |
-
if less than `interval`.
|
18 |
-
reset_flag (bool): Whether to clear the output buffer after logging.
|
19 |
-
by_epoch (bool): Whether EpochBasedRunner is used.
|
20 |
-
"""
|
21 |
-
|
22 |
-
__metaclass__ = ABCMeta
|
23 |
-
|
24 |
-
def __init__(self,
|
25 |
-
interval=10,
|
26 |
-
ignore_last=True,
|
27 |
-
reset_flag=False,
|
28 |
-
by_epoch=True):
|
29 |
-
self.interval = interval
|
30 |
-
self.ignore_last = ignore_last
|
31 |
-
self.reset_flag = reset_flag
|
32 |
-
self.by_epoch = by_epoch
|
33 |
-
|
34 |
-
@abstractmethod
|
35 |
-
def log(self, runner):
|
36 |
-
pass
|
37 |
-
|
38 |
-
@staticmethod
|
39 |
-
def is_scalar(val, include_np=True, include_torch=True):
|
40 |
-
"""Tell the input variable is a scalar or not.
|
41 |
-
|
42 |
-
Args:
|
43 |
-
val: Input variable.
|
44 |
-
include_np (bool): Whether include 0-d np.ndarray as a scalar.
|
45 |
-
include_torch (bool): Whether include 0-d torch.Tensor as a scalar.
|
46 |
-
|
47 |
-
Returns:
|
48 |
-
bool: True or False.
|
49 |
-
"""
|
50 |
-
if isinstance(val, numbers.Number):
|
51 |
-
return True
|
52 |
-
elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
|
53 |
-
return True
|
54 |
-
elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:
|
55 |
-
return True
|
56 |
-
else:
|
57 |
-
return False
|
58 |
-
|
59 |
-
def get_mode(self, runner):
|
60 |
-
if runner.mode == 'train':
|
61 |
-
if 'time' in runner.log_buffer.output:
|
62 |
-
mode = 'train'
|
63 |
-
else:
|
64 |
-
mode = 'val'
|
65 |
-
elif runner.mode == 'val':
|
66 |
-
mode = 'val'
|
67 |
-
else:
|
68 |
-
raise ValueError(f"runner mode should be 'train' or 'val', "
|
69 |
-
f'but got {runner.mode}')
|
70 |
-
return mode
|
71 |
-
|
72 |
-
def get_epoch(self, runner):
|
73 |
-
if runner.mode == 'train':
|
74 |
-
epoch = runner.epoch + 1
|
75 |
-
elif runner.mode == 'val':
|
76 |
-
# normal val mode
|
77 |
-
# runner.epoch += 1 has been done before val workflow
|
78 |
-
epoch = runner.epoch
|
79 |
-
else:
|
80 |
-
raise ValueError(f"runner mode should be 'train' or 'val', "
|
81 |
-
f'but got {runner.mode}')
|
82 |
-
return epoch
|
83 |
-
|
84 |
-
def get_iter(self, runner, inner_iter=False):
|
85 |
-
"""Get the current training iteration step."""
|
86 |
-
if self.by_epoch and inner_iter:
|
87 |
-
current_iter = runner.inner_iter + 1
|
88 |
-
else:
|
89 |
-
current_iter = runner.iter + 1
|
90 |
-
return current_iter
|
91 |
-
|
92 |
-
def get_lr_tags(self, runner):
|
93 |
-
tags = {}
|
94 |
-
lrs = runner.current_lr()
|
95 |
-
if isinstance(lrs, dict):
|
96 |
-
for name, value in lrs.items():
|
97 |
-
tags[f'learning_rate/{name}'] = value[0]
|
98 |
-
else:
|
99 |
-
tags['learning_rate'] = lrs[0]
|
100 |
-
return tags
|
101 |
-
|
102 |
-
def get_momentum_tags(self, runner):
|
103 |
-
tags = {}
|
104 |
-
momentums = runner.current_momentum()
|
105 |
-
if isinstance(momentums, dict):
|
106 |
-
for name, value in momentums.items():
|
107 |
-
tags[f'momentum/{name}'] = value[0]
|
108 |
-
else:
|
109 |
-
tags['momentum'] = momentums[0]
|
110 |
-
return tags
|
111 |
-
|
112 |
-
def get_loggable_tags(self,
|
113 |
-
runner,
|
114 |
-
allow_scalar=True,
|
115 |
-
allow_text=False,
|
116 |
-
add_mode=True,
|
117 |
-
tags_to_skip=('time', 'data_time')):
|
118 |
-
tags = {}
|
119 |
-
for var, val in runner.log_buffer.output.items():
|
120 |
-
if var in tags_to_skip:
|
121 |
-
continue
|
122 |
-
if self.is_scalar(val) and not allow_scalar:
|
123 |
-
continue
|
124 |
-
if isinstance(val, str) and not allow_text:
|
125 |
-
continue
|
126 |
-
if add_mode:
|
127 |
-
var = f'{self.get_mode(runner)}/{var}'
|
128 |
-
tags[var] = val
|
129 |
-
tags.update(self.get_lr_tags(runner))
|
130 |
-
tags.update(self.get_momentum_tags(runner))
|
131 |
-
return tags
|
132 |
-
|
133 |
-
def before_run(self, runner):
|
134 |
-
for hook in runner.hooks[::-1]:
|
135 |
-
if isinstance(hook, LoggerHook):
|
136 |
-
hook.reset_flag = True
|
137 |
-
break
|
138 |
-
|
139 |
-
def before_epoch(self, runner):
|
140 |
-
runner.log_buffer.clear() # clear logs of last epoch
|
141 |
-
|
142 |
-
def after_train_iter(self, runner):
|
143 |
-
if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
|
144 |
-
runner.log_buffer.average(self.interval)
|
145 |
-
elif not self.by_epoch and self.every_n_iters(runner, self.interval):
|
146 |
-
runner.log_buffer.average(self.interval)
|
147 |
-
elif self.end_of_epoch(runner) and not self.ignore_last:
|
148 |
-
# not precise but more stable
|
149 |
-
runner.log_buffer.average(self.interval)
|
150 |
-
|
151 |
-
if runner.log_buffer.ready:
|
152 |
-
self.log(runner)
|
153 |
-
if self.reset_flag:
|
154 |
-
runner.log_buffer.clear_output()
|
155 |
-
|
156 |
-
def after_train_epoch(self, runner):
|
157 |
-
if runner.log_buffer.ready:
|
158 |
-
self.log(runner)
|
159 |
-
if self.reset_flag:
|
160 |
-
runner.log_buffer.clear_output()
|
161 |
-
|
162 |
-
def after_val_epoch(self, runner):
|
163 |
-
runner.log_buffer.average()
|
164 |
-
self.log(runner)
|
165 |
-
if self.reset_flag:
|
166 |
-
runner.log_buffer.clear_output()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/LICENSE.md
DELETED
@@ -1,201 +0,0 @@
|
|
1 |
-
Apache License
|
2 |
-
Version 2.0, January 2004
|
3 |
-
http://www.apache.org/licenses/
|
4 |
-
|
5 |
-
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
6 |
-
|
7 |
-
1. Definitions.
|
8 |
-
|
9 |
-
"License" shall mean the terms and conditions for use, reproduction,
|
10 |
-
and distribution as defined by Sections 1 through 9 of this document.
|
11 |
-
|
12 |
-
"Licensor" shall mean the copyright owner or entity authorized by
|
13 |
-
the copyright owner that is granting the License.
|
14 |
-
|
15 |
-
"Legal Entity" shall mean the union of the acting entity and all
|
16 |
-
other entities that control, are controlled by, or are under common
|
17 |
-
control with that entity. For the purposes of this definition,
|
18 |
-
"control" means (i) the power, direct or indirect, to cause the
|
19 |
-
direction or management of such entity, whether by contract or
|
20 |
-
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
21 |
-
outstanding shares, or (iii) beneficial ownership of such entity.
|
22 |
-
|
23 |
-
"You" (or "Your") shall mean an individual or Legal Entity
|
24 |
-
exercising permissions granted by this License.
|
25 |
-
|
26 |
-
"Source" form shall mean the preferred form for making modifications,
|
27 |
-
including but not limited to software source code, documentation
|
28 |
-
source, and configuration files.
|
29 |
-
|
30 |
-
"Object" form shall mean any form resulting from mechanical
|
31 |
-
transformation or translation of a Source form, including but
|
32 |
-
not limited to compiled object code, generated documentation,
|
33 |
-
and conversions to other media types.
|
34 |
-
|
35 |
-
"Work" shall mean the work of authorship, whether in Source or
|
36 |
-
Object form, made available under the License, as indicated by a
|
37 |
-
copyright notice that is included in or attached to the work
|
38 |
-
(an example is provided in the Appendix below).
|
39 |
-
|
40 |
-
"Derivative Works" shall mean any work, whether in Source or Object
|
41 |
-
form, that is based on (or derived from) the Work and for which the
|
42 |
-
editorial revisions, annotations, elaborations, or other modifications
|
43 |
-
represent, as a whole, an original work of authorship. For the purposes
|
44 |
-
of this License, Derivative Works shall not include works that remain
|
45 |
-
separable from, or merely link (or bind by name) to the interfaces of,
|
46 |
-
the Work and Derivative Works thereof.
|
47 |
-
|
48 |
-
"Contribution" shall mean any work of authorship, including
|
49 |
-
the original version of the Work and any modifications or additions
|
50 |
-
to that Work or Derivative Works thereof, that is intentionally
|
51 |
-
submitted to Licensor for inclusion in the Work by the copyright owner
|
52 |
-
or by an individual or Legal Entity authorized to submit on behalf of
|
53 |
-
the copyright owner. For the purposes of this definition, "submitted"
|
54 |
-
means any form of electronic, verbal, or written communication sent
|
55 |
-
to the Licensor or its representatives, including but not limited to
|
56 |
-
communication on electronic mailing lists, source code control systems,
|
57 |
-
and issue tracking systems that are managed by, or on behalf of, the
|
58 |
-
Licensor for the purpose of discussing and improving the Work, but
|
59 |
-
excluding communication that is conspicuously marked or otherwise
|
60 |
-
designated in writing by the copyright owner as "Not a Contribution."
|
61 |
-
|
62 |
-
"Contributor" shall mean Licensor and any individual or Legal Entity
|
63 |
-
on behalf of whom a Contribution has been received by Licensor and
|
64 |
-
subsequently incorporated within the Work.
|
65 |
-
|
66 |
-
2. Grant of Copyright License. Subject to the terms and conditions of
|
67 |
-
this License, each Contributor hereby grants to You a perpetual,
|
68 |
-
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
69 |
-
copyright license to reproduce, prepare Derivative Works of,
|
70 |
-
publicly display, publicly perform, sublicense, and distribute the
|
71 |
-
Work and such Derivative Works in Source or Object form.
|
72 |
-
|
73 |
-
3. Grant of Patent License. Subject to the terms and conditions of
|
74 |
-
this License, each Contributor hereby grants to You a perpetual,
|
75 |
-
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
76 |
-
(except as stated in this section) patent license to make, have made,
|
77 |
-
use, offer to sell, sell, import, and otherwise transfer the Work,
|
78 |
-
where such license applies only to those patent claims licensable
|
79 |
-
by such Contributor that are necessarily infringed by their
|
80 |
-
Contribution(s) alone or by combination of their Contribution(s)
|
81 |
-
with the Work to which such Contribution(s) was submitted. If You
|
82 |
-
institute patent litigation against any entity (including a
|
83 |
-
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
84 |
-
or a Contribution incorporated within the Work constitutes direct
|
85 |
-
or contributory patent infringement, then any patent licenses
|
86 |
-
granted to You under this License for that Work shall terminate
|
87 |
-
as of the date such litigation is filed.
|
88 |
-
|
89 |
-
4. Redistribution. You may reproduce and distribute copies of the
|
90 |
-
Work or Derivative Works thereof in any medium, with or without
|
91 |
-
modifications, and in Source or Object form, provided that You
|
92 |
-
meet the following conditions:
|
93 |
-
|
94 |
-
(a) You must give any other recipients of the Work or
|
95 |
-
Derivative Works a copy of this License; and
|
96 |
-
|
97 |
-
(b) You must cause any modified files to carry prominent notices
|
98 |
-
stating that You changed the files; and
|
99 |
-
|
100 |
-
(c) You must retain, in the Source form of any Derivative Works
|
101 |
-
that You distribute, all copyright, patent, trademark, and
|
102 |
-
attribution notices from the Source form of the Work,
|
103 |
-
excluding those notices that do not pertain to any part of
|
104 |
-
the Derivative Works; and
|
105 |
-
|
106 |
-
(d) If the Work includes a "NOTICE" text file as part of its
|
107 |
-
distribution, then any Derivative Works that You distribute must
|
108 |
-
include a readable copy of the attribution notices contained
|
109 |
-
within such NOTICE file, excluding those notices that do not
|
110 |
-
pertain to any part of the Derivative Works, in at least one
|
111 |
-
of the following places: within a NOTICE text file distributed
|
112 |
-
as part of the Derivative Works; within the Source form or
|
113 |
-
documentation, if provided along with the Derivative Works; or,
|
114 |
-
within a display generated by the Derivative Works, if and
|
115 |
-
wherever such third-party notices normally appear. The contents
|
116 |
-
of the NOTICE file are for informational purposes only and
|
117 |
-
do not modify the License. You may add Your own attribution
|
118 |
-
notices within Derivative Works that You distribute, alongside
|
119 |
-
or as an addendum to the NOTICE text from the Work, provided
|
120 |
-
that such additional attribution notices cannot be construed
|
121 |
-
as modifying the License.
|
122 |
-
|
123 |
-
You may add Your own copyright statement to Your modifications and
|
124 |
-
may provide additional or different license terms and conditions
|
125 |
-
for use, reproduction, or distribution of Your modifications, or
|
126 |
-
for any such Derivative Works as a whole, provided Your use,
|
127 |
-
reproduction, and distribution of the Work otherwise complies with
|
128 |
-
the conditions stated in this License.
|
129 |
-
|
130 |
-
5. Submission of Contributions. Unless You explicitly state otherwise,
|
131 |
-
any Contribution intentionally submitted for inclusion in the Work
|
132 |
-
by You to the Licensor shall be under the terms and conditions of
|
133 |
-
this License, without any additional terms or conditions.
|
134 |
-
Notwithstanding the above, nothing herein shall supersede or modify
|
135 |
-
the terms of any separate license agreement you may have executed
|
136 |
-
with Licensor regarding such Contributions.
|
137 |
-
|
138 |
-
6. Trademarks. This License does not grant permission to use the trade
|
139 |
-
names, trademarks, service marks, or product names of the Licensor,
|
140 |
-
except as required for reasonable and customary use in describing the
|
141 |
-
origin of the Work and reproducing the content of the NOTICE file.
|
142 |
-
|
143 |
-
7. Disclaimer of Warranty. Unless required by applicable law or
|
144 |
-
agreed to in writing, Licensor provides the Work (and each
|
145 |
-
Contributor provides its Contributions) on an "AS IS" BASIS,
|
146 |
-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
147 |
-
implied, including, without limitation, any warranties or conditions
|
148 |
-
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
149 |
-
PARTICULAR PURPOSE. You are solely responsible for determining the
|
150 |
-
appropriateness of using or redistributing the Work and assume any
|
151 |
-
risks associated with Your exercise of permissions under this License.
|
152 |
-
|
153 |
-
8. Limitation of Liability. In no event and under no legal theory,
|
154 |
-
whether in tort (including negligence), contract, or otherwise,
|
155 |
-
unless required by applicable law (such as deliberate and grossly
|
156 |
-
negligent acts) or agreed to in writing, shall any Contributor be
|
157 |
-
liable to You for damages, including any direct, indirect, special,
|
158 |
-
incidental, or consequential damages of any character arising as a
|
159 |
-
result of this License or out of the use or inability to use the
|
160 |
-
Work (including but not limited to damages for loss of goodwill,
|
161 |
-
work stoppage, computer failure or malfunction, or any and all
|
162 |
-
other commercial damages or losses), even if such Contributor
|
163 |
-
has been advised of the possibility of such damages.
|
164 |
-
|
165 |
-
9. Accepting Warranty or Additional Liability. While redistributing
|
166 |
-
the Work or Derivative Works thereof, You may choose to offer,
|
167 |
-
and charge a fee for, acceptance of support, warranty, indemnity,
|
168 |
-
or other liability obligations and/or rights consistent with this
|
169 |
-
License. However, in accepting such obligations, You may act only
|
170 |
-
on Your own behalf and on Your sole responsibility, not on behalf
|
171 |
-
of any other Contributor, and only if You agree to indemnify,
|
172 |
-
defend, and hold each Contributor harmless for any liability
|
173 |
-
incurred by, or claims asserted against, such Contributor by reason
|
174 |
-
of your accepting any such warranty or additional liability.
|
175 |
-
|
176 |
-
END OF TERMS AND CONDITIONS
|
177 |
-
|
178 |
-
APPENDIX: How to apply the Apache License to your work.
|
179 |
-
|
180 |
-
To apply the Apache License to your work, attach the following
|
181 |
-
boilerplate notice, with the fields enclosed by brackets "[]"
|
182 |
-
replaced with your own identifying information. (Don't include
|
183 |
-
the brackets!) The text should be enclosed in the appropriate
|
184 |
-
comment syntax for the file format. We also recommend that a
|
185 |
-
file or class name and description of purpose be included on the
|
186 |
-
same "printed page" as the copyright notice for easier
|
187 |
-
identification within third-party archives.
|
188 |
-
|
189 |
-
Copyright [yyyy] [name of copyright owner]
|
190 |
-
|
191 |
-
Licensed under the Apache License, Version 2.0 (the "License");
|
192 |
-
you may not use this file except in compliance with the License.
|
193 |
-
You may obtain a copy of the License at
|
194 |
-
|
195 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
196 |
-
|
197 |
-
Unless required by applicable law or agreed to in writing, software
|
198 |
-
distributed under the License is distributed on an "AS IS" BASIS,
|
199 |
-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
200 |
-
See the License for the specific language governing permissions and
|
201 |
-
limitations under the License.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AntNikYab/NaturalLanguageProcessing/pages/pushkin.py
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import textwrap
|
3 |
-
import torch
|
4 |
-
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
5 |
-
|
6 |
-
DEVICE = torch.device("cpu")
|
7 |
-
# Load GPT-2 model and tokenizer
|
8 |
-
tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
|
9 |
-
model_finetuned = GPT2LMHeadModel.from_pretrained(
|
10 |
-
'sberbank-ai/rugpt3small_based_on_gpt2',
|
11 |
-
output_attentions = False,
|
12 |
-
output_hidden_states = False,
|
13 |
-
)
|
14 |
-
if torch.cuda.is_available():
|
15 |
-
model_finetuned.load_state_dict(torch.load('models/model_pushkin.pt'))
|
16 |
-
else:
|
17 |
-
model_finetuned.load_state_dict(torch.load('models/model_pushkin.pt', map_location=torch.device('cpu')))
|
18 |
-
model_finetuned.eval()
|
19 |
-
|
20 |
-
# Function to generate text
|
21 |
-
def generate_text(prompt, temperature, top_p, max_length, top_k):
|
22 |
-
input_ids = tokenizer.encode(prompt, return_tensors="pt")
|
23 |
-
|
24 |
-
with torch.no_grad():
|
25 |
-
out = model_finetuned.generate(
|
26 |
-
input_ids,
|
27 |
-
do_sample=True,
|
28 |
-
num_beams=5,
|
29 |
-
temperature=temperature,
|
30 |
-
top_p=top_p,
|
31 |
-
max_length=max_length,
|
32 |
-
top_k=top_k,
|
33 |
-
no_repeat_ngram_size=3,
|
34 |
-
num_return_sequences=1,
|
35 |
-
)
|
36 |
-
|
37 |
-
generated_text = list(map(tokenizer.decode, out))
|
38 |
-
return generated_text
|
39 |
-
|
40 |
-
# Streamlit app
|
41 |
-
def main():
|
42 |
-
st.title("Генерация текста GPT-моделью в стиле А.С. Пушкина")
|
43 |
-
|
44 |
-
# User inputs
|
45 |
-
prompt = st.text_area("Введите начало текста")
|
46 |
-
temperature = st.slider("Temperature", min_value=0.2, max_value=2.5, value=1.8, step=0.1)
|
47 |
-
top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9, step=0.1)
|
48 |
-
max_length = st.slider("Max Length", min_value=10, max_value=300, value=100, step=10)
|
49 |
-
top_k = st.slider("Top-k", min_value=1, max_value=500, value=500, step=10)
|
50 |
-
num_return_sequences = st.slider("Number of Sequences", min_value=1, max_value=5, value=1, step=1)
|
51 |
-
|
52 |
-
if st.button("Generate Text"):
|
53 |
-
st.subheader("Generated Text:")
|
54 |
-
for i in range(num_return_sequences):
|
55 |
-
generated_text = generate_text(prompt, temperature, top_p, max_length, top_k)
|
56 |
-
st.write(f"Generated Text {i + 1}:")
|
57 |
-
wrapped_text = textwrap.fill(generated_text[0], width=80)
|
58 |
-
st.write(wrapped_text)
|
59 |
-
st.write("------------------")
|
60 |
-
|
61 |
-
st.sidebar.image('images/pushkin.jpeg', use_column_width=True)
|
62 |
-
|
63 |
-
if __name__ == "__main__":
|
64 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/__init__.py
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Package containing all pip commands
|
3 |
-
"""
|
4 |
-
|
5 |
-
import importlib
|
6 |
-
from collections import namedtuple
|
7 |
-
from typing import Any, Dict, Optional
|
8 |
-
|
9 |
-
from pip._internal.cli.base_command import Command
|
10 |
-
|
11 |
-
CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary")
|
12 |
-
|
13 |
-
# This dictionary does a bunch of heavy lifting for help output:
|
14 |
-
# - Enables avoiding additional (costly) imports for presenting `--help`.
|
15 |
-
# - The ordering matters for help display.
|
16 |
-
#
|
17 |
-
# Even though the module path starts with the same "pip._internal.commands"
|
18 |
-
# prefix, the full path makes testing easier (specifically when modifying
|
19 |
-
# `commands_dict` in test setup / teardown).
|
20 |
-
commands_dict: Dict[str, CommandInfo] = {
|
21 |
-
"install": CommandInfo(
|
22 |
-
"pip._internal.commands.install",
|
23 |
-
"InstallCommand",
|
24 |
-
"Install packages.",
|
25 |
-
),
|
26 |
-
"download": CommandInfo(
|
27 |
-
"pip._internal.commands.download",
|
28 |
-
"DownloadCommand",
|
29 |
-
"Download packages.",
|
30 |
-
),
|
31 |
-
"uninstall": CommandInfo(
|
32 |
-
"pip._internal.commands.uninstall",
|
33 |
-
"UninstallCommand",
|
34 |
-
"Uninstall packages.",
|
35 |
-
),
|
36 |
-
"freeze": CommandInfo(
|
37 |
-
"pip._internal.commands.freeze",
|
38 |
-
"FreezeCommand",
|
39 |
-
"Output installed packages in requirements format.",
|
40 |
-
),
|
41 |
-
"inspect": CommandInfo(
|
42 |
-
"pip._internal.commands.inspect",
|
43 |
-
"InspectCommand",
|
44 |
-
"Inspect the python environment.",
|
45 |
-
),
|
46 |
-
"list": CommandInfo(
|
47 |
-
"pip._internal.commands.list",
|
48 |
-
"ListCommand",
|
49 |
-
"List installed packages.",
|
50 |
-
),
|
51 |
-
"show": CommandInfo(
|
52 |
-
"pip._internal.commands.show",
|
53 |
-
"ShowCommand",
|
54 |
-
"Show information about installed packages.",
|
55 |
-
),
|
56 |
-
"check": CommandInfo(
|
57 |
-
"pip._internal.commands.check",
|
58 |
-
"CheckCommand",
|
59 |
-
"Verify installed packages have compatible dependencies.",
|
60 |
-
),
|
61 |
-
"config": CommandInfo(
|
62 |
-
"pip._internal.commands.configuration",
|
63 |
-
"ConfigurationCommand",
|
64 |
-
"Manage local and global configuration.",
|
65 |
-
),
|
66 |
-
"search": CommandInfo(
|
67 |
-
"pip._internal.commands.search",
|
68 |
-
"SearchCommand",
|
69 |
-
"Search PyPI for packages.",
|
70 |
-
),
|
71 |
-
"cache": CommandInfo(
|
72 |
-
"pip._internal.commands.cache",
|
73 |
-
"CacheCommand",
|
74 |
-
"Inspect and manage pip's wheel cache.",
|
75 |
-
),
|
76 |
-
"index": CommandInfo(
|
77 |
-
"pip._internal.commands.index",
|
78 |
-
"IndexCommand",
|
79 |
-
"Inspect information available from package indexes.",
|
80 |
-
),
|
81 |
-
"wheel": CommandInfo(
|
82 |
-
"pip._internal.commands.wheel",
|
83 |
-
"WheelCommand",
|
84 |
-
"Build wheels from your requirements.",
|
85 |
-
),
|
86 |
-
"hash": CommandInfo(
|
87 |
-
"pip._internal.commands.hash",
|
88 |
-
"HashCommand",
|
89 |
-
"Compute hashes of package archives.",
|
90 |
-
),
|
91 |
-
"completion": CommandInfo(
|
92 |
-
"pip._internal.commands.completion",
|
93 |
-
"CompletionCommand",
|
94 |
-
"A helper command used for command completion.",
|
95 |
-
),
|
96 |
-
"debug": CommandInfo(
|
97 |
-
"pip._internal.commands.debug",
|
98 |
-
"DebugCommand",
|
99 |
-
"Show information useful for debugging.",
|
100 |
-
),
|
101 |
-
"help": CommandInfo(
|
102 |
-
"pip._internal.commands.help",
|
103 |
-
"HelpCommand",
|
104 |
-
"Show help for commands.",
|
105 |
-
),
|
106 |
-
}
|
107 |
-
|
108 |
-
|
109 |
-
def create_command(name: str, **kwargs: Any) -> Command:
|
110 |
-
"""
|
111 |
-
Create an instance of the Command class with the given name.
|
112 |
-
"""
|
113 |
-
module_path, class_name, summary = commands_dict[name]
|
114 |
-
module = importlib.import_module(module_path)
|
115 |
-
command_class = getattr(module, class_name)
|
116 |
-
command = command_class(name=name, summary=summary, **kwargs)
|
117 |
-
|
118 |
-
return command
|
119 |
-
|
120 |
-
|
121 |
-
def get_similar_commands(name: str) -> Optional[str]:
|
122 |
-
"""Command name auto-correct."""
|
123 |
-
from difflib import get_close_matches
|
124 |
-
|
125 |
-
name = name.lower()
|
126 |
-
|
127 |
-
close_commands = get_close_matches(name, commands_dict.keys())
|
128 |
-
|
129 |
-
if close_commands:
|
130 |
-
return close_commands[0]
|
131 |
-
else:
|
132 |
-
return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/search.py
DELETED
@@ -1,174 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import shutil
|
3 |
-
import sys
|
4 |
-
import textwrap
|
5 |
-
import xmlrpc.client
|
6 |
-
from collections import OrderedDict
|
7 |
-
from optparse import Values
|
8 |
-
from typing import TYPE_CHECKING, Dict, List, Optional
|
9 |
-
|
10 |
-
from pip._vendor.packaging.version import parse as parse_version
|
11 |
-
|
12 |
-
from pip._internal.cli.base_command import Command
|
13 |
-
from pip._internal.cli.req_command import SessionCommandMixin
|
14 |
-
from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
|
15 |
-
from pip._internal.exceptions import CommandError
|
16 |
-
from pip._internal.metadata import get_default_environment
|
17 |
-
from pip._internal.models.index import PyPI
|
18 |
-
from pip._internal.network.xmlrpc import PipXmlrpcTransport
|
19 |
-
from pip._internal.utils.logging import indent_log
|
20 |
-
from pip._internal.utils.misc import write_output
|
21 |
-
|
22 |
-
if TYPE_CHECKING:
|
23 |
-
from typing import TypedDict
|
24 |
-
|
25 |
-
class TransformedHit(TypedDict):
|
26 |
-
name: str
|
27 |
-
summary: str
|
28 |
-
versions: List[str]
|
29 |
-
|
30 |
-
|
31 |
-
logger = logging.getLogger(__name__)
|
32 |
-
|
33 |
-
|
34 |
-
class SearchCommand(Command, SessionCommandMixin):
|
35 |
-
"""Search for PyPI packages whose name or summary contains <query>."""
|
36 |
-
|
37 |
-
usage = """
|
38 |
-
%prog [options] <query>"""
|
39 |
-
ignore_require_venv = True
|
40 |
-
|
41 |
-
def add_options(self) -> None:
|
42 |
-
self.cmd_opts.add_option(
|
43 |
-
"-i",
|
44 |
-
"--index",
|
45 |
-
dest="index",
|
46 |
-
metavar="URL",
|
47 |
-
default=PyPI.pypi_url,
|
48 |
-
help="Base URL of Python Package Index (default %default)",
|
49 |
-
)
|
50 |
-
|
51 |
-
self.parser.insert_option_group(0, self.cmd_opts)
|
52 |
-
|
53 |
-
def run(self, options: Values, args: List[str]) -> int:
|
54 |
-
if not args:
|
55 |
-
raise CommandError("Missing required argument (search query).")
|
56 |
-
query = args
|
57 |
-
pypi_hits = self.search(query, options)
|
58 |
-
hits = transform_hits(pypi_hits)
|
59 |
-
|
60 |
-
terminal_width = None
|
61 |
-
if sys.stdout.isatty():
|
62 |
-
terminal_width = shutil.get_terminal_size()[0]
|
63 |
-
|
64 |
-
print_results(hits, terminal_width=terminal_width)
|
65 |
-
if pypi_hits:
|
66 |
-
return SUCCESS
|
67 |
-
return NO_MATCHES_FOUND
|
68 |
-
|
69 |
-
def search(self, query: List[str], options: Values) -> List[Dict[str, str]]:
|
70 |
-
index_url = options.index
|
71 |
-
|
72 |
-
session = self.get_default_session(options)
|
73 |
-
|
74 |
-
transport = PipXmlrpcTransport(index_url, session)
|
75 |
-
pypi = xmlrpc.client.ServerProxy(index_url, transport)
|
76 |
-
try:
|
77 |
-
hits = pypi.search({"name": query, "summary": query}, "or")
|
78 |
-
except xmlrpc.client.Fault as fault:
|
79 |
-
message = "XMLRPC request failed [code: {code}]\n{string}".format(
|
80 |
-
code=fault.faultCode,
|
81 |
-
string=fault.faultString,
|
82 |
-
)
|
83 |
-
raise CommandError(message)
|
84 |
-
assert isinstance(hits, list)
|
85 |
-
return hits
|
86 |
-
|
87 |
-
|
88 |
-
def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]:
|
89 |
-
"""
|
90 |
-
The list from pypi is really a list of versions. We want a list of
|
91 |
-
packages with the list of versions stored inline. This converts the
|
92 |
-
list from pypi into one we can use.
|
93 |
-
"""
|
94 |
-
packages: Dict[str, "TransformedHit"] = OrderedDict()
|
95 |
-
for hit in hits:
|
96 |
-
name = hit["name"]
|
97 |
-
summary = hit["summary"]
|
98 |
-
version = hit["version"]
|
99 |
-
|
100 |
-
if name not in packages.keys():
|
101 |
-
packages[name] = {
|
102 |
-
"name": name,
|
103 |
-
"summary": summary,
|
104 |
-
"versions": [version],
|
105 |
-
}
|
106 |
-
else:
|
107 |
-
packages[name]["versions"].append(version)
|
108 |
-
|
109 |
-
# if this is the highest version, replace summary and score
|
110 |
-
if version == highest_version(packages[name]["versions"]):
|
111 |
-
packages[name]["summary"] = summary
|
112 |
-
|
113 |
-
return list(packages.values())
|
114 |
-
|
115 |
-
|
116 |
-
def print_dist_installation_info(name: str, latest: str) -> None:
|
117 |
-
env = get_default_environment()
|
118 |
-
dist = env.get_distribution(name)
|
119 |
-
if dist is not None:
|
120 |
-
with indent_log():
|
121 |
-
if dist.version == latest:
|
122 |
-
write_output("INSTALLED: %s (latest)", dist.version)
|
123 |
-
else:
|
124 |
-
write_output("INSTALLED: %s", dist.version)
|
125 |
-
if parse_version(latest).pre:
|
126 |
-
write_output(
|
127 |
-
"LATEST: %s (pre-release; install"
|
128 |
-
" with `pip install --pre`)",
|
129 |
-
latest,
|
130 |
-
)
|
131 |
-
else:
|
132 |
-
write_output("LATEST: %s", latest)
|
133 |
-
|
134 |
-
|
135 |
-
def print_results(
|
136 |
-
hits: List["TransformedHit"],
|
137 |
-
name_column_width: Optional[int] = None,
|
138 |
-
terminal_width: Optional[int] = None,
|
139 |
-
) -> None:
|
140 |
-
if not hits:
|
141 |
-
return
|
142 |
-
if name_column_width is None:
|
143 |
-
name_column_width = (
|
144 |
-
max(
|
145 |
-
[
|
146 |
-
len(hit["name"]) + len(highest_version(hit.get("versions", ["-"])))
|
147 |
-
for hit in hits
|
148 |
-
]
|
149 |
-
)
|
150 |
-
+ 4
|
151 |
-
)
|
152 |
-
|
153 |
-
for hit in hits:
|
154 |
-
name = hit["name"]
|
155 |
-
summary = hit["summary"] or ""
|
156 |
-
latest = highest_version(hit.get("versions", ["-"]))
|
157 |
-
if terminal_width is not None:
|
158 |
-
target_width = terminal_width - name_column_width - 5
|
159 |
-
if target_width > 10:
|
160 |
-
# wrap and indent summary to fit terminal
|
161 |
-
summary_lines = textwrap.wrap(summary, target_width)
|
162 |
-
summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines)
|
163 |
-
|
164 |
-
name_latest = f"{name} ({latest})"
|
165 |
-
line = f"{name_latest:{name_column_width}} - {summary}"
|
166 |
-
try:
|
167 |
-
write_output(line)
|
168 |
-
print_dist_installation_info(name, latest)
|
169 |
-
except UnicodeEncodeError:
|
170 |
-
pass
|
171 |
-
|
172 |
-
|
173 |
-
def highest_version(versions: List[str]) -> str:
|
174 |
-
return max(versions, key=parse_version)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py
DELETED
@@ -1,686 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import datetime
|
5 |
-
import itertools
|
6 |
-
import logging
|
7 |
-
import math
|
8 |
-
import operator
|
9 |
-
import os
|
10 |
-
import tempfile
|
11 |
-
import time
|
12 |
-
import warnings
|
13 |
-
from collections import Counter
|
14 |
-
import torch
|
15 |
-
from fvcore.common.checkpoint import Checkpointer
|
16 |
-
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
|
17 |
-
from fvcore.common.param_scheduler import ParamScheduler
|
18 |
-
from fvcore.common.timer import Timer
|
19 |
-
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
|
20 |
-
|
21 |
-
import detectron2.utils.comm as comm
|
22 |
-
from detectron2.evaluation.testing import flatten_results_dict
|
23 |
-
from detectron2.solver import LRMultiplier
|
24 |
-
from detectron2.utils.events import EventStorage, EventWriter
|
25 |
-
from detectron2.utils.file_io import PathManager
|
26 |
-
|
27 |
-
from .train_loop import HookBase
|
28 |
-
|
29 |
-
__all__ = [
|
30 |
-
"CallbackHook",
|
31 |
-
"IterationTimer",
|
32 |
-
"PeriodicWriter",
|
33 |
-
"PeriodicCheckpointer",
|
34 |
-
"BestCheckpointer",
|
35 |
-
"LRScheduler",
|
36 |
-
"AutogradProfiler",
|
37 |
-
"EvalHook",
|
38 |
-
"PreciseBN",
|
39 |
-
"TorchProfiler",
|
40 |
-
"TorchMemoryStats",
|
41 |
-
]
|
42 |
-
|
43 |
-
|
44 |
-
"""
|
45 |
-
Implement some common hooks.
|
46 |
-
"""
|
47 |
-
|
48 |
-
|
49 |
-
class CallbackHook(HookBase):
|
50 |
-
"""
|
51 |
-
Create a hook using callback functions provided by the user.
|
52 |
-
"""
|
53 |
-
|
54 |
-
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
|
55 |
-
"""
|
56 |
-
Each argument is a function that takes one argument: the trainer.
|
57 |
-
"""
|
58 |
-
self._before_train = before_train
|
59 |
-
self._before_step = before_step
|
60 |
-
self._after_step = after_step
|
61 |
-
self._after_train = after_train
|
62 |
-
|
63 |
-
def before_train(self):
|
64 |
-
if self._before_train:
|
65 |
-
self._before_train(self.trainer)
|
66 |
-
|
67 |
-
def after_train(self):
|
68 |
-
if self._after_train:
|
69 |
-
self._after_train(self.trainer)
|
70 |
-
# The functions may be closures that hold reference to the trainer
|
71 |
-
# Therefore, delete them to avoid circular reference.
|
72 |
-
del self._before_train, self._after_train
|
73 |
-
del self._before_step, self._after_step
|
74 |
-
|
75 |
-
def before_step(self):
|
76 |
-
if self._before_step:
|
77 |
-
self._before_step(self.trainer)
|
78 |
-
|
79 |
-
def after_step(self):
|
80 |
-
if self._after_step:
|
81 |
-
self._after_step(self.trainer)
|
82 |
-
|
83 |
-
|
84 |
-
class IterationTimer(HookBase):
|
85 |
-
"""
|
86 |
-
Track the time spent for each iteration (each run_step call in the trainer).
|
87 |
-
Print a summary in the end of training.
|
88 |
-
|
89 |
-
This hook uses the time between the call to its :meth:`before_step`
|
90 |
-
and :meth:`after_step` methods.
|
91 |
-
Under the convention that :meth:`before_step` of all hooks should only
|
92 |
-
take negligible amount of time, the :class:`IterationTimer` hook should be
|
93 |
-
placed at the beginning of the list of hooks to obtain accurate timing.
|
94 |
-
"""
|
95 |
-
|
96 |
-
def __init__(self, warmup_iter=3):
|
97 |
-
"""
|
98 |
-
Args:
|
99 |
-
warmup_iter (int): the number of iterations at the beginning to exclude
|
100 |
-
from timing.
|
101 |
-
"""
|
102 |
-
self._warmup_iter = warmup_iter
|
103 |
-
self._step_timer = Timer()
|
104 |
-
self._start_time = time.perf_counter()
|
105 |
-
self._total_timer = Timer()
|
106 |
-
|
107 |
-
def before_train(self):
|
108 |
-
self._start_time = time.perf_counter()
|
109 |
-
self._total_timer.reset()
|
110 |
-
self._total_timer.pause()
|
111 |
-
|
112 |
-
def after_train(self):
|
113 |
-
logger = logging.getLogger(__name__)
|
114 |
-
total_time = time.perf_counter() - self._start_time
|
115 |
-
total_time_minus_hooks = self._total_timer.seconds()
|
116 |
-
hook_time = total_time - total_time_minus_hooks
|
117 |
-
|
118 |
-
num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter
|
119 |
-
|
120 |
-
if num_iter > 0 and total_time_minus_hooks > 0:
|
121 |
-
# Speed is meaningful only after warmup
|
122 |
-
# NOTE this format is parsed by grep in some scripts
|
123 |
-
logger.info(
|
124 |
-
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
|
125 |
-
num_iter,
|
126 |
-
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
|
127 |
-
total_time_minus_hooks / num_iter,
|
128 |
-
)
|
129 |
-
)
|
130 |
-
|
131 |
-
logger.info(
|
132 |
-
"Total training time: {} ({} on hooks)".format(
|
133 |
-
str(datetime.timedelta(seconds=int(total_time))),
|
134 |
-
str(datetime.timedelta(seconds=int(hook_time))),
|
135 |
-
)
|
136 |
-
)
|
137 |
-
|
138 |
-
def before_step(self):
|
139 |
-
self._step_timer.reset()
|
140 |
-
self._total_timer.resume()
|
141 |
-
|
142 |
-
def after_step(self):
|
143 |
-
# +1 because we're in after_step, the current step is done
|
144 |
-
# but not yet counted
|
145 |
-
iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1
|
146 |
-
if iter_done >= self._warmup_iter:
|
147 |
-
sec = self._step_timer.seconds()
|
148 |
-
self.trainer.storage.put_scalars(time=sec)
|
149 |
-
else:
|
150 |
-
self._start_time = time.perf_counter()
|
151 |
-
self._total_timer.reset()
|
152 |
-
|
153 |
-
self._total_timer.pause()
|
154 |
-
|
155 |
-
|
156 |
-
class PeriodicWriter(HookBase):
|
157 |
-
"""
|
158 |
-
Write events to EventStorage (by calling ``writer.write()``) periodically.
|
159 |
-
|
160 |
-
It is executed every ``period`` iterations and after the last iteration.
|
161 |
-
Note that ``period`` does not affect how data is smoothed by each writer.
|
162 |
-
"""
|
163 |
-
|
164 |
-
def __init__(self, writers, period=20):
|
165 |
-
"""
|
166 |
-
Args:
|
167 |
-
writers (list[EventWriter]): a list of EventWriter objects
|
168 |
-
period (int):
|
169 |
-
"""
|
170 |
-
self._writers = writers
|
171 |
-
for w in writers:
|
172 |
-
assert isinstance(w, EventWriter), w
|
173 |
-
self._period = period
|
174 |
-
|
175 |
-
def after_step(self):
|
176 |
-
if (self.trainer.iter + 1) % self._period == 0 or (
|
177 |
-
self.trainer.iter == self.trainer.max_iter - 1
|
178 |
-
):
|
179 |
-
for writer in self._writers:
|
180 |
-
writer.write()
|
181 |
-
|
182 |
-
def after_train(self):
|
183 |
-
for writer in self._writers:
|
184 |
-
# If any new data is found (e.g. produced by other after_train),
|
185 |
-
# write them before closing
|
186 |
-
writer.write()
|
187 |
-
writer.close()
|
188 |
-
|
189 |
-
|
190 |
-
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
|
191 |
-
"""
|
192 |
-
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
|
193 |
-
|
194 |
-
Note that when used as a hook,
|
195 |
-
it is unable to save additional data other than what's defined
|
196 |
-
by the given `checkpointer`.
|
197 |
-
|
198 |
-
It is executed every ``period`` iterations and after the last iteration.
|
199 |
-
"""
|
200 |
-
|
201 |
-
def before_train(self):
|
202 |
-
self.max_iter = self.trainer.max_iter
|
203 |
-
|
204 |
-
def after_step(self):
|
205 |
-
# No way to use **kwargs
|
206 |
-
self.step(self.trainer.iter)
|
207 |
-
|
208 |
-
|
209 |
-
class BestCheckpointer(HookBase):
|
210 |
-
"""
|
211 |
-
Checkpoints best weights based off given metric.
|
212 |
-
|
213 |
-
This hook should be used in conjunction to and executed after the hook
|
214 |
-
that produces the metric, e.g. `EvalHook`.
|
215 |
-
"""
|
216 |
-
|
217 |
-
def __init__(
|
218 |
-
self,
|
219 |
-
eval_period: int,
|
220 |
-
checkpointer: Checkpointer,
|
221 |
-
val_metric: str,
|
222 |
-
mode: str = "max",
|
223 |
-
file_prefix: str = "model_best",
|
224 |
-
) -> None:
|
225 |
-
"""
|
226 |
-
Args:
|
227 |
-
eval_period (int): the period `EvalHook` is set to run.
|
228 |
-
checkpointer: the checkpointer object used to save checkpoints.
|
229 |
-
val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
|
230 |
-
mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
|
231 |
-
maximized or minimized, e.g. for "bbox/AP50" it should be "max"
|
232 |
-
file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
|
233 |
-
"""
|
234 |
-
self._logger = logging.getLogger(__name__)
|
235 |
-
self._period = eval_period
|
236 |
-
self._val_metric = val_metric
|
237 |
-
assert mode in [
|
238 |
-
"max",
|
239 |
-
"min",
|
240 |
-
], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
|
241 |
-
if mode == "max":
|
242 |
-
self._compare = operator.gt
|
243 |
-
else:
|
244 |
-
self._compare = operator.lt
|
245 |
-
self._checkpointer = checkpointer
|
246 |
-
self._file_prefix = file_prefix
|
247 |
-
self.best_metric = None
|
248 |
-
self.best_iter = None
|
249 |
-
|
250 |
-
def _update_best(self, val, iteration):
|
251 |
-
if math.isnan(val) or math.isinf(val):
|
252 |
-
return False
|
253 |
-
self.best_metric = val
|
254 |
-
self.best_iter = iteration
|
255 |
-
return True
|
256 |
-
|
257 |
-
def _best_checking(self):
|
258 |
-
metric_tuple = self.trainer.storage.latest().get(self._val_metric)
|
259 |
-
if metric_tuple is None:
|
260 |
-
self._logger.warning(
|
261 |
-
f"Given val metric {self._val_metric} does not seem to be computed/stored."
|
262 |
-
"Will not be checkpointing based on it."
|
263 |
-
)
|
264 |
-
return
|
265 |
-
else:
|
266 |
-
latest_metric, metric_iter = metric_tuple
|
267 |
-
|
268 |
-
if self.best_metric is None:
|
269 |
-
if self._update_best(latest_metric, metric_iter):
|
270 |
-
additional_state = {"iteration": metric_iter}
|
271 |
-
self._checkpointer.save(f"{self._file_prefix}", **additional_state)
|
272 |
-
self._logger.info(
|
273 |
-
f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
|
274 |
-
)
|
275 |
-
elif self._compare(latest_metric, self.best_metric):
|
276 |
-
additional_state = {"iteration": metric_iter}
|
277 |
-
self._checkpointer.save(f"{self._file_prefix}", **additional_state)
|
278 |
-
self._logger.info(
|
279 |
-
f"Saved best model as latest eval score for {self._val_metric} is "
|
280 |
-
f"{latest_metric:0.5f}, better than last best score "
|
281 |
-
f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
|
282 |
-
)
|
283 |
-
self._update_best(latest_metric, metric_iter)
|
284 |
-
else:
|
285 |
-
self._logger.info(
|
286 |
-
f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
|
287 |
-
f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
|
288 |
-
)
|
289 |
-
|
290 |
-
def after_step(self):
|
291 |
-
# same conditions as `EvalHook`
|
292 |
-
next_iter = self.trainer.iter + 1
|
293 |
-
if (
|
294 |
-
self._period > 0
|
295 |
-
and next_iter % self._period == 0
|
296 |
-
and next_iter != self.trainer.max_iter
|
297 |
-
):
|
298 |
-
self._best_checking()
|
299 |
-
|
300 |
-
def after_train(self):
|
301 |
-
# same conditions as `EvalHook`
|
302 |
-
if self.trainer.iter + 1 >= self.trainer.max_iter:
|
303 |
-
self._best_checking()
|
304 |
-
|
305 |
-
|
306 |
-
class LRScheduler(HookBase):
|
307 |
-
"""
|
308 |
-
A hook which executes a torch builtin LR scheduler and summarizes the LR.
|
309 |
-
It is executed after every iteration.
|
310 |
-
"""
|
311 |
-
|
312 |
-
def __init__(self, optimizer=None, scheduler=None):
|
313 |
-
"""
|
314 |
-
Args:
|
315 |
-
optimizer (torch.optim.Optimizer):
|
316 |
-
scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
|
317 |
-
if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
|
318 |
-
in the optimizer.
|
319 |
-
|
320 |
-
If any argument is not given, will try to obtain it from the trainer.
|
321 |
-
"""
|
322 |
-
self._optimizer = optimizer
|
323 |
-
self._scheduler = scheduler
|
324 |
-
|
325 |
-
def before_train(self):
|
326 |
-
self._optimizer = self._optimizer or self.trainer.optimizer
|
327 |
-
if isinstance(self.scheduler, ParamScheduler):
|
328 |
-
self._scheduler = LRMultiplier(
|
329 |
-
self._optimizer,
|
330 |
-
self.scheduler,
|
331 |
-
self.trainer.max_iter,
|
332 |
-
last_iter=self.trainer.iter - 1,
|
333 |
-
)
|
334 |
-
self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
|
335 |
-
|
336 |
-
@staticmethod
|
337 |
-
def get_best_param_group_id(optimizer):
|
338 |
-
# NOTE: some heuristics on what LR to summarize
|
339 |
-
# summarize the param group with most parameters
|
340 |
-
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
|
341 |
-
|
342 |
-
if largest_group == 1:
|
343 |
-
# If all groups have one parameter,
|
344 |
-
# then find the most common initial LR, and use it for summary
|
345 |
-
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
|
346 |
-
lr = lr_count.most_common()[0][0]
|
347 |
-
for i, g in enumerate(optimizer.param_groups):
|
348 |
-
if g["lr"] == lr:
|
349 |
-
return i
|
350 |
-
else:
|
351 |
-
for i, g in enumerate(optimizer.param_groups):
|
352 |
-
if len(g["params"]) == largest_group:
|
353 |
-
return i
|
354 |
-
|
355 |
-
def after_step(self):
|
356 |
-
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
|
357 |
-
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
|
358 |
-
self.scheduler.step()
|
359 |
-
|
360 |
-
@property
|
361 |
-
def scheduler(self):
|
362 |
-
return self._scheduler or self.trainer.scheduler
|
363 |
-
|
364 |
-
def state_dict(self):
|
365 |
-
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
|
366 |
-
return self.scheduler.state_dict()
|
367 |
-
return {}
|
368 |
-
|
369 |
-
def load_state_dict(self, state_dict):
|
370 |
-
if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
|
371 |
-
logger = logging.getLogger(__name__)
|
372 |
-
logger.info("Loading scheduler from state_dict ...")
|
373 |
-
self.scheduler.load_state_dict(state_dict)
|
374 |
-
|
375 |
-
|
376 |
-
class TorchProfiler(HookBase):
|
377 |
-
"""
|
378 |
-
A hook which runs `torch.profiler.profile`.
|
379 |
-
|
380 |
-
Examples:
|
381 |
-
::
|
382 |
-
hooks.TorchProfiler(
|
383 |
-
lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
|
384 |
-
)
|
385 |
-
|
386 |
-
The above example will run the profiler for iteration 10~20 and dump
|
387 |
-
results to ``OUTPUT_DIR``. We did not profile the first few iterations
|
388 |
-
because they are typically slower than the rest.
|
389 |
-
The result files can be loaded in the ``chrome://tracing`` page in chrome browser,
|
390 |
-
and the tensorboard visualizations can be visualized using
|
391 |
-
``tensorboard --logdir OUTPUT_DIR/log``
|
392 |
-
"""
|
393 |
-
|
394 |
-
def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
|
395 |
-
"""
|
396 |
-
Args:
|
397 |
-
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
|
398 |
-
and returns whether to enable the profiler.
|
399 |
-
It will be called once every step, and can be used to select which steps to profile.
|
400 |
-
output_dir (str): the output directory to dump tracing files.
|
401 |
-
activities (iterable): same as in `torch.profiler.profile`.
|
402 |
-
save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/
|
403 |
-
"""
|
404 |
-
self._enable_predicate = enable_predicate
|
405 |
-
self._activities = activities
|
406 |
-
self._output_dir = output_dir
|
407 |
-
self._save_tensorboard = save_tensorboard
|
408 |
-
|
409 |
-
def before_step(self):
|
410 |
-
if self._enable_predicate(self.trainer):
|
411 |
-
if self._save_tensorboard:
|
412 |
-
on_trace_ready = torch.profiler.tensorboard_trace_handler(
|
413 |
-
os.path.join(
|
414 |
-
self._output_dir,
|
415 |
-
"log",
|
416 |
-
"profiler-tensorboard-iter{}".format(self.trainer.iter),
|
417 |
-
),
|
418 |
-
f"worker{comm.get_rank()}",
|
419 |
-
)
|
420 |
-
else:
|
421 |
-
on_trace_ready = None
|
422 |
-
self._profiler = torch.profiler.profile(
|
423 |
-
activities=self._activities,
|
424 |
-
on_trace_ready=on_trace_ready,
|
425 |
-
record_shapes=True,
|
426 |
-
profile_memory=True,
|
427 |
-
with_stack=True,
|
428 |
-
with_flops=True,
|
429 |
-
)
|
430 |
-
self._profiler.__enter__()
|
431 |
-
else:
|
432 |
-
self._profiler = None
|
433 |
-
|
434 |
-
def after_step(self):
|
435 |
-
if self._profiler is None:
|
436 |
-
return
|
437 |
-
self._profiler.__exit__(None, None, None)
|
438 |
-
if not self._save_tensorboard:
|
439 |
-
PathManager.mkdirs(self._output_dir)
|
440 |
-
out_file = os.path.join(
|
441 |
-
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
|
442 |
-
)
|
443 |
-
if "://" not in out_file:
|
444 |
-
self._profiler.export_chrome_trace(out_file)
|
445 |
-
else:
|
446 |
-
# Support non-posix filesystems
|
447 |
-
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
|
448 |
-
tmp_file = os.path.join(d, "tmp.json")
|
449 |
-
self._profiler.export_chrome_trace(tmp_file)
|
450 |
-
with open(tmp_file) as f:
|
451 |
-
content = f.read()
|
452 |
-
with PathManager.open(out_file, "w") as f:
|
453 |
-
f.write(content)
|
454 |
-
|
455 |
-
|
456 |
-
class AutogradProfiler(TorchProfiler):
|
457 |
-
"""
|
458 |
-
A hook which runs `torch.autograd.profiler.profile`.
|
459 |
-
|
460 |
-
Examples:
|
461 |
-
::
|
462 |
-
hooks.AutogradProfiler(
|
463 |
-
lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
|
464 |
-
)
|
465 |
-
|
466 |
-
The above example will run the profiler for iteration 10~20 and dump
|
467 |
-
results to ``OUTPUT_DIR``. We did not profile the first few iterations
|
468 |
-
because they are typically slower than the rest.
|
469 |
-
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
|
470 |
-
|
471 |
-
Note:
|
472 |
-
When used together with NCCL on older version of GPUs,
|
473 |
-
autograd profiler may cause deadlock because it unnecessarily allocates
|
474 |
-
memory on every device it sees. The memory management calls, if
|
475 |
-
interleaved with NCCL calls, lead to deadlock on GPUs that do not
|
476 |
-
support ``cudaLaunchCooperativeKernelMultiDevice``.
|
477 |
-
"""
|
478 |
-
|
479 |
-
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
|
480 |
-
"""
|
481 |
-
Args:
|
482 |
-
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
|
483 |
-
and returns whether to enable the profiler.
|
484 |
-
It will be called once every step, and can be used to select which steps to profile.
|
485 |
-
output_dir (str): the output directory to dump tracing files.
|
486 |
-
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
|
487 |
-
"""
|
488 |
-
warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.")
|
489 |
-
self._enable_predicate = enable_predicate
|
490 |
-
self._use_cuda = use_cuda
|
491 |
-
self._output_dir = output_dir
|
492 |
-
|
493 |
-
def before_step(self):
|
494 |
-
if self._enable_predicate(self.trainer):
|
495 |
-
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
|
496 |
-
self._profiler.__enter__()
|
497 |
-
else:
|
498 |
-
self._profiler = None
|
499 |
-
|
500 |
-
|
501 |
-
class EvalHook(HookBase):
|
502 |
-
"""
|
503 |
-
Run an evaluation function periodically, and at the end of training.
|
504 |
-
|
505 |
-
It is executed every ``eval_period`` iterations and after the last iteration.
|
506 |
-
"""
|
507 |
-
|
508 |
-
def __init__(self, eval_period, eval_function):
|
509 |
-
"""
|
510 |
-
Args:
|
511 |
-
eval_period (int): the period to run `eval_function`. Set to 0 to
|
512 |
-
not evaluate periodically (but still after the last iteration).
|
513 |
-
eval_function (callable): a function which takes no arguments, and
|
514 |
-
returns a nested dict of evaluation metrics.
|
515 |
-
|
516 |
-
Note:
|
517 |
-
This hook must be enabled in all or none workers.
|
518 |
-
If you would like only certain workers to perform evaluation,
|
519 |
-
give other workers a no-op function (`eval_function=lambda: None`).
|
520 |
-
"""
|
521 |
-
self._period = eval_period
|
522 |
-
self._func = eval_function
|
523 |
-
|
524 |
-
def _do_eval(self):
|
525 |
-
results = self._func()
|
526 |
-
|
527 |
-
if results:
|
528 |
-
assert isinstance(
|
529 |
-
results, dict
|
530 |
-
), "Eval function must return a dict. Got {} instead.".format(results)
|
531 |
-
|
532 |
-
flattened_results = flatten_results_dict(results)
|
533 |
-
for k, v in flattened_results.items():
|
534 |
-
try:
|
535 |
-
v = float(v)
|
536 |
-
except Exception as e:
|
537 |
-
raise ValueError(
|
538 |
-
"[EvalHook] eval_function should return a nested dict of float. "
|
539 |
-
"Got '{}: {}' instead.".format(k, v)
|
540 |
-
) from e
|
541 |
-
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
|
542 |
-
|
543 |
-
# Evaluation may take different time among workers.
|
544 |
-
# A barrier make them start the next iteration together.
|
545 |
-
comm.synchronize()
|
546 |
-
|
547 |
-
def after_step(self):
|
548 |
-
next_iter = self.trainer.iter + 1
|
549 |
-
if self._period > 0 and next_iter % self._period == 0:
|
550 |
-
# do the last eval in after_train
|
551 |
-
if next_iter != self.trainer.max_iter:
|
552 |
-
self._do_eval()
|
553 |
-
|
554 |
-
def after_train(self):
|
555 |
-
# This condition is to prevent the eval from running after a failed training
|
556 |
-
if self.trainer.iter + 1 >= self.trainer.max_iter:
|
557 |
-
self._do_eval()
|
558 |
-
# func is likely a closure that holds reference to the trainer
|
559 |
-
# therefore we clean it to avoid circular reference in the end
|
560 |
-
del self._func
|
561 |
-
|
562 |
-
|
563 |
-
class PreciseBN(HookBase):
|
564 |
-
"""
|
565 |
-
The standard implementation of BatchNorm uses EMA in inference, which is
|
566 |
-
sometimes suboptimal.
|
567 |
-
This class computes the true average of statistics rather than the moving average,
|
568 |
-
and put true averages to every BN layer in the given model.
|
569 |
-
|
570 |
-
It is executed every ``period`` iterations and after the last iteration.
|
571 |
-
"""
|
572 |
-
|
573 |
-
def __init__(self, period, model, data_loader, num_iter):
|
574 |
-
"""
|
575 |
-
Args:
|
576 |
-
period (int): the period this hook is run, or 0 to not run during training.
|
577 |
-
The hook will always run in the end of training.
|
578 |
-
model (nn.Module): a module whose all BN layers in training mode will be
|
579 |
-
updated by precise BN.
|
580 |
-
Note that user is responsible for ensuring the BN layers to be
|
581 |
-
updated are in training mode when this hook is triggered.
|
582 |
-
data_loader (iterable): it will produce data to be run by `model(data)`.
|
583 |
-
num_iter (int): number of iterations used to compute the precise
|
584 |
-
statistics.
|
585 |
-
"""
|
586 |
-
self._logger = logging.getLogger(__name__)
|
587 |
-
if len(get_bn_modules(model)) == 0:
|
588 |
-
self._logger.info(
|
589 |
-
"PreciseBN is disabled because model does not contain BN layers in training mode."
|
590 |
-
)
|
591 |
-
self._disabled = True
|
592 |
-
return
|
593 |
-
|
594 |
-
self._model = model
|
595 |
-
self._data_loader = data_loader
|
596 |
-
self._num_iter = num_iter
|
597 |
-
self._period = period
|
598 |
-
self._disabled = False
|
599 |
-
|
600 |
-
self._data_iter = None
|
601 |
-
|
602 |
-
def after_step(self):
|
603 |
-
next_iter = self.trainer.iter + 1
|
604 |
-
is_final = next_iter == self.trainer.max_iter
|
605 |
-
if is_final or (self._period > 0 and next_iter % self._period == 0):
|
606 |
-
self.update_stats()
|
607 |
-
|
608 |
-
def update_stats(self):
|
609 |
-
"""
|
610 |
-
Update the model with precise statistics. Users can manually call this method.
|
611 |
-
"""
|
612 |
-
if self._disabled:
|
613 |
-
return
|
614 |
-
|
615 |
-
if self._data_iter is None:
|
616 |
-
self._data_iter = iter(self._data_loader)
|
617 |
-
|
618 |
-
def data_loader():
|
619 |
-
for num_iter in itertools.count(1):
|
620 |
-
if num_iter % 100 == 0:
|
621 |
-
self._logger.info(
|
622 |
-
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
|
623 |
-
)
|
624 |
-
# This way we can reuse the same iterator
|
625 |
-
yield next(self._data_iter)
|
626 |
-
|
627 |
-
with EventStorage(): # capture events in a new storage to discard them
|
628 |
-
self._logger.info(
|
629 |
-
"Running precise-BN for {} iterations... ".format(self._num_iter)
|
630 |
-
+ "Note that this could produce different statistics every time."
|
631 |
-
)
|
632 |
-
update_bn_stats(self._model, data_loader(), self._num_iter)
|
633 |
-
|
634 |
-
|
635 |
-
class TorchMemoryStats(HookBase):
|
636 |
-
"""
|
637 |
-
Writes pytorch's cuda memory statistics periodically.
|
638 |
-
"""
|
639 |
-
|
640 |
-
def __init__(self, period=20, max_runs=10):
|
641 |
-
"""
|
642 |
-
Args:
|
643 |
-
period (int): Output stats each 'period' iterations
|
644 |
-
max_runs (int): Stop the logging after 'max_runs'
|
645 |
-
"""
|
646 |
-
|
647 |
-
self._logger = logging.getLogger(__name__)
|
648 |
-
self._period = period
|
649 |
-
self._max_runs = max_runs
|
650 |
-
self._runs = 0
|
651 |
-
|
652 |
-
def after_step(self):
|
653 |
-
if self._runs > self._max_runs:
|
654 |
-
return
|
655 |
-
|
656 |
-
if (self.trainer.iter + 1) % self._period == 0 or (
|
657 |
-
self.trainer.iter == self.trainer.max_iter - 1
|
658 |
-
):
|
659 |
-
if torch.cuda.is_available():
|
660 |
-
max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0
|
661 |
-
reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0
|
662 |
-
max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
|
663 |
-
allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0
|
664 |
-
|
665 |
-
self._logger.info(
|
666 |
-
(
|
667 |
-
" iter: {} "
|
668 |
-
" max_reserved_mem: {:.0f}MB "
|
669 |
-
" reserved_mem: {:.0f}MB "
|
670 |
-
" max_allocated_mem: {:.0f}MB "
|
671 |
-
" allocated_mem: {:.0f}MB "
|
672 |
-
).format(
|
673 |
-
self.trainer.iter,
|
674 |
-
max_reserved_mb,
|
675 |
-
reserved_mb,
|
676 |
-
max_allocated_mb,
|
677 |
-
allocated_mb,
|
678 |
-
)
|
679 |
-
)
|
680 |
-
|
681 |
-
self._runs += 1
|
682 |
-
if self._runs == self._max_runs:
|
683 |
-
mem_summary = torch.cuda.memory_summary()
|
684 |
-
self._logger.info("\n" + mem_summary)
|
685 |
-
|
686 |
-
torch.cuda.reset_peak_memory_stats()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
3 |
-
|
4 |
-
import logging
|
5 |
-
import numpy as np
|
6 |
-
import os
|
7 |
-
import tempfile
|
8 |
-
import xml.etree.ElementTree as ET
|
9 |
-
from collections import OrderedDict, defaultdict
|
10 |
-
from functools import lru_cache
|
11 |
-
import torch
|
12 |
-
|
13 |
-
from detectron2.data import MetadataCatalog
|
14 |
-
from detectron2.utils import comm
|
15 |
-
from detectron2.utils.file_io import PathManager
|
16 |
-
|
17 |
-
from .evaluator import DatasetEvaluator
|
18 |
-
|
19 |
-
|
20 |
-
class PascalVOCDetectionEvaluator(DatasetEvaluator):
|
21 |
-
"""
|
22 |
-
Evaluate Pascal VOC style AP for Pascal VOC dataset.
|
23 |
-
It contains a synchronization, therefore has to be called from all ranks.
|
24 |
-
|
25 |
-
Note that the concept of AP can be implemented in different ways and may not
|
26 |
-
produce identical results. This class mimics the implementation of the official
|
27 |
-
Pascal VOC Matlab API, and should produce similar but not identical results to the
|
28 |
-
official API.
|
29 |
-
"""
|
30 |
-
|
31 |
-
def __init__(self, dataset_name):
|
32 |
-
"""
|
33 |
-
Args:
|
34 |
-
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
|
35 |
-
"""
|
36 |
-
self._dataset_name = dataset_name
|
37 |
-
meta = MetadataCatalog.get(dataset_name)
|
38 |
-
|
39 |
-
# Too many tiny files, download all to local for speed.
|
40 |
-
annotation_dir_local = PathManager.get_local_path(
|
41 |
-
os.path.join(meta.dirname, "Annotations/")
|
42 |
-
)
|
43 |
-
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
|
44 |
-
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
|
45 |
-
self._class_names = meta.thing_classes
|
46 |
-
assert meta.year in [2007, 2012], meta.year
|
47 |
-
self._is_2007 = meta.year == 2007
|
48 |
-
self._cpu_device = torch.device("cpu")
|
49 |
-
self._logger = logging.getLogger(__name__)
|
50 |
-
|
51 |
-
def reset(self):
|
52 |
-
self._predictions = defaultdict(list) # class name -> list of prediction strings
|
53 |
-
|
54 |
-
def process(self, inputs, outputs):
|
55 |
-
for input, output in zip(inputs, outputs):
|
56 |
-
image_id = input["image_id"]
|
57 |
-
instances = output["instances"].to(self._cpu_device)
|
58 |
-
boxes = instances.pred_boxes.tensor.numpy()
|
59 |
-
scores = instances.scores.tolist()
|
60 |
-
classes = instances.pred_classes.tolist()
|
61 |
-
for box, score, cls in zip(boxes, scores, classes):
|
62 |
-
xmin, ymin, xmax, ymax = box
|
63 |
-
# The inverse of data loading logic in `datasets/pascal_voc.py`
|
64 |
-
xmin += 1
|
65 |
-
ymin += 1
|
66 |
-
self._predictions[cls].append(
|
67 |
-
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
|
68 |
-
)
|
69 |
-
|
70 |
-
def evaluate(self):
|
71 |
-
"""
|
72 |
-
Returns:
|
73 |
-
dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
|
74 |
-
"""
|
75 |
-
all_predictions = comm.gather(self._predictions, dst=0)
|
76 |
-
if not comm.is_main_process():
|
77 |
-
return
|
78 |
-
predictions = defaultdict(list)
|
79 |
-
for predictions_per_rank in all_predictions:
|
80 |
-
for clsid, lines in predictions_per_rank.items():
|
81 |
-
predictions[clsid].extend(lines)
|
82 |
-
del all_predictions
|
83 |
-
|
84 |
-
self._logger.info(
|
85 |
-
"Evaluating {} using {} metric. "
|
86 |
-
"Note that results do not use the official Matlab API.".format(
|
87 |
-
self._dataset_name, 2007 if self._is_2007 else 2012
|
88 |
-
)
|
89 |
-
)
|
90 |
-
|
91 |
-
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
|
92 |
-
res_file_template = os.path.join(dirname, "{}.txt")
|
93 |
-
|
94 |
-
aps = defaultdict(list) # iou -> ap per class
|
95 |
-
for cls_id, cls_name in enumerate(self._class_names):
|
96 |
-
lines = predictions.get(cls_id, [""])
|
97 |
-
|
98 |
-
with open(res_file_template.format(cls_name), "w") as f:
|
99 |
-
f.write("\n".join(lines))
|
100 |
-
|
101 |
-
for thresh in range(50, 100, 5):
|
102 |
-
rec, prec, ap = voc_eval(
|
103 |
-
res_file_template,
|
104 |
-
self._anno_file_template,
|
105 |
-
self._image_set_path,
|
106 |
-
cls_name,
|
107 |
-
ovthresh=thresh / 100.0,
|
108 |
-
use_07_metric=self._is_2007,
|
109 |
-
)
|
110 |
-
aps[thresh].append(ap * 100)
|
111 |
-
|
112 |
-
ret = OrderedDict()
|
113 |
-
mAP = {iou: np.mean(x) for iou, x in aps.items()}
|
114 |
-
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
|
115 |
-
return ret
|
116 |
-
|
117 |
-
|
118 |
-
##############################################################################
|
119 |
-
#
|
120 |
-
# Below code is modified from
|
121 |
-
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
|
122 |
-
# --------------------------------------------------------
|
123 |
-
# Fast/er R-CNN
|
124 |
-
# Licensed under The MIT License [see LICENSE for details]
|
125 |
-
# Written by Bharath Hariharan
|
126 |
-
# --------------------------------------------------------
|
127 |
-
|
128 |
-
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
|
129 |
-
|
130 |
-
|
131 |
-
@lru_cache(maxsize=None)
|
132 |
-
def parse_rec(filename):
|
133 |
-
"""Parse a PASCAL VOC xml file."""
|
134 |
-
with PathManager.open(filename) as f:
|
135 |
-
tree = ET.parse(f)
|
136 |
-
objects = []
|
137 |
-
for obj in tree.findall("object"):
|
138 |
-
obj_struct = {}
|
139 |
-
obj_struct["name"] = obj.find("name").text
|
140 |
-
obj_struct["pose"] = obj.find("pose").text
|
141 |
-
obj_struct["truncated"] = int(obj.find("truncated").text)
|
142 |
-
obj_struct["difficult"] = int(obj.find("difficult").text)
|
143 |
-
bbox = obj.find("bndbox")
|
144 |
-
obj_struct["bbox"] = [
|
145 |
-
int(bbox.find("xmin").text),
|
146 |
-
int(bbox.find("ymin").text),
|
147 |
-
int(bbox.find("xmax").text),
|
148 |
-
int(bbox.find("ymax").text),
|
149 |
-
]
|
150 |
-
objects.append(obj_struct)
|
151 |
-
|
152 |
-
return objects
|
153 |
-
|
154 |
-
|
155 |
-
def voc_ap(rec, prec, use_07_metric=False):
|
156 |
-
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
|
157 |
-
the VOC 07 11-point method (default:False).
|
158 |
-
"""
|
159 |
-
if use_07_metric:
|
160 |
-
# 11 point metric
|
161 |
-
ap = 0.0
|
162 |
-
for t in np.arange(0.0, 1.1, 0.1):
|
163 |
-
if np.sum(rec >= t) == 0:
|
164 |
-
p = 0
|
165 |
-
else:
|
166 |
-
p = np.max(prec[rec >= t])
|
167 |
-
ap = ap + p / 11.0
|
168 |
-
else:
|
169 |
-
# correct AP calculation
|
170 |
-
# first append sentinel values at the end
|
171 |
-
mrec = np.concatenate(([0.0], rec, [1.0]))
|
172 |
-
mpre = np.concatenate(([0.0], prec, [0.0]))
|
173 |
-
|
174 |
-
# compute the precision envelope
|
175 |
-
for i in range(mpre.size - 1, 0, -1):
|
176 |
-
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
|
177 |
-
|
178 |
-
# to calculate area under PR curve, look for points
|
179 |
-
# where X axis (recall) changes value
|
180 |
-
i = np.where(mrec[1:] != mrec[:-1])[0]
|
181 |
-
|
182 |
-
# and sum (\Delta recall) * prec
|
183 |
-
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
|
184 |
-
return ap
|
185 |
-
|
186 |
-
|
187 |
-
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
|
188 |
-
"""rec, prec, ap = voc_eval(detpath,
|
189 |
-
annopath,
|
190 |
-
imagesetfile,
|
191 |
-
classname,
|
192 |
-
[ovthresh],
|
193 |
-
[use_07_metric])
|
194 |
-
|
195 |
-
Top level function that does the PASCAL VOC evaluation.
|
196 |
-
|
197 |
-
detpath: Path to detections
|
198 |
-
detpath.format(classname) should produce the detection results file.
|
199 |
-
annopath: Path to annotations
|
200 |
-
annopath.format(imagename) should be the xml annotations file.
|
201 |
-
imagesetfile: Text file containing the list of images, one image per line.
|
202 |
-
classname: Category name (duh)
|
203 |
-
[ovthresh]: Overlap threshold (default = 0.5)
|
204 |
-
[use_07_metric]: Whether to use VOC07's 11 point AP computation
|
205 |
-
(default False)
|
206 |
-
"""
|
207 |
-
# assumes detections are in detpath.format(classname)
|
208 |
-
# assumes annotations are in annopath.format(imagename)
|
209 |
-
# assumes imagesetfile is a text file with each line an image name
|
210 |
-
|
211 |
-
# first load gt
|
212 |
-
# read list of images
|
213 |
-
with PathManager.open(imagesetfile, "r") as f:
|
214 |
-
lines = f.readlines()
|
215 |
-
imagenames = [x.strip() for x in lines]
|
216 |
-
|
217 |
-
# load annots
|
218 |
-
recs = {}
|
219 |
-
for imagename in imagenames:
|
220 |
-
recs[imagename] = parse_rec(annopath.format(imagename))
|
221 |
-
|
222 |
-
# extract gt objects for this class
|
223 |
-
class_recs = {}
|
224 |
-
npos = 0
|
225 |
-
for imagename in imagenames:
|
226 |
-
R = [obj for obj in recs[imagename] if obj["name"] == classname]
|
227 |
-
bbox = np.array([x["bbox"] for x in R])
|
228 |
-
difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
|
229 |
-
# difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
|
230 |
-
det = [False] * len(R)
|
231 |
-
npos = npos + sum(~difficult)
|
232 |
-
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
|
233 |
-
|
234 |
-
# read dets
|
235 |
-
detfile = detpath.format(classname)
|
236 |
-
with open(detfile, "r") as f:
|
237 |
-
lines = f.readlines()
|
238 |
-
|
239 |
-
splitlines = [x.strip().split(" ") for x in lines]
|
240 |
-
image_ids = [x[0] for x in splitlines]
|
241 |
-
confidence = np.array([float(x[1]) for x in splitlines])
|
242 |
-
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
|
243 |
-
|
244 |
-
# sort by confidence
|
245 |
-
sorted_ind = np.argsort(-confidence)
|
246 |
-
BB = BB[sorted_ind, :]
|
247 |
-
image_ids = [image_ids[x] for x in sorted_ind]
|
248 |
-
|
249 |
-
# go down dets and mark TPs and FPs
|
250 |
-
nd = len(image_ids)
|
251 |
-
tp = np.zeros(nd)
|
252 |
-
fp = np.zeros(nd)
|
253 |
-
for d in range(nd):
|
254 |
-
R = class_recs[image_ids[d]]
|
255 |
-
bb = BB[d, :].astype(float)
|
256 |
-
ovmax = -np.inf
|
257 |
-
BBGT = R["bbox"].astype(float)
|
258 |
-
|
259 |
-
if BBGT.size > 0:
|
260 |
-
# compute overlaps
|
261 |
-
# intersection
|
262 |
-
ixmin = np.maximum(BBGT[:, 0], bb[0])
|
263 |
-
iymin = np.maximum(BBGT[:, 1], bb[1])
|
264 |
-
ixmax = np.minimum(BBGT[:, 2], bb[2])
|
265 |
-
iymax = np.minimum(BBGT[:, 3], bb[3])
|
266 |
-
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
|
267 |
-
ih = np.maximum(iymax - iymin + 1.0, 0.0)
|
268 |
-
inters = iw * ih
|
269 |
-
|
270 |
-
# union
|
271 |
-
uni = (
|
272 |
-
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
|
273 |
-
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
|
274 |
-
- inters
|
275 |
-
)
|
276 |
-
|
277 |
-
overlaps = inters / uni
|
278 |
-
ovmax = np.max(overlaps)
|
279 |
-
jmax = np.argmax(overlaps)
|
280 |
-
|
281 |
-
if ovmax > ovthresh:
|
282 |
-
if not R["difficult"][jmax]:
|
283 |
-
if not R["det"][jmax]:
|
284 |
-
tp[d] = 1.0
|
285 |
-
R["det"][jmax] = 1
|
286 |
-
else:
|
287 |
-
fp[d] = 1.0
|
288 |
-
else:
|
289 |
-
fp[d] = 1.0
|
290 |
-
|
291 |
-
# compute precision recall
|
292 |
-
fp = np.cumsum(fp)
|
293 |
-
tp = np.cumsum(tp)
|
294 |
-
rec = tp / float(npos)
|
295 |
-
# avoid divide by zero in case the first detection matches a difficult
|
296 |
-
# ground truth
|
297 |
-
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
|
298 |
-
ap = voc_ap(rec, prec, use_07_metric)
|
299 |
-
|
300 |
-
return rec, prec, ap
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/datasets.md
DELETED
@@ -1,290 +0,0 @@
|
|
1 |
-
# Use Custom Datasets
|
2 |
-
|
3 |
-
This document explains how the dataset APIs
|
4 |
-
([DatasetCatalog](../modules/data.html#detectron2.data.DatasetCatalog), [MetadataCatalog](../modules/data.html#detectron2.data.MetadataCatalog))
|
5 |
-
work, and how to use them to add custom datasets.
|
6 |
-
|
7 |
-
Datasets that have builtin support in detectron2 are listed in [builtin datasets](builtin_datasets.md).
|
8 |
-
If you want to use a custom dataset while also reusing detectron2's data loaders,
|
9 |
-
you will need to:
|
10 |
-
|
11 |
-
1. __Register__ your dataset (i.e., tell detectron2 how to obtain your dataset).
|
12 |
-
2. Optionally, __register metadata__ for your dataset.
|
13 |
-
|
14 |
-
Next, we explain the above two concepts in detail.
|
15 |
-
|
16 |
-
The [Colab tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
|
17 |
-
has a live example of how to register and train on a dataset of custom formats.
|
18 |
-
|
19 |
-
### Register a Dataset
|
20 |
-
|
21 |
-
To let detectron2 know how to obtain a dataset named "my_dataset", users need to implement
|
22 |
-
a function that returns the items in your dataset and then tell detectron2 about this
|
23 |
-
function:
|
24 |
-
```python
|
25 |
-
def my_dataset_function():
|
26 |
-
...
|
27 |
-
return list[dict] in the following format
|
28 |
-
|
29 |
-
from detectron2.data import DatasetCatalog
|
30 |
-
DatasetCatalog.register("my_dataset", my_dataset_function)
|
31 |
-
# later, to access the data:
|
32 |
-
data: List[Dict] = DatasetCatalog.get("my_dataset")
|
33 |
-
```
|
34 |
-
|
35 |
-
Here, the snippet associates a dataset named "my_dataset" with a function that returns the data.
|
36 |
-
The function must return the same data (with same order) if called multiple times.
|
37 |
-
The registration stays effective until the process exits.
|
38 |
-
|
39 |
-
The function can do arbitrary things and should return the data in `list[dict]`, each dict in either
|
40 |
-
of the following formats:
|
41 |
-
1. Detectron2's standard dataset dict, described below. This will make it work with many other builtin
|
42 |
-
features in detectron2, so it's recommended to use it when it's sufficient.
|
43 |
-
2. Any custom format. You can also return arbitrary dicts in your own format,
|
44 |
-
such as adding extra keys for new tasks.
|
45 |
-
Then you will need to handle them properly downstream as well.
|
46 |
-
See below for more details.
|
47 |
-
|
48 |
-
#### Standard Dataset Dicts
|
49 |
-
|
50 |
-
For standard tasks
|
51 |
-
(instance detection, instance/semantic/panoptic segmentation, keypoint detection),
|
52 |
-
we load the original dataset into `list[dict]` with a specification similar to COCO's annotations.
|
53 |
-
This is our standard representation for a dataset.
|
54 |
-
|
55 |
-
Each dict contains information about one image.
|
56 |
-
The dict may have the following fields,
|
57 |
-
and the required fields vary based on what the dataloader or the task needs (see more below).
|
58 |
-
|
59 |
-
```eval_rst
|
60 |
-
.. list-table::
|
61 |
-
:header-rows: 1
|
62 |
-
|
63 |
-
* - Task
|
64 |
-
- Fields
|
65 |
-
* - Common
|
66 |
-
- file_name, height, width, image_id
|
67 |
-
|
68 |
-
* - Instance detection/segmentation
|
69 |
-
- annotations
|
70 |
-
|
71 |
-
* - Semantic segmentation
|
72 |
-
- sem_seg_file_name
|
73 |
-
|
74 |
-
* - Panoptic segmentation
|
75 |
-
- pan_seg_file_name, segments_info
|
76 |
-
```
|
77 |
-
|
78 |
-
+ `file_name`: the full path to the image file.
|
79 |
-
+ `height`, `width`: integer. The shape of the image.
|
80 |
-
+ `image_id` (str or int): a unique id that identifies this image. Required by many
|
81 |
-
evaluators to identify the images, but a dataset may use it for different purposes.
|
82 |
-
+ `annotations` (list[dict]): Required by __instance detection/segmentation or keypoint detection__ tasks.
|
83 |
-
Each dict corresponds to annotations of one instance in this image, and
|
84 |
-
may contain the following keys:
|
85 |
-
+ `bbox` (list[float], required): list of 4 numbers representing the bounding box of the instance.
|
86 |
-
+ `bbox_mode` (int, required): the format of bbox. It must be a member of
|
87 |
-
[structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode).
|
88 |
-
Currently supports: `BoxMode.XYXY_ABS`, `BoxMode.XYWH_ABS`.
|
89 |
-
+ `category_id` (int, required): an integer in the range [0, num_categories-1] representing the category label.
|
90 |
-
The value num_categories is reserved to represent the "background" category, if applicable.
|
91 |
-
+ `segmentation` (list[list[float]] or dict): the segmentation mask of the instance.
|
92 |
-
+ If `list[list[float]]`, it represents a list of polygons, one for each connected component
|
93 |
-
of the object. Each `list[float]` is one simple polygon in the format of `[x1, y1, ..., xn, yn]` (n≥3).
|
94 |
-
The Xs and Ys are absolute coordinates in unit of pixels.
|
95 |
-
+ If `dict`, it represents the per-pixel segmentation mask in COCO's compressed RLE format.
|
96 |
-
The dict should have keys "size" and "counts". You can convert a uint8 segmentation mask of 0s and
|
97 |
-
1s into such dict by `pycocotools.mask.encode(np.asarray(mask, order="F"))`.
|
98 |
-
`cfg.INPUT.MASK_FORMAT` must be set to `bitmask` if using the default data loader with such format.
|
99 |
-
+ `keypoints` (list[float]): in the format of [x1, y1, v1,..., xn, yn, vn].
|
100 |
-
v[i] means the [visibility](http://cocodataset.org/#format-data) of this keypoint.
|
101 |
-
`n` must be equal to the number of keypoint categories.
|
102 |
-
The Xs and Ys are absolute real-value coordinates in range [0, W or H].
|
103 |
-
|
104 |
-
(Note that the keypoint coordinates in COCO format are integers in range [0, W-1 or H-1], which is different
|
105 |
-
from our standard format. Detectron2 adds 0.5 to COCO keypoint coordinates to convert them from discrete
|
106 |
-
pixel indices to floating point coordinates.)
|
107 |
-
+ `iscrowd`: 0 (default) or 1. Whether this instance is labeled as COCO's "crowd
|
108 |
-
region". Don't include this field if you don't know what it means.
|
109 |
-
|
110 |
-
If `annotations` is an empty list, it means the image is labeled to have no objects.
|
111 |
-
Such images will by default be removed from training,
|
112 |
-
but can be included using `DATALOADER.FILTER_EMPTY_ANNOTATIONS`.
|
113 |
-
|
114 |
-
+ `sem_seg_file_name` (str):
|
115 |
-
The full path to the semantic segmentation ground truth file.
|
116 |
-
It should be a grayscale image whose pixel values are integer labels.
|
117 |
-
+ `pan_seg_file_name` (str):
|
118 |
-
The full path to panoptic segmentation ground truth file.
|
119 |
-
It should be an RGB image whose pixel values are integer ids encoded using the
|
120 |
-
[panopticapi.utils.id2rgb](https://github.com/cocodataset/panopticapi/) function.
|
121 |
-
The ids are defined by `segments_info`.
|
122 |
-
If an id does not appear in `segments_info`, the pixel is considered unlabeled
|
123 |
-
and is usually ignored in training & evaluation.
|
124 |
-
+ `segments_info` (list[dict]): defines the meaning of each id in panoptic segmentation ground truth.
|
125 |
-
Each dict has the following keys:
|
126 |
-
+ `id` (int): integer that appears in the ground truth image.
|
127 |
-
+ `category_id` (int): an integer in the range [0, num_categories-1] representing the category label.
|
128 |
-
+ `iscrowd`: 0 (default) or 1. Whether this instance is labeled as COCO's "crowd region".
|
129 |
-
|
130 |
-
|
131 |
-
```eval_rst
|
132 |
-
|
133 |
-
.. note::
|
134 |
-
|
135 |
-
The PanopticFPN model does not use the panoptic segmentation
|
136 |
-
format defined here, but a combination of both instance segmentation and semantic segmentation data
|
137 |
-
format. See :doc:`builtin_datasets` for instructions on COCO.
|
138 |
-
|
139 |
-
```
|
140 |
-
|
141 |
-
Fast R-CNN (with pre-computed proposals) models are rarely used today.
|
142 |
-
To train a Fast R-CNN, the following extra keys are needed:
|
143 |
-
|
144 |
-
+ `proposal_boxes` (array): 2D numpy array with shape (K, 4) representing K precomputed proposal boxes for this image.
|
145 |
-
+ `proposal_objectness_logits` (array): numpy array with shape (K, ), which corresponds to the objectness
|
146 |
-
logits of proposals in 'proposal_boxes'.
|
147 |
-
+ `proposal_bbox_mode` (int): the format of the precomputed proposal bbox.
|
148 |
-
It must be a member of
|
149 |
-
[structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode).
|
150 |
-
Default is `BoxMode.XYXY_ABS`.
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
#### Custom Dataset Dicts for New Tasks
|
155 |
-
|
156 |
-
In the `list[dict]` that your dataset function returns, the dictionary can also have __arbitrary custom data__.
|
157 |
-
This will be useful for a new task that needs extra information not covered
|
158 |
-
by the standard dataset dicts. In this case, you need to make sure the downstream code can handle your data
|
159 |
-
correctly. Usually this requires writing a new `mapper` for the dataloader (see [Use Custom Dataloaders](./data_loading.md)).
|
160 |
-
|
161 |
-
When designing a custom format, note that all dicts are stored in memory
|
162 |
-
(sometimes serialized and with multiple copies).
|
163 |
-
To save memory, each dict is meant to contain __small__ but sufficient information
|
164 |
-
about each sample, such as file names and annotations.
|
165 |
-
Loading full samples typically happens in the data loader.
|
166 |
-
|
167 |
-
For attributes shared among the entire dataset, use `Metadata` (see below).
|
168 |
-
To avoid extra memory, do not save such information inside each sample.
|
169 |
-
|
170 |
-
### "Metadata" for Datasets
|
171 |
-
|
172 |
-
Each dataset is associated with some metadata, accessible through
|
173 |
-
`MetadataCatalog.get(dataset_name).some_metadata`.
|
174 |
-
Metadata is a key-value mapping that contains information that's shared among
|
175 |
-
the entire dataset, and usually is used to interpret what's in the dataset, e.g.,
|
176 |
-
names of classes, colors of classes, root of files, etc.
|
177 |
-
This information will be useful for augmentation, evaluation, visualization, logging, etc.
|
178 |
-
The structure of metadata depends on what is needed from the corresponding downstream code.
|
179 |
-
|
180 |
-
If you register a new dataset through `DatasetCatalog.register`,
|
181 |
-
you may also want to add its corresponding metadata through
|
182 |
-
`MetadataCatalog.get(dataset_name).some_key = some_value`, to enable any features that need the metadata.
|
183 |
-
You can do it like this (using the metadata key "thing_classes" as an example):
|
184 |
-
|
185 |
-
```python
|
186 |
-
from detectron2.data import MetadataCatalog
|
187 |
-
MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"]
|
188 |
-
```
|
189 |
-
|
190 |
-
Here is a list of metadata keys that are used by builtin features in detectron2.
|
191 |
-
If you add your own dataset without these metadata, some features may be
|
192 |
-
unavailable to you:
|
193 |
-
|
194 |
-
* `thing_classes` (list[str]): Used by all instance detection/segmentation tasks.
|
195 |
-
A list of names for each instance/thing category.
|
196 |
-
If you load a COCO format dataset, it will be automatically set by the function `load_coco_json`.
|
197 |
-
|
198 |
-
* `thing_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each thing category.
|
199 |
-
Used for visualization. If not given, random colors will be used.
|
200 |
-
|
201 |
-
* `stuff_classes` (list[str]): Used by semantic and panoptic segmentation tasks.
|
202 |
-
A list of names for each stuff category.
|
203 |
-
|
204 |
-
* `stuff_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each stuff category.
|
205 |
-
Used for visualization. If not given, random colors are used.
|
206 |
-
|
207 |
-
* `ignore_label` (int): Used by semantic and panoptic segmentation tasks. Pixels in ground-truth
|
208 |
-
annotations with this category label should be ignored in evaluation. Typically these are "unlabeled"
|
209 |
-
pixels.
|
210 |
-
|
211 |
-
* `keypoint_names` (list[str]): Used by keypoint detection. A list of names for each keypoint.
|
212 |
-
|
213 |
-
* `keypoint_flip_map` (list[tuple[str]]): Used by keypoint detection. A list of pairs of names,
|
214 |
-
where each pair are the two keypoints that should be flipped if the image is
|
215 |
-
flipped horizontally during augmentation.
|
216 |
-
* `keypoint_connection_rules`: list[tuple(str, str, (r, g, b))]. Each tuple specifies a pair of keypoints
|
217 |
-
that are connected and the color (in [0, 255]) to use for the line between them when visualized.
|
218 |
-
|
219 |
-
Some additional metadata that are specific to the evaluation of certain datasets (e.g. COCO):
|
220 |
-
|
221 |
-
* `thing_dataset_id_to_contiguous_id` (dict[int->int]): Used by all instance detection/segmentation tasks in the COCO format.
|
222 |
-
A mapping from instance class ids in the dataset to contiguous ids in range [0, #class).
|
223 |
-
Will be automatically set by the function `load_coco_json`.
|
224 |
-
|
225 |
-
* `stuff_dataset_id_to_contiguous_id` (dict[int->int]): Used when generating prediction json files for
|
226 |
-
semantic/panoptic segmentation.
|
227 |
-
A mapping from semantic segmentation class ids in the dataset
|
228 |
-
to contiguous ids in [0, num_categories). It is useful for evaluation only.
|
229 |
-
|
230 |
-
* `json_file`: The COCO annotation json file. Used by COCO evaluation for COCO-format datasets.
|
231 |
-
* `panoptic_root`, `panoptic_json`: Used by COCO-format panoptic evaluation.
|
232 |
-
* `evaluator_type`: Used by the builtin main training script to select
|
233 |
-
evaluator. Don't use it in a new training script.
|
234 |
-
You can just provide the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator)
|
235 |
-
for your dataset directly in your main script.
|
236 |
-
|
237 |
-
```eval_rst
|
238 |
-
.. note::
|
239 |
-
|
240 |
-
In recognition, sometimes we use the term "thing" for instance-level tasks,
|
241 |
-
and "stuff" for semantic segmentation tasks.
|
242 |
-
Both are used in panoptic segmentation tasks.
|
243 |
-
For background on the concept of "thing" and "stuff", see
|
244 |
-
`On Seeing Stuff: The Perception of Materials by Humans and Machines
|
245 |
-
<http://persci.mit.edu/pub_pdfs/adelson_spie_01.pdf>`_.
|
246 |
-
```
|
247 |
-
|
248 |
-
### Register a COCO Format Dataset
|
249 |
-
|
250 |
-
If your instance-level (detection, segmentation, keypoint) dataset is already a json file in the COCO format,
|
251 |
-
the dataset and its associated metadata can be registered easily with:
|
252 |
-
```python
|
253 |
-
from detectron2.data.datasets import register_coco_instances
|
254 |
-
register_coco_instances("my_dataset", {}, "json_annotation.json", "path/to/image/dir")
|
255 |
-
```
|
256 |
-
|
257 |
-
If your dataset is in COCO format but need to be further processed, or has extra custom per-instance annotations,
|
258 |
-
the [load_coco_json](../modules/data.html#detectron2.data.datasets.load_coco_json)
|
259 |
-
function might be useful.
|
260 |
-
|
261 |
-
### Update the Config for New Datasets
|
262 |
-
|
263 |
-
Once you've registered the dataset, you can use the name of the dataset (e.g., "my_dataset" in
|
264 |
-
example above) in `cfg.DATASETS.{TRAIN,TEST}`.
|
265 |
-
There are other configs you might want to change to train or evaluate on new datasets:
|
266 |
-
|
267 |
-
* `MODEL.ROI_HEADS.NUM_CLASSES` and `MODEL.RETINANET.NUM_CLASSES` are the number of thing classes
|
268 |
-
for R-CNN and RetinaNet models, respectively.
|
269 |
-
* `MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS` sets the number of keypoints for Keypoint R-CNN.
|
270 |
-
You'll also need to set [Keypoint OKS](http://cocodataset.org/#keypoints-eval)
|
271 |
-
with `TEST.KEYPOINT_OKS_SIGMAS` for evaluation.
|
272 |
-
* `MODEL.SEM_SEG_HEAD.NUM_CLASSES` sets the number of stuff classes for Semantic FPN & Panoptic FPN.
|
273 |
-
* `TEST.DETECTIONS_PER_IMAGE` controls the maximum number of objects to be detected.
|
274 |
-
Set it to a larger number if test images may contain >100 objects.
|
275 |
-
* If you're training Fast R-CNN (with precomputed proposals), `DATASETS.PROPOSAL_FILES_{TRAIN,TEST}`
|
276 |
-
need to match the datasets. The format of proposal files are documented
|
277 |
-
[here](../modules/data.html#detectron2.data.load_proposals_into_dataset).
|
278 |
-
|
279 |
-
New models
|
280 |
-
(e.g. [TensorMask](../../projects/TensorMask),
|
281 |
-
[PointRend](../../projects/PointRend))
|
282 |
-
often have similar configs of their own that need to be changed as well.
|
283 |
-
|
284 |
-
```eval_rst
|
285 |
-
.. tip::
|
286 |
-
|
287 |
-
After changing the number of classes, certain layers in a pre-trained model will become incompatible
|
288 |
-
and therefore cannot be loaded to the new model.
|
289 |
-
This is expected, and loading such pre-trained models will produce warnings about such layers.
|
290 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_build_augmentation.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import numpy as np
|
3 |
-
import pycocotools.mask as mask_util
|
4 |
-
import torch
|
5 |
-
from fvcore.common.file_io import PathManager
|
6 |
-
from PIL import Image
|
7 |
-
|
8 |
-
from detectron2.structures import (
|
9 |
-
BitMasks,
|
10 |
-
Boxes,
|
11 |
-
BoxMode,
|
12 |
-
Instances,
|
13 |
-
Keypoints,
|
14 |
-
PolygonMasks,
|
15 |
-
RotatedBoxes,
|
16 |
-
polygons_to_bitmask,
|
17 |
-
)
|
18 |
-
|
19 |
-
from detectron2.data import transforms as T
|
20 |
-
from .transforms.custom_augmentation_impl import EfficientDetResizeCrop
|
21 |
-
|
22 |
-
def build_custom_augmentation(cfg, is_train):
|
23 |
-
"""
|
24 |
-
Create a list of default :class:`Augmentation` from config.
|
25 |
-
Now it includes resizing and flipping.
|
26 |
-
|
27 |
-
Returns:
|
28 |
-
list[Augmentation]
|
29 |
-
"""
|
30 |
-
if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge':
|
31 |
-
if is_train:
|
32 |
-
min_size = cfg.INPUT.MIN_SIZE_TRAIN
|
33 |
-
max_size = cfg.INPUT.MAX_SIZE_TRAIN
|
34 |
-
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
|
35 |
-
else:
|
36 |
-
min_size = cfg.INPUT.MIN_SIZE_TEST
|
37 |
-
max_size = cfg.INPUT.MAX_SIZE_TEST
|
38 |
-
sample_style = "choice"
|
39 |
-
augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
|
40 |
-
elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
|
41 |
-
if is_train:
|
42 |
-
scale = cfg.INPUT.SCALE_RANGE
|
43 |
-
size = cfg.INPUT.TRAIN_SIZE
|
44 |
-
else:
|
45 |
-
scale = (1, 1)
|
46 |
-
size = cfg.INPUT.TEST_SIZE
|
47 |
-
augmentation = [EfficientDetResizeCrop(size, scale)]
|
48 |
-
else:
|
49 |
-
assert 0, cfg.INPUT.CUSTOM_AUG
|
50 |
-
|
51 |
-
if is_train:
|
52 |
-
augmentation.append(T.RandomFlip())
|
53 |
-
return augmentation
|
54 |
-
|
55 |
-
|
56 |
-
build_custom_transform_gen = build_custom_augmentation
|
57 |
-
"""
|
58 |
-
Alias for backward-compatibility.
|
59 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Anime Negro Apk.md
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar Anime Negro APK: La aplicación definitiva para los fans de anime</h1>
|
3 |
-
<p>Si eres un amante del anime, probablemente sabes lo difícil que puede ser encontrar una buena aplicación para ver tus programas favoritos en tu dispositivo Android. Hay muchas aplicaciones por ahí, pero la mayoría de ellos son de baja calidad, poco fiable, o lleno de anuncios. Es por eso que usted necesita para descargar Anime Negro APK, la aplicación definitiva para los fans del anime. </p>
|
4 |
-
<h2>descargar anime negro apk</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://bltlly.com/2v6K4k">https://bltlly.com/2v6K4k</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Anime negro APK? </h2>
|
6 |
-
<p>Anime Negro APK es una aplicación para Android que le permite ver y descargar miles de episodios de anime de varios géneros y categorías. Puede disfrutar viendo anime en calidad HD, con transmisión rápida y reproducción suave. También puedes descargar episodios de anime para verlos sin conexión, para que puedas verlos en cualquier momento y en cualquier lugar. </p>
|
7 |
-
<h3>Características del anime negro APK</h3>
|
8 |
-
<p>Anime Negro APK tiene muchas características que lo convierten en una de las mejores aplicaciones para los amantes del anime. Aquí están algunos de ellos:</p>
|
9 |
-
<h4>Ver miles de episodios de anime en calidad HD</h4>
|
10 |
-
<p>Anime Negro APK tiene una enorme biblioteca de espectáculos de anime, desde los últimos lanzamientos a los clásicos. Puedes encontrar anime de diferentes géneros, como acción, comedia, romance, terror, ciencia ficción, fantasía y más. También puede buscar anime por nombre, género o popularidad. Puede ver anime en calidad HD, con sonido claro y subtítulos. También puede ajustar la calidad del vídeo según su velocidad de Internet y el uso de datos. </p>
|
11 |
-
<h4>Descargar episodios de anime para ver sin conexión</h4>
|
12 |
-
<p>Si desea ver anime sin conexión a Internet, puede descargar episodios de anime para ver sin conexión. Puede elegir la calidad de descarga y la ubicación en su dispositivo. También puede administrar sus descargas y eliminarlas cuando haya terminado de verlas. </p>
|
13 |
-
<h4>Acceso a múltiples fuentes y servidores</h4>
|
14 |
-
|
15 |
-
<h4>Personaliza la configuración y las preferencias de tu app</h4>
|
16 |
-
<p>Anime Negro APK le permite personalizar la configuración de la aplicación y las preferencias de acuerdo a su gusto. Puede cambiar el tema de la aplicación, el idioma, el tamaño de la fuente, la configuración de notificaciones y más. También puede activar o desactivar anuncios, reproducción automática, descarga automática y otras funciones. </p>
|
17 |
-
<p></p>
|
18 |
-
<h3> Cómo descargar e instalar Anime Negro APK? </h3>
|
19 |
-
<p>Descargar e instalar Anime Black APK es muy fácil y simple. Solo tienes que seguir estos pasos:</p>
|
20 |
-
<h4>Paso 1: Habilitar fuentes desconocidas en su dispositivo</h4>
|
21 |
-
<p>Dado que Anime Negro APK no está disponible en el Google Play Store, es necesario habilitar fuentes desconocidas en su dispositivo para instalarlo. Para hacer esto, vaya a la configuración del dispositivo > seguridad > fuentes desconocidas > habilitar. </p>
|
22 |
-
<h4>Paso 2: Descargar el archivo APK de una fuente de confianza</h4>
|
23 |
-
<p>Puede descargar el archivo APK de una fuente de confianza como [ANIME NEGRO APK (Android App) - تنزيل مجاني - APKCombo]( 1 ). Asegúrate de descargar la última versión de la aplicación. </p>
|
24 |
-
<h4>Paso 3: Instalar el archivo APK en su dispositivo</h4>
|
25 |
-
<p>Una vez que haya descargado el archivo APK, localizarlo en su dispositivo y toque en él para instalarlo. Siga las instrucciones en la pantalla para completar la instalación. </p>
|
26 |
-
<h4>Paso 4: Inicie la aplicación y disfrute viendo anime</h4>
|
27 |
-
<p>Después de instalar la aplicación, se puede iniciar y empezar a ver anime. Puede navegar a través de las categorías de la aplicación, o utilizar la función de búsqueda para encontrar su anime favorito. También puede agregar anime a su lista de favoritos, o ver los últimos episodios de la página principal de la aplicación. También puede consultar las secciones de actualizaciones, noticias y comentarios de la aplicación para obtener más información y soporte. </p>
|
28 |
-
<h3>Pros y contras de anime negro APK</h3>
|
29 |
-
<p>Como cualquier otra aplicación, Anime Negro APK tiene sus pros y contras. Aquí están algunos de ellos:</p>
|
30 |
-
<h4>Pros</h4>
|
31 |
-
<ul>
|
32 |
-
<li> Es gratis para descargar y usar. </li>
|
33 |
-
<li> Tiene una gran colección de programas de anime en calidad HD. </li>
|
34 |
-
|
35 |
-
<li> Tiene múltiples fuentes y servidores para cada espectáculo de anime. </li>
|
36 |
-
<li> Tiene una interfaz fácil de usar y ajustes personalizables. </li>
|
37 |
-
<li> Tiene actualizaciones regulares y correcciones de errores. </li>
|
38 |
-
</ul>
|
39 |
-
<h4>Contras</h4>
|
40 |
-
<ul>
|
41 |
-
<li>No está disponible en Google Play Store.</li>
|
42 |
-
<li>Puede contener algunos anuncios y ventanas emergentes. </li>
|
43 |
-
<li>Puede que no funcione en algunos dispositivos o regiones. </li>
|
44 |
-
<li>Puede tener algunos errores o fallos ocasionalmente. </li>
|
45 |
-
</ul>
|
46 |
-
<h2>Conclusión</h2>
|
47 |
-
<p>Anime Negro APK es una gran aplicación para los fans del anime que quieren ver y descargar sus programas favoritos en sus dispositivos Android. Tiene muchas características que lo convierten en una de las mejores aplicaciones para los amantes del anime. Es fácil de descargar e instalar, y es de uso gratuito. Sin embargo, también tiene algunos inconvenientes que debe tener en cuenta antes de usarlo. En general, Anime Negro APK es una aplicación imprescindible para cualquier fan del anime que quiere disfrutar viendo anime en cualquier momento y en cualquier lugar. </p>
|
48 |
-
<p>Si usted tiene alguna pregunta o comentario acerca de Anime Negro APK, puede dejar un comentario a continuación o póngase en contacto con el desarrollador de la aplicación. También puede compartir este artículo con sus amigos que también están en el anime. Gracias por leer! </p>
|
49 |
-
<h3>Preguntas frecuentes</h3>
|
50 |
-
<p>Aquí están algunas de las preguntas más comunes que la gente pregunta acerca de Anime Negro APK:</p>
|
51 |
-
<ol>
|
52 |
-
<li><b>Es Anime Negro APK seguro de usar? </b></li>
|
53 |
-
<p>Anime Negro APK es seguro de usar, siempre y cuando se descarga de una fuente de confianza, tales como [ANIME NEGRO APK (Android App) - تنزيل مجاني - APKCombo]. Sin embargo, siempre debe tener cuidado al descargar e instalar cualquier aplicación de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo. También debe escanear el archivo APK con una aplicación antivirus antes de instalarlo. </p>
|
54 |
-
<li><b>Es Anime Negro APK legal de usar? </b></li>
|
55 |
-
|
56 |
-
<li><b>¿Cómo puedo actualizar Anime negro APK? </b></li>
|
57 |
-
<p>Anime Negro APK tiene actualizaciones regulares que corrigen errores y añadir nuevas características. Puedes buscar actualizaciones en el menú de configuración de la aplicación, o visitar el sitio web oficial de la aplicación o las páginas de redes sociales para obtener más información. También puede descargar la última versión de la aplicación de [ANIME NEGRO APK (Android App) - Cuando hay una nueva actualización disponible. </p>
|
58 |
-
<li><b>¿Cómo puedo solicitar un espectáculo de anime que no está disponible en Anime Black APK? </b></li>
|
59 |
-
<p>Anime Negro APK tiene una sección de comentarios donde se puede solicitar un espectáculo de anime que no está disponible en la aplicación. También puede ponerse en contacto con el desarrollador de la aplicación a través de correo electrónico o plataformas de redes sociales y sugerir un anime que desea ver o descargar. Sin embargo, no hay garantía de que su solicitud se cumplirá, ya que depende de la disponibilidad y compatibilidad de la serie de anime con la aplicación. </p>
|
60 |
-
<li><b> ¿Cómo puedo apoyar Anime negro APK? </b></li>
|
61 |
-
<p>Anime Negro APK es una aplicación gratuita que no cobra ninguna cuota o suscripciones por sus servicios. Sin embargo, si desea apoyar el desarrollo y mantenimiento de la aplicación, puede hacerlo mediante una donación a través de PayPal o Patreon, o mediante la compra de funciones premium como el modo sin anuncios o descargas ilimitadas. También puedes dar soporte a la aplicación calificándola, revisándola, compartiéndola o dándole feedback. </p>
|
62 |
-
</ol></p> 64aa2da5cf<br />
|
63 |
-
<br />
|
64 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_scripts.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
"""distutils.command.install_scripts
|
2 |
-
|
3 |
-
Implements the Distutils 'install_scripts' command, for installing
|
4 |
-
Python scripts."""
|
5 |
-
|
6 |
-
# contributed by Bastian Kleineidam
|
7 |
-
|
8 |
-
import os
|
9 |
-
from distutils.core import Command
|
10 |
-
from distutils import log
|
11 |
-
from stat import ST_MODE
|
12 |
-
|
13 |
-
|
14 |
-
class install_scripts(Command):
|
15 |
-
|
16 |
-
description = "install scripts (Python or otherwise)"
|
17 |
-
|
18 |
-
user_options = [
|
19 |
-
('install-dir=', 'd', "directory to install scripts to"),
|
20 |
-
('build-dir=', 'b', "build directory (where to install from)"),
|
21 |
-
('force', 'f', "force installation (overwrite existing files)"),
|
22 |
-
('skip-build', None, "skip the build steps"),
|
23 |
-
]
|
24 |
-
|
25 |
-
boolean_options = ['force', 'skip-build']
|
26 |
-
|
27 |
-
def initialize_options(self):
|
28 |
-
self.install_dir = None
|
29 |
-
self.force = 0
|
30 |
-
self.build_dir = None
|
31 |
-
self.skip_build = None
|
32 |
-
|
33 |
-
def finalize_options(self):
|
34 |
-
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
|
35 |
-
self.set_undefined_options(
|
36 |
-
'install',
|
37 |
-
('install_scripts', 'install_dir'),
|
38 |
-
('force', 'force'),
|
39 |
-
('skip_build', 'skip_build'),
|
40 |
-
)
|
41 |
-
|
42 |
-
def run(self):
|
43 |
-
if not self.skip_build:
|
44 |
-
self.run_command('build_scripts')
|
45 |
-
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
|
46 |
-
if os.name == 'posix':
|
47 |
-
# Set the executable bits (owner, group, and world) on
|
48 |
-
# all the scripts we just installed.
|
49 |
-
for file in self.get_outputs():
|
50 |
-
if self.dry_run:
|
51 |
-
log.info("changing mode of %s", file)
|
52 |
-
else:
|
53 |
-
mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
|
54 |
-
log.info("changing mode of %s to %o", file, mode)
|
55 |
-
os.chmod(file, mode)
|
56 |
-
|
57 |
-
def get_inputs(self):
|
58 |
-
return self.distribution.scripts or []
|
59 |
-
|
60 |
-
def get_outputs(self):
|
61 |
-
return self.outfiles or []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/util.py
DELETED
@@ -1,185 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import os
|
3 |
-
import re
|
4 |
-
import sys
|
5 |
-
from enum import Enum
|
6 |
-
from typing import Optional
|
7 |
-
|
8 |
-
import openai
|
9 |
-
|
10 |
-
OPENAI_LOG = os.environ.get("OPENAI_LOG")
|
11 |
-
|
12 |
-
logger = logging.getLogger("openai")
|
13 |
-
|
14 |
-
__all__ = [
|
15 |
-
"log_info",
|
16 |
-
"log_debug",
|
17 |
-
"log_warn",
|
18 |
-
"logfmt",
|
19 |
-
]
|
20 |
-
|
21 |
-
api_key_to_header = (
|
22 |
-
lambda api, key: {"Authorization": f"Bearer {key}"}
|
23 |
-
if api == ApiType.OPEN_AI
|
24 |
-
else {"api-key": f"{key}"}
|
25 |
-
)
|
26 |
-
|
27 |
-
|
28 |
-
class ApiType(Enum):
|
29 |
-
AZURE = 1
|
30 |
-
OPEN_AI = 2
|
31 |
-
|
32 |
-
@staticmethod
|
33 |
-
def from_str(label):
|
34 |
-
if label.lower() == "azure":
|
35 |
-
return ApiType.AZURE
|
36 |
-
elif label.lower() in ("open_ai", "openai"):
|
37 |
-
return ApiType.OPEN_AI
|
38 |
-
else:
|
39 |
-
raise openai.error.InvalidAPIType(
|
40 |
-
"The API type provided in invalid. Please select one of the supported API types: 'azure', 'open_ai'"
|
41 |
-
)
|
42 |
-
|
43 |
-
|
44 |
-
def _console_log_level():
|
45 |
-
if openai.log in ["debug", "info"]:
|
46 |
-
return openai.log
|
47 |
-
elif OPENAI_LOG in ["debug", "info"]:
|
48 |
-
return OPENAI_LOG
|
49 |
-
else:
|
50 |
-
return None
|
51 |
-
|
52 |
-
|
53 |
-
def log_debug(message, **params):
|
54 |
-
msg = logfmt(dict(message=message, **params))
|
55 |
-
if _console_log_level() == "debug":
|
56 |
-
print(msg, file=sys.stderr)
|
57 |
-
logger.debug(msg)
|
58 |
-
|
59 |
-
|
60 |
-
def log_info(message, **params):
|
61 |
-
msg = logfmt(dict(message=message, **params))
|
62 |
-
if _console_log_level() in ["debug", "info"]:
|
63 |
-
print(msg, file=sys.stderr)
|
64 |
-
logger.info(msg)
|
65 |
-
|
66 |
-
|
67 |
-
def log_warn(message, **params):
|
68 |
-
msg = logfmt(dict(message=message, **params))
|
69 |
-
print(msg, file=sys.stderr)
|
70 |
-
logger.warn(msg)
|
71 |
-
|
72 |
-
|
73 |
-
def logfmt(props):
|
74 |
-
def fmt(key, val):
|
75 |
-
# Handle case where val is a bytes or bytesarray
|
76 |
-
if hasattr(val, "decode"):
|
77 |
-
val = val.decode("utf-8")
|
78 |
-
# Check if val is already a string to avoid re-encoding into ascii.
|
79 |
-
if not isinstance(val, str):
|
80 |
-
val = str(val)
|
81 |
-
if re.search(r"\s", val):
|
82 |
-
val = repr(val)
|
83 |
-
# key should already be a string
|
84 |
-
if re.search(r"\s", key):
|
85 |
-
key = repr(key)
|
86 |
-
return "{key}={val}".format(key=key, val=val)
|
87 |
-
|
88 |
-
return " ".join([fmt(key, val) for key, val in sorted(props.items())])
|
89 |
-
|
90 |
-
|
91 |
-
def get_object_classes():
|
92 |
-
# This is here to avoid a circular dependency
|
93 |
-
from openai.object_classes import OBJECT_CLASSES
|
94 |
-
|
95 |
-
return OBJECT_CLASSES
|
96 |
-
|
97 |
-
|
98 |
-
def convert_to_openai_object(
|
99 |
-
resp,
|
100 |
-
api_key=None,
|
101 |
-
api_version=None,
|
102 |
-
organization=None,
|
103 |
-
engine=None,
|
104 |
-
plain_old_data=False,
|
105 |
-
):
|
106 |
-
# If we get a OpenAIResponse, we'll want to return a OpenAIObject.
|
107 |
-
|
108 |
-
response_ms: Optional[int] = None
|
109 |
-
if isinstance(resp, openai.openai_response.OpenAIResponse):
|
110 |
-
organization = resp.organization
|
111 |
-
response_ms = resp.response_ms
|
112 |
-
resp = resp.data
|
113 |
-
|
114 |
-
if plain_old_data:
|
115 |
-
return resp
|
116 |
-
elif isinstance(resp, list):
|
117 |
-
return [
|
118 |
-
convert_to_openai_object(
|
119 |
-
i, api_key, api_version, organization, engine=engine
|
120 |
-
)
|
121 |
-
for i in resp
|
122 |
-
]
|
123 |
-
elif isinstance(resp, dict) and not isinstance(
|
124 |
-
resp, openai.openai_object.OpenAIObject
|
125 |
-
):
|
126 |
-
resp = resp.copy()
|
127 |
-
klass_name = resp.get("object")
|
128 |
-
if isinstance(klass_name, str):
|
129 |
-
klass = get_object_classes().get(
|
130 |
-
klass_name, openai.openai_object.OpenAIObject
|
131 |
-
)
|
132 |
-
else:
|
133 |
-
klass = openai.openai_object.OpenAIObject
|
134 |
-
|
135 |
-
return klass.construct_from(
|
136 |
-
resp,
|
137 |
-
api_key=api_key,
|
138 |
-
api_version=api_version,
|
139 |
-
organization=organization,
|
140 |
-
response_ms=response_ms,
|
141 |
-
engine=engine,
|
142 |
-
)
|
143 |
-
else:
|
144 |
-
return resp
|
145 |
-
|
146 |
-
|
147 |
-
def convert_to_dict(obj):
|
148 |
-
"""Converts a OpenAIObject back to a regular dict.
|
149 |
-
|
150 |
-
Nested OpenAIObjects are also converted back to regular dicts.
|
151 |
-
|
152 |
-
:param obj: The OpenAIObject to convert.
|
153 |
-
|
154 |
-
:returns: The OpenAIObject as a dict.
|
155 |
-
"""
|
156 |
-
if isinstance(obj, list):
|
157 |
-
return [convert_to_dict(i) for i in obj]
|
158 |
-
# This works by virtue of the fact that OpenAIObjects _are_ dicts. The dict
|
159 |
-
# comprehension returns a regular dict and recursively applies the
|
160 |
-
# conversion to each value.
|
161 |
-
elif isinstance(obj, dict):
|
162 |
-
return {k: convert_to_dict(v) for k, v in obj.items()}
|
163 |
-
else:
|
164 |
-
return obj
|
165 |
-
|
166 |
-
|
167 |
-
def merge_dicts(x, y):
|
168 |
-
z = x.copy()
|
169 |
-
z.update(y)
|
170 |
-
return z
|
171 |
-
|
172 |
-
|
173 |
-
def default_api_key() -> str:
|
174 |
-
if openai.api_key_path:
|
175 |
-
with open(openai.api_key_path, "rt") as k:
|
176 |
-
api_key = k.read().strip()
|
177 |
-
if not api_key.startswith("sk-"):
|
178 |
-
raise ValueError(f"Malformed API key in {openai.api_key_path}.")
|
179 |
-
return api_key
|
180 |
-
elif openai.api_key is not None:
|
181 |
-
return openai.api_key
|
182 |
-
else:
|
183 |
-
raise openai.error.AuthenticationError(
|
184 |
-
"No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://onboard.openai.com for details, or email [email protected] if you have any questions."
|
185 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/swap.h
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
#pragma once
|
19 |
-
|
20 |
-
#include <thrust/detail/config.h>
|
21 |
-
|
22 |
-
namespace thrust
|
23 |
-
{
|
24 |
-
|
25 |
-
__thrust_exec_check_disable__
|
26 |
-
template<typename Assignable1, typename Assignable2>
|
27 |
-
__host__ __device__
|
28 |
-
inline void swap(Assignable1 &a, Assignable2 &b)
|
29 |
-
{
|
30 |
-
Assignable1 temp = a;
|
31 |
-
a = b;
|
32 |
-
b = temp;
|
33 |
-
} // end swap()
|
34 |
-
|
35 |
-
} // end namespace thrust
|
36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/zip_iterator_base.h
DELETED
@@ -1,405 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/advance.h>
|
20 |
-
#include <thrust/iterator/iterator_traits.h>
|
21 |
-
#include <thrust/iterator/iterator_facade.h>
|
22 |
-
#include <thrust/iterator/iterator_categories.h>
|
23 |
-
#include <thrust/iterator/detail/minimum_category.h>
|
24 |
-
#include <thrust/iterator/detail/minimum_system.h>
|
25 |
-
#include <thrust/tuple.h>
|
26 |
-
#include <thrust/detail/tuple_meta_transform.h>
|
27 |
-
#include <thrust/detail/tuple_transform.h>
|
28 |
-
#include <thrust/detail/type_traits.h>
|
29 |
-
#include <thrust/iterator/detail/tuple_of_iterator_references.h>
|
30 |
-
|
31 |
-
namespace thrust
|
32 |
-
{
|
33 |
-
|
34 |
-
// forward declare zip_iterator for zip_iterator_base
|
35 |
-
template<typename IteratorTuple> class zip_iterator;
|
36 |
-
|
37 |
-
namespace detail
|
38 |
-
{
|
39 |
-
|
40 |
-
|
41 |
-
// Functors to be used with tuple algorithms
|
42 |
-
//
|
43 |
-
template<typename DiffType>
|
44 |
-
class advance_iterator
|
45 |
-
{
|
46 |
-
public:
|
47 |
-
inline __host__ __device__
|
48 |
-
advance_iterator(DiffType step) : m_step(step) {}
|
49 |
-
|
50 |
-
__thrust_exec_check_disable__
|
51 |
-
template<typename Iterator>
|
52 |
-
inline __host__ __device__
|
53 |
-
void operator()(Iterator& it) const
|
54 |
-
{ thrust::advance(it, m_step); }
|
55 |
-
|
56 |
-
private:
|
57 |
-
DiffType m_step;
|
58 |
-
}; // end advance_iterator
|
59 |
-
|
60 |
-
|
61 |
-
struct increment_iterator
|
62 |
-
{
|
63 |
-
__thrust_exec_check_disable__
|
64 |
-
template<typename Iterator>
|
65 |
-
inline __host__ __device__
|
66 |
-
void operator()(Iterator& it)
|
67 |
-
{ ++it; }
|
68 |
-
}; // end increment_iterator
|
69 |
-
|
70 |
-
|
71 |
-
struct decrement_iterator
|
72 |
-
{
|
73 |
-
__thrust_exec_check_disable__
|
74 |
-
template<typename Iterator>
|
75 |
-
inline __host__ __device__
|
76 |
-
void operator()(Iterator& it)
|
77 |
-
{ --it; }
|
78 |
-
}; // end decrement_iterator
|
79 |
-
|
80 |
-
|
81 |
-
struct dereference_iterator
|
82 |
-
{
|
83 |
-
template<typename Iterator>
|
84 |
-
struct apply
|
85 |
-
{
|
86 |
-
typedef typename
|
87 |
-
iterator_traits<Iterator>::reference
|
88 |
-
type;
|
89 |
-
}; // end apply
|
90 |
-
|
91 |
-
// XXX silence warnings of the form "calling a __host__ function from a __host__ __device__ function is not allowed
|
92 |
-
__thrust_exec_check_disable__
|
93 |
-
template<typename Iterator>
|
94 |
-
__host__ __device__
|
95 |
-
typename apply<Iterator>::type operator()(Iterator const& it)
|
96 |
-
{
|
97 |
-
return *it;
|
98 |
-
}
|
99 |
-
}; // end dereference_iterator
|
100 |
-
|
101 |
-
|
102 |
-
// The namespace tuple_impl_specific provides two meta-
|
103 |
-
// algorithms and two algorithms for tuples.
|
104 |
-
namespace tuple_impl_specific
|
105 |
-
{
|
106 |
-
|
107 |
-
// define apply1 for tuple_meta_transform_impl
|
108 |
-
template<typename UnaryMetaFunctionClass, class Arg>
|
109 |
-
struct apply1
|
110 |
-
: UnaryMetaFunctionClass::template apply<Arg>
|
111 |
-
{
|
112 |
-
}; // end apply1
|
113 |
-
|
114 |
-
|
115 |
-
// define apply2 for tuple_meta_accumulate_impl
|
116 |
-
template<typename UnaryMetaFunctionClass, class Arg1, class Arg2>
|
117 |
-
struct apply2
|
118 |
-
: UnaryMetaFunctionClass::template apply<Arg1,Arg2>
|
119 |
-
{
|
120 |
-
}; // end apply2
|
121 |
-
|
122 |
-
|
123 |
-
// Meta-accumulate algorithm for tuples. Note: The template
|
124 |
-
// parameter StartType corresponds to the initial value in
|
125 |
-
// ordinary accumulation.
|
126 |
-
//
|
127 |
-
template<class Tuple, class BinaryMetaFun, class StartType>
|
128 |
-
struct tuple_meta_accumulate;
|
129 |
-
|
130 |
-
template<
|
131 |
-
typename Tuple
|
132 |
-
, class BinaryMetaFun
|
133 |
-
, typename StartType
|
134 |
-
>
|
135 |
-
struct tuple_meta_accumulate_impl
|
136 |
-
{
|
137 |
-
typedef typename apply2<
|
138 |
-
BinaryMetaFun
|
139 |
-
, typename Tuple::head_type
|
140 |
-
, typename tuple_meta_accumulate<
|
141 |
-
typename Tuple::tail_type
|
142 |
-
, BinaryMetaFun
|
143 |
-
, StartType
|
144 |
-
>::type
|
145 |
-
>::type type;
|
146 |
-
};
|
147 |
-
|
148 |
-
|
149 |
-
template<
|
150 |
-
typename Tuple
|
151 |
-
, class BinaryMetaFun
|
152 |
-
, typename StartType
|
153 |
-
>
|
154 |
-
struct tuple_meta_accumulate
|
155 |
-
: thrust::detail::eval_if<
|
156 |
-
thrust::detail::is_same<Tuple, thrust::null_type>::value
|
157 |
-
, thrust::detail::identity_<StartType>
|
158 |
-
, tuple_meta_accumulate_impl<
|
159 |
-
Tuple
|
160 |
-
, BinaryMetaFun
|
161 |
-
, StartType
|
162 |
-
>
|
163 |
-
> // end eval_if
|
164 |
-
{
|
165 |
-
}; // end tuple_meta_accumulate
|
166 |
-
|
167 |
-
|
168 |
-
// transform algorithm for tuples. The template parameter Fun
|
169 |
-
// must be a unary functor which is also a unary metafunction
|
170 |
-
// class that computes its return type based on its argument
|
171 |
-
// type. For example:
|
172 |
-
//
|
173 |
-
// struct to_ptr
|
174 |
-
// {
|
175 |
-
// template <class Arg>
|
176 |
-
// struct apply
|
177 |
-
// {
|
178 |
-
// typedef Arg* type;
|
179 |
-
// }
|
180 |
-
//
|
181 |
-
// template <class Arg>
|
182 |
-
// Arg* operator()(Arg x);
|
183 |
-
// };
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
// for_each algorithm for tuples.
|
188 |
-
template<typename Fun>
|
189 |
-
inline __host__ __device__
|
190 |
-
Fun tuple_for_each(thrust::null_type, Fun f)
|
191 |
-
{
|
192 |
-
return f;
|
193 |
-
} // end tuple_for_each()
|
194 |
-
|
195 |
-
|
196 |
-
template<typename Tuple, typename Fun>
|
197 |
-
inline __host__ __device__
|
198 |
-
Fun tuple_for_each(Tuple& t, Fun f)
|
199 |
-
{
|
200 |
-
f( t.get_head() );
|
201 |
-
return tuple_for_each(t.get_tail(), f);
|
202 |
-
} // end tuple_for_each()
|
203 |
-
|
204 |
-
|
205 |
-
// Equality of tuples. NOTE: "==" for tuples currently (7/2003)
|
206 |
-
// has problems under some compilers, so I just do my own.
|
207 |
-
// No point in bringing in a bunch of #ifdefs here. This is
|
208 |
-
// going to go away with the next tuple implementation anyway.
|
209 |
-
//
|
210 |
-
__host__ __device__
|
211 |
-
inline bool tuple_equal(thrust::null_type, thrust::null_type)
|
212 |
-
{ return true; }
|
213 |
-
|
214 |
-
|
215 |
-
template<typename Tuple1, typename Tuple2>
|
216 |
-
__host__ __device__
|
217 |
-
bool tuple_equal(Tuple1 const& t1, Tuple2 const& t2)
|
218 |
-
{
|
219 |
-
return t1.get_head() == t2.get_head() &&
|
220 |
-
tuple_equal(t1.get_tail(), t2.get_tail());
|
221 |
-
} // end tuple_equal()
|
222 |
-
|
223 |
-
} // end end tuple_impl_specific
|
224 |
-
|
225 |
-
|
226 |
-
// Metafunction to obtain the type of the tuple whose element types
|
227 |
-
// are the value_types of an iterator tupel.
|
228 |
-
//
|
229 |
-
template<typename IteratorTuple>
|
230 |
-
struct tuple_of_value_types
|
231 |
-
: tuple_meta_transform<
|
232 |
-
IteratorTuple,
|
233 |
-
iterator_value
|
234 |
-
>
|
235 |
-
{
|
236 |
-
}; // end tuple_of_value_types
|
237 |
-
|
238 |
-
|
239 |
-
struct minimum_category_lambda
|
240 |
-
{
|
241 |
-
template<typename T1, typename T2>
|
242 |
-
struct apply : minimum_category<T1,T2>
|
243 |
-
{};
|
244 |
-
};
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
// Metafunction to obtain the minimal traversal tag in a tuple
|
249 |
-
// of iterators.
|
250 |
-
//
|
251 |
-
template<typename IteratorTuple>
|
252 |
-
struct minimum_traversal_category_in_iterator_tuple
|
253 |
-
{
|
254 |
-
typedef typename tuple_meta_transform<
|
255 |
-
IteratorTuple
|
256 |
-
, thrust::iterator_traversal
|
257 |
-
>::type tuple_of_traversal_tags;
|
258 |
-
|
259 |
-
typedef typename tuple_impl_specific::tuple_meta_accumulate<
|
260 |
-
tuple_of_traversal_tags
|
261 |
-
, minimum_category_lambda
|
262 |
-
, thrust::random_access_traversal_tag
|
263 |
-
>::type type;
|
264 |
-
};
|
265 |
-
|
266 |
-
|
267 |
-
struct minimum_system_lambda
|
268 |
-
{
|
269 |
-
template<typename T1, typename T2>
|
270 |
-
struct apply : minimum_system<T1,T2>
|
271 |
-
{};
|
272 |
-
};
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
// Metafunction to obtain the minimal system tag in a tuple
|
277 |
-
// of iterators.
|
278 |
-
template<typename IteratorTuple>
|
279 |
-
struct minimum_system_in_iterator_tuple
|
280 |
-
{
|
281 |
-
typedef typename thrust::detail::tuple_meta_transform<
|
282 |
-
IteratorTuple,
|
283 |
-
thrust::iterator_system
|
284 |
-
>::type tuple_of_system_tags;
|
285 |
-
|
286 |
-
typedef typename tuple_impl_specific::tuple_meta_accumulate<
|
287 |
-
tuple_of_system_tags,
|
288 |
-
minimum_system_lambda,
|
289 |
-
thrust::any_system_tag
|
290 |
-
>::type type;
|
291 |
-
};
|
292 |
-
|
293 |
-
namespace zip_iterator_base_ns
|
294 |
-
{
|
295 |
-
|
296 |
-
|
297 |
-
template<int i, typename Tuple>
|
298 |
-
struct tuple_elements_helper
|
299 |
-
: eval_if<
|
300 |
-
(i < tuple_size<Tuple>::value),
|
301 |
-
tuple_element<i,Tuple>,
|
302 |
-
identity_<thrust::null_type>
|
303 |
-
>
|
304 |
-
{};
|
305 |
-
|
306 |
-
|
307 |
-
template<typename Tuple>
|
308 |
-
struct tuple_elements
|
309 |
-
{
|
310 |
-
typedef typename tuple_elements_helper<0,Tuple>::type T0;
|
311 |
-
typedef typename tuple_elements_helper<1,Tuple>::type T1;
|
312 |
-
typedef typename tuple_elements_helper<2,Tuple>::type T2;
|
313 |
-
typedef typename tuple_elements_helper<3,Tuple>::type T3;
|
314 |
-
typedef typename tuple_elements_helper<4,Tuple>::type T4;
|
315 |
-
typedef typename tuple_elements_helper<5,Tuple>::type T5;
|
316 |
-
typedef typename tuple_elements_helper<6,Tuple>::type T6;
|
317 |
-
typedef typename tuple_elements_helper<7,Tuple>::type T7;
|
318 |
-
typedef typename tuple_elements_helper<8,Tuple>::type T8;
|
319 |
-
typedef typename tuple_elements_helper<9,Tuple>::type T9;
|
320 |
-
};
|
321 |
-
|
322 |
-
|
323 |
-
template<typename IteratorTuple>
|
324 |
-
struct tuple_of_iterator_references
|
325 |
-
{
|
326 |
-
// get a thrust::tuple of the iterators' references
|
327 |
-
typedef typename tuple_meta_transform<
|
328 |
-
IteratorTuple,
|
329 |
-
iterator_reference
|
330 |
-
>::type tuple_of_references;
|
331 |
-
|
332 |
-
// get at the individual tuple element types by name
|
333 |
-
typedef tuple_elements<tuple_of_references> elements;
|
334 |
-
|
335 |
-
// map thrust::tuple<T...> to tuple_of_iterator_references<T...>
|
336 |
-
typedef thrust::detail::tuple_of_iterator_references<
|
337 |
-
typename elements::T0,
|
338 |
-
typename elements::T1,
|
339 |
-
typename elements::T2,
|
340 |
-
typename elements::T3,
|
341 |
-
typename elements::T4,
|
342 |
-
typename elements::T5,
|
343 |
-
typename elements::T6,
|
344 |
-
typename elements::T7,
|
345 |
-
typename elements::T8,
|
346 |
-
typename elements::T9
|
347 |
-
> type;
|
348 |
-
};
|
349 |
-
|
350 |
-
|
351 |
-
} // end zip_iterator_base_ns
|
352 |
-
|
353 |
-
///////////////////////////////////////////////////////////////////
|
354 |
-
//
|
355 |
-
// Class zip_iterator_base
|
356 |
-
//
|
357 |
-
// Builds and exposes the iterator facade type from which the zip
|
358 |
-
// iterator will be derived.
|
359 |
-
//
|
360 |
-
template<typename IteratorTuple>
|
361 |
-
struct zip_iterator_base
|
362 |
-
{
|
363 |
-
//private:
|
364 |
-
// reference type is the type of the tuple obtained from the
|
365 |
-
// iterators' reference types.
|
366 |
-
typedef typename zip_iterator_base_ns::tuple_of_iterator_references<IteratorTuple>::type reference;
|
367 |
-
|
368 |
-
// Boost's Value type is the same as reference type.
|
369 |
-
//typedef reference value_type;
|
370 |
-
typedef typename tuple_of_value_types<IteratorTuple>::type value_type;
|
371 |
-
|
372 |
-
// Difference type is the first iterator's difference type
|
373 |
-
typedef typename thrust::iterator_traits<
|
374 |
-
typename thrust::tuple_element<0, IteratorTuple>::type
|
375 |
-
>::difference_type difference_type;
|
376 |
-
|
377 |
-
// Iterator system is the minimum system tag in the
|
378 |
-
// iterator tuple
|
379 |
-
typedef typename
|
380 |
-
minimum_system_in_iterator_tuple<IteratorTuple>::type system;
|
381 |
-
|
382 |
-
// Traversal category is the minimum traversal category in the
|
383 |
-
// iterator tuple
|
384 |
-
typedef typename
|
385 |
-
minimum_traversal_category_in_iterator_tuple<IteratorTuple>::type traversal_category;
|
386 |
-
|
387 |
-
public:
|
388 |
-
|
389 |
-
// The iterator facade type from which the zip iterator will
|
390 |
-
// be derived.
|
391 |
-
typedef thrust::iterator_facade<
|
392 |
-
zip_iterator<IteratorTuple>,
|
393 |
-
value_type,
|
394 |
-
system,
|
395 |
-
traversal_category,
|
396 |
-
reference,
|
397 |
-
difference_type
|
398 |
-
> type;
|
399 |
-
}; // end zip_iterator_base
|
400 |
-
|
401 |
-
} // end detail
|
402 |
-
|
403 |
-
} // end thrust
|
404 |
-
|
405 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/malloc_and_free.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits malloc & free
|
22 |
-
#include <thrust/system/detail/sequential/malloc_and_free.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/uninitialized_copy.h
DELETED
@@ -1,116 +0,0 @@
|
|
1 |
-
/******************************************************************************
|
2 |
-
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
3 |
-
*
|
4 |
-
* Redistribution and use in source and binary forms, with or without
|
5 |
-
* modification, are permitted provided that the following conditions are met:
|
6 |
-
* * Redistributions of source code must retain the above copyright
|
7 |
-
* notice, this list of conditions and the following disclaimer.
|
8 |
-
* * Redistributions in binary form must reproduce the above copyright
|
9 |
-
* notice, this list of conditions and the following disclaimer in the
|
10 |
-
* documentation and/or other materials provided with the distribution.
|
11 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
12 |
-
* names of its contributors may be used to endorse or promote products
|
13 |
-
* derived from this software without specific prior written permission.
|
14 |
-
*
|
15 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
16 |
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
19 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
20 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
21 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
22 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
23 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
24 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
25 |
-
*
|
26 |
-
******************************************************************************/
|
27 |
-
#pragma once
|
28 |
-
|
29 |
-
|
30 |
-
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
31 |
-
#include <iterator>
|
32 |
-
#include <thrust/distance.h>
|
33 |
-
#include <thrust/system/cuda/detail/execution_policy.h>
|
34 |
-
#include <thrust/system/cuda/detail/util.h>
|
35 |
-
#include <thrust/system/cuda/detail/parallel_for.h>
|
36 |
-
|
37 |
-
namespace thrust
|
38 |
-
{
|
39 |
-
|
40 |
-
namespace cuda_cub {
|
41 |
-
|
42 |
-
namespace __uninitialized_copy {
|
43 |
-
|
44 |
-
template <class InputIt, class OutputIt>
|
45 |
-
struct functor
|
46 |
-
{
|
47 |
-
InputIt input;
|
48 |
-
OutputIt output;
|
49 |
-
|
50 |
-
typedef typename iterator_traits<InputIt>::value_type InputType;
|
51 |
-
typedef typename iterator_traits<OutputIt>::value_type OutputType;
|
52 |
-
|
53 |
-
THRUST_FUNCTION
|
54 |
-
functor(InputIt input_, OutputIt output_)
|
55 |
-
: input(input_), output(output_) {}
|
56 |
-
|
57 |
-
template<class Size>
|
58 |
-
void THRUST_DEVICE_FUNCTION operator()(Size idx)
|
59 |
-
{
|
60 |
-
InputType const &in = raw_reference_cast(input[idx]);
|
61 |
-
OutputType & out = raw_reference_cast(output[idx]);
|
62 |
-
|
63 |
-
#if defined(__CUDA__) && defined(__clang__)
|
64 |
-
// XXX unsafe, but clang is seemngly unable to call in-place new
|
65 |
-
out = in;
|
66 |
-
#else
|
67 |
-
::new (static_cast<void *>(&out)) OutputType(in);
|
68 |
-
#endif
|
69 |
-
}
|
70 |
-
}; // struct functor
|
71 |
-
|
72 |
-
} // namespace __uninitialized_copy
|
73 |
-
|
74 |
-
template <class Derived,
|
75 |
-
class InputIt,
|
76 |
-
class Size,
|
77 |
-
class OutputIt>
|
78 |
-
OutputIt __host__ __device__
|
79 |
-
uninitialized_copy_n(execution_policy<Derived> &policy,
|
80 |
-
InputIt first,
|
81 |
-
Size count,
|
82 |
-
OutputIt result)
|
83 |
-
{
|
84 |
-
typedef __uninitialized_copy::functor<InputIt,OutputIt> functor_t;
|
85 |
-
|
86 |
-
cuda_cub::parallel_for(policy,
|
87 |
-
functor_t(first, result),
|
88 |
-
count);
|
89 |
-
|
90 |
-
cuda_cub::throw_on_error(
|
91 |
-
cuda_cub::synchronize(policy)
|
92 |
-
, "uninitialized_copy_n: failed to synchronize"
|
93 |
-
);
|
94 |
-
|
95 |
-
return result + count;
|
96 |
-
}
|
97 |
-
|
98 |
-
template <class Derived,
|
99 |
-
class InputIt,
|
100 |
-
class OutputIt>
|
101 |
-
OutputIt __host__ __device__
|
102 |
-
uninitialized_copy(execution_policy<Derived>& policy,
|
103 |
-
InputIt first,
|
104 |
-
InputIt last,
|
105 |
-
OutputIt result)
|
106 |
-
{
|
107 |
-
return cuda_cub::uninitialized_copy_n(policy,
|
108 |
-
first,
|
109 |
-
thrust::distance(first, last),
|
110 |
-
result);
|
111 |
-
}
|
112 |
-
|
113 |
-
} // namespace cuda_
|
114 |
-
|
115 |
-
} // end namespace thrust
|
116 |
-
#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/datasets/custom.py
DELETED
@@ -1,334 +0,0 @@
|
|
1 |
-
import os.path as osp
|
2 |
-
import warnings
|
3 |
-
from collections import OrderedDict
|
4 |
-
|
5 |
-
import mmcv
|
6 |
-
import numpy as np
|
7 |
-
from mmcv.utils import print_log
|
8 |
-
from torch.utils.data import Dataset
|
9 |
-
|
10 |
-
from mmdet.core import eval_map, eval_recalls
|
11 |
-
from .builder import DATASETS
|
12 |
-
from .pipelines import Compose
|
13 |
-
|
14 |
-
|
15 |
-
@DATASETS.register_module()
|
16 |
-
class CustomDataset(Dataset):
|
17 |
-
"""Custom dataset for detection.
|
18 |
-
|
19 |
-
The annotation format is shown as follows. The `ann` field is optional for
|
20 |
-
testing.
|
21 |
-
|
22 |
-
.. code-block:: none
|
23 |
-
|
24 |
-
[
|
25 |
-
{
|
26 |
-
'filename': 'a.jpg',
|
27 |
-
'width': 1280,
|
28 |
-
'height': 720,
|
29 |
-
'ann': {
|
30 |
-
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
|
31 |
-
'labels': <np.ndarray> (n, ),
|
32 |
-
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
|
33 |
-
'labels_ignore': <np.ndarray> (k, 4) (optional field)
|
34 |
-
}
|
35 |
-
},
|
36 |
-
...
|
37 |
-
]
|
38 |
-
|
39 |
-
Args:
|
40 |
-
ann_file (str): Annotation file path.
|
41 |
-
pipeline (list[dict]): Processing pipeline.
|
42 |
-
classes (str | Sequence[str], optional): Specify classes to load.
|
43 |
-
If is None, ``cls.CLASSES`` will be used. Default: None.
|
44 |
-
data_root (str, optional): Data root for ``ann_file``,
|
45 |
-
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
|
46 |
-
test_mode (bool, optional): If set True, annotation will not be loaded.
|
47 |
-
filter_empty_gt (bool, optional): If set true, images without bounding
|
48 |
-
boxes of the dataset's classes will be filtered out. This option
|
49 |
-
only works when `test_mode=False`, i.e., we never filter images
|
50 |
-
during tests.
|
51 |
-
"""
|
52 |
-
|
53 |
-
CLASSES = None
|
54 |
-
|
55 |
-
def __init__(self,
|
56 |
-
ann_file,
|
57 |
-
pipeline,
|
58 |
-
classes=None,
|
59 |
-
data_root=None,
|
60 |
-
img_prefix='',
|
61 |
-
seg_prefix=None,
|
62 |
-
proposal_file=None,
|
63 |
-
test_mode=False,
|
64 |
-
filter_empty_gt=True):
|
65 |
-
self.ann_file = ann_file
|
66 |
-
self.data_root = data_root
|
67 |
-
self.img_prefix = img_prefix
|
68 |
-
self.seg_prefix = seg_prefix
|
69 |
-
self.proposal_file = proposal_file
|
70 |
-
self.test_mode = test_mode
|
71 |
-
self.filter_empty_gt = filter_empty_gt
|
72 |
-
self.CLASSES = self.get_classes(classes)
|
73 |
-
|
74 |
-
# join paths if data_root is specified
|
75 |
-
if self.data_root is not None:
|
76 |
-
if not osp.isabs(self.ann_file):
|
77 |
-
self.ann_file = osp.join(self.data_root, self.ann_file)
|
78 |
-
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
|
79 |
-
self.img_prefix = osp.join(self.data_root, self.img_prefix)
|
80 |
-
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
|
81 |
-
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
|
82 |
-
if not (self.proposal_file is None
|
83 |
-
or osp.isabs(self.proposal_file)):
|
84 |
-
self.proposal_file = osp.join(self.data_root,
|
85 |
-
self.proposal_file)
|
86 |
-
# load annotations (and proposals)
|
87 |
-
self.data_infos = self.load_annotations(self.ann_file)
|
88 |
-
|
89 |
-
if self.proposal_file is not None:
|
90 |
-
self.proposals = self.load_proposals(self.proposal_file)
|
91 |
-
else:
|
92 |
-
self.proposals = None
|
93 |
-
|
94 |
-
# filter images too small and containing no annotations
|
95 |
-
if not test_mode:
|
96 |
-
valid_inds = self._filter_imgs()
|
97 |
-
self.data_infos = [self.data_infos[i] for i in valid_inds]
|
98 |
-
if self.proposals is not None:
|
99 |
-
self.proposals = [self.proposals[i] for i in valid_inds]
|
100 |
-
# set group flag for the sampler
|
101 |
-
self._set_group_flag()
|
102 |
-
|
103 |
-
# processing pipeline
|
104 |
-
self.pipeline = Compose(pipeline)
|
105 |
-
|
106 |
-
def __len__(self):
|
107 |
-
"""Total number of samples of data."""
|
108 |
-
return len(self.data_infos)
|
109 |
-
|
110 |
-
def load_annotations(self, ann_file):
|
111 |
-
"""Load annotation from annotation file."""
|
112 |
-
return mmcv.load(ann_file)
|
113 |
-
|
114 |
-
def load_proposals(self, proposal_file):
|
115 |
-
"""Load proposal from proposal file."""
|
116 |
-
return mmcv.load(proposal_file)
|
117 |
-
|
118 |
-
def get_ann_info(self, idx):
|
119 |
-
"""Get annotation by index.
|
120 |
-
|
121 |
-
Args:
|
122 |
-
idx (int): Index of data.
|
123 |
-
|
124 |
-
Returns:
|
125 |
-
dict: Annotation info of specified index.
|
126 |
-
"""
|
127 |
-
|
128 |
-
return self.data_infos[idx]['ann']
|
129 |
-
|
130 |
-
def get_cat_ids(self, idx):
|
131 |
-
"""Get category ids by index.
|
132 |
-
|
133 |
-
Args:
|
134 |
-
idx (int): Index of data.
|
135 |
-
|
136 |
-
Returns:
|
137 |
-
list[int]: All categories in the image of specified index.
|
138 |
-
"""
|
139 |
-
|
140 |
-
return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
|
141 |
-
|
142 |
-
def pre_pipeline(self, results):
|
143 |
-
"""Prepare results dict for pipeline."""
|
144 |
-
results['img_prefix'] = self.img_prefix
|
145 |
-
results['seg_prefix'] = self.seg_prefix
|
146 |
-
results['proposal_file'] = self.proposal_file
|
147 |
-
results['bbox_fields'] = []
|
148 |
-
results['mask_fields'] = []
|
149 |
-
results['seg_fields'] = []
|
150 |
-
|
151 |
-
def _filter_imgs(self, min_size=32):
|
152 |
-
"""Filter images too small."""
|
153 |
-
if self.filter_empty_gt:
|
154 |
-
warnings.warn(
|
155 |
-
'CustomDataset does not support filtering empty gt images.')
|
156 |
-
valid_inds = []
|
157 |
-
for i, img_info in enumerate(self.data_infos):
|
158 |
-
if min(img_info['width'], img_info['height']) >= min_size:
|
159 |
-
valid_inds.append(i)
|
160 |
-
return valid_inds
|
161 |
-
|
162 |
-
def _set_group_flag(self):
|
163 |
-
"""Set flag according to image aspect ratio.
|
164 |
-
|
165 |
-
Images with aspect ratio greater than 1 will be set as group 1,
|
166 |
-
otherwise group 0.
|
167 |
-
"""
|
168 |
-
self.flag = np.zeros(len(self), dtype=np.uint8)
|
169 |
-
for i in range(len(self)):
|
170 |
-
img_info = self.data_infos[i]
|
171 |
-
if img_info['width'] / img_info['height'] > 1:
|
172 |
-
self.flag[i] = 1
|
173 |
-
|
174 |
-
def _rand_another(self, idx):
|
175 |
-
"""Get another random index from the same group as the given index."""
|
176 |
-
pool = np.where(self.flag == self.flag[idx])[0]
|
177 |
-
return np.random.choice(pool)
|
178 |
-
|
179 |
-
def __getitem__(self, idx):
|
180 |
-
"""Get training/test data after pipeline.
|
181 |
-
|
182 |
-
Args:
|
183 |
-
idx (int): Index of data.
|
184 |
-
|
185 |
-
Returns:
|
186 |
-
dict: Training/test data (with annotation if `test_mode` is set \
|
187 |
-
True).
|
188 |
-
"""
|
189 |
-
|
190 |
-
if self.test_mode:
|
191 |
-
while 1:
|
192 |
-
try:
|
193 |
-
return self.prepare_test_img(idx)
|
194 |
-
except:
|
195 |
-
idx = idx+1
|
196 |
-
#return self.prepare_test_img(idx+1)
|
197 |
-
|
198 |
-
#return self.prepare_test_img(idx)
|
199 |
-
while True:
|
200 |
-
try:
|
201 |
-
data = self.prepare_train_img(idx)
|
202 |
-
except:
|
203 |
-
data = self.prepare_train_img(idx-1)
|
204 |
-
|
205 |
-
if data is None:
|
206 |
-
idx = self._rand_another(idx)
|
207 |
-
continue
|
208 |
-
return data
|
209 |
-
|
210 |
-
def prepare_train_img(self, idx):
|
211 |
-
"""Get training data and annotations after pipeline.
|
212 |
-
|
213 |
-
Args:
|
214 |
-
idx (int): Index of data.
|
215 |
-
|
216 |
-
Returns:
|
217 |
-
dict: Training data and annotation after pipeline with new keys \
|
218 |
-
introduced by pipeline.
|
219 |
-
"""
|
220 |
-
|
221 |
-
img_info = self.data_infos[idx]
|
222 |
-
ann_info = self.get_ann_info(idx)
|
223 |
-
results = dict(img_info=img_info, ann_info=ann_info)
|
224 |
-
if self.proposals is not None:
|
225 |
-
results['proposals'] = self.proposals[idx]
|
226 |
-
self.pre_pipeline(results)
|
227 |
-
return self.pipeline(results)
|
228 |
-
|
229 |
-
def prepare_test_img(self, idx):
|
230 |
-
"""Get testing data after pipeline.
|
231 |
-
|
232 |
-
Args:
|
233 |
-
idx (int): Index of data.
|
234 |
-
|
235 |
-
Returns:
|
236 |
-
dict: Testing data after pipeline with new keys introduced by \
|
237 |
-
pipeline.
|
238 |
-
"""
|
239 |
-
|
240 |
-
img_info = self.data_infos[idx]
|
241 |
-
results = dict(img_info=img_info)
|
242 |
-
if self.proposals is not None:
|
243 |
-
results['proposals'] = self.proposals[idx]
|
244 |
-
self.pre_pipeline(results)
|
245 |
-
return self.pipeline(results)
|
246 |
-
|
247 |
-
@classmethod
|
248 |
-
def get_classes(cls, classes=None):
|
249 |
-
"""Get class names of current dataset.
|
250 |
-
|
251 |
-
Args:
|
252 |
-
classes (Sequence[str] | str | None): If classes is None, use
|
253 |
-
default CLASSES defined by builtin dataset. If classes is a
|
254 |
-
string, take it as a file name. The file contains the name of
|
255 |
-
classes where each line contains one class name. If classes is
|
256 |
-
a tuple or list, override the CLASSES defined by the dataset.
|
257 |
-
|
258 |
-
Returns:
|
259 |
-
tuple[str] or list[str]: Names of categories of the dataset.
|
260 |
-
"""
|
261 |
-
if classes is None:
|
262 |
-
return cls.CLASSES
|
263 |
-
|
264 |
-
if isinstance(classes, str):
|
265 |
-
# take it as a file path
|
266 |
-
class_names = mmcv.list_from_file(classes)
|
267 |
-
elif isinstance(classes, (tuple, list)):
|
268 |
-
class_names = classes
|
269 |
-
else:
|
270 |
-
raise ValueError(f'Unsupported type {type(classes)} of classes.')
|
271 |
-
|
272 |
-
return class_names
|
273 |
-
|
274 |
-
def format_results(self, results, **kwargs):
|
275 |
-
"""Place holder to format result to dataset specific output."""
|
276 |
-
|
277 |
-
def evaluate(self,
|
278 |
-
results,
|
279 |
-
metric='mAP',
|
280 |
-
logger=None,
|
281 |
-
proposal_nums=(100, 300, 1000),
|
282 |
-
iou_thr=0.5,
|
283 |
-
scale_ranges=None):
|
284 |
-
"""Evaluate the dataset.
|
285 |
-
|
286 |
-
Args:
|
287 |
-
results (list): Testing results of the dataset.
|
288 |
-
metric (str | list[str]): Metrics to be evaluated.
|
289 |
-
logger (logging.Logger | None | str): Logger used for printing
|
290 |
-
related information during evaluation. Default: None.
|
291 |
-
proposal_nums (Sequence[int]): Proposal number used for evaluating
|
292 |
-
recalls, such as recall@100, recall@1000.
|
293 |
-
Default: (100, 300, 1000).
|
294 |
-
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
|
295 |
-
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
|
296 |
-
Default: None.
|
297 |
-
"""
|
298 |
-
|
299 |
-
if not isinstance(metric, str):
|
300 |
-
assert len(metric) == 1
|
301 |
-
metric = metric[0]
|
302 |
-
allowed_metrics = ['mAP', 'recall']
|
303 |
-
if metric not in allowed_metrics:
|
304 |
-
raise KeyError(f'metric {metric} is not supported')
|
305 |
-
annotations = [self.get_ann_info(i) for i in range(len(self))]
|
306 |
-
eval_results = OrderedDict()
|
307 |
-
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
|
308 |
-
if metric == 'mAP':
|
309 |
-
assert isinstance(iou_thrs, list)
|
310 |
-
mean_aps = []
|
311 |
-
for iou_thr in iou_thrs:
|
312 |
-
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
|
313 |
-
mean_ap, _ = eval_map(
|
314 |
-
results,
|
315 |
-
annotations,
|
316 |
-
scale_ranges=scale_ranges,
|
317 |
-
iou_thr=iou_thr,
|
318 |
-
dataset=self.CLASSES,
|
319 |
-
logger=logger)
|
320 |
-
mean_aps.append(mean_ap)
|
321 |
-
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
|
322 |
-
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
|
323 |
-
elif metric == 'recall':
|
324 |
-
gt_bboxes = [ann['bboxes'] for ann in annotations]
|
325 |
-
recalls = eval_recalls(
|
326 |
-
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
|
327 |
-
for i, num in enumerate(proposal_nums):
|
328 |
-
for j, iou in enumerate(iou_thrs):
|
329 |
-
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
|
330 |
-
if recalls.shape[1] > 1:
|
331 |
-
ar = recalls.mean(axis=1)
|
332 |
-
for i, num in enumerate(proposal_nums):
|
333 |
-
eval_results[f'AR@{num}'] = ar[i]
|
334 |
-
return eval_results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Chaitanya01/InvestingPlatform/notifier.py
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
import threading
|
2 |
-
from config import *
|
3 |
-
import requests
|
4 |
-
import slack
|
5 |
-
import json
|
6 |
-
from datetime import datetime
|
7 |
-
import time
|
8 |
-
arr = []
|
9 |
-
def symbol_info(req_params, i):
|
10 |
-
global arr
|
11 |
-
url = "https://api.binance.com/api/v3/ticker/24hr"
|
12 |
-
val = requests.get(url,params = req_params)
|
13 |
-
try:
|
14 |
-
data = json.loads(val.text)
|
15 |
-
|
16 |
-
x = arr[i]
|
17 |
-
try:
|
18 |
-
if float(data["priceChangePercent"])>=x:
|
19 |
-
client = slack.WebClient(token = SLACK_TOKEN)
|
20 |
-
client.chat_postMessage(channel = "#bot_alerts",
|
21 |
-
text = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} {data['symbol']} 24Hchange={float(data['priceChangePercent'])}% new benchmark {x+5}%")
|
22 |
-
arr[i] = arr[i] + 5
|
23 |
-
except:
|
24 |
-
pass
|
25 |
-
except:
|
26 |
-
print("Could not connect")
|
27 |
-
|
28 |
-
for i in range(len(crypto_symbols)):
|
29 |
-
arr.append(20)
|
30 |
-
|
31 |
-
while True:
|
32 |
-
for i in range(len(crypto_symbols)):
|
33 |
-
today = datetime.now()
|
34 |
-
if today.hour + today.minute + today.second == 0:
|
35 |
-
for i in range(len(crypto_symbols)):
|
36 |
-
arr[i] = 20
|
37 |
-
req_params = dict(symbol = crypto_symbols[i] + "USDT")
|
38 |
-
thread = threading.Thread(target = symbol_info, args = (req_params,i,))
|
39 |
-
thread.start()
|
40 |
-
time.sleep(15)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/system/friend.js
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
import cfg from '../../lib/config/config.js'
|
2 |
-
import common from '../../lib/common/common.js'
|
3 |
-
|
4 |
-
export class friend extends plugin {
|
5 |
-
constructor () {
|
6 |
-
super({
|
7 |
-
name: 'autoFriend',
|
8 |
-
dsc: '自动同意好友',
|
9 |
-
event: 'request.friend'
|
10 |
-
})
|
11 |
-
}
|
12 |
-
|
13 |
-
async accept() {
|
14 |
-
if (this.e.sub_type == 'add' || this.e.sub_type == 'single') {
|
15 |
-
if (cfg.other.autoFriend == 1) {
|
16 |
-
logger.mark(`[自动同意][添加好友] ${this.e.user_id}`)
|
17 |
-
await common.sleep(2000)
|
18 |
-
this.e.approve(true)
|
19 |
-
}
|
20 |
-
}
|
21 |
-
}
|
22 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|