Commit
·
2224ffa
1
Parent(s):
0f8fe23
Update parquet files (step 24 of 296)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Vinnaithaandi Varuvaaya Bluray 1080p) - Watch the Romantic Tamil Movie in High Quality.md +0 -80
- spaces/1line/AutoGPT/autogpt/js/overlay.js +0 -29
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds 2 Mod APK Unlimited Gems and Black Pearls for More Fun and Challenge.md +0 -109
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Buku Guru Kelas 3 Tema 5 Cuaca Revisi 2018 Panduan Lengkap untuk Mengajar.md +0 -113
- spaces/1phancelerku/anime-remove-background/Dead Space Mobile The Ultimate Guide to Downloading and Installing the Mod APK.md +0 -101
- spaces/1phancelerku/anime-remove-background/Download JioTV Live APK and Never Miss Your Favourite Shows Again.md +0 -162
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py +0 -26
- spaces/AIFILMS/StyleGANEX/models/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/distributions/__init__.py +0 -0
- spaces/AISuperheroes/05GR-Image-To-Multilingual-OCR/README.md +0 -13
- spaces/AIZero2HeroBootcamp/MultiPDF-QA-ChatGPT-Langchain/htmlTemplates.py +0 -44
- spaces/ASJMO/freegpt/client/css/message-input.css +0 -27
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/SwapPage.js +0 -38
- spaces/AiMimicry/sovits-models/modules/attentions.py +0 -349
- spaces/AlexWang/lama/saicinpainting/__init__.py +0 -0
- spaces/Alican/pixera/util/image_pool.py +0 -54
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/longcode/jpge.cpp +0 -1049
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/transformer2d.md +0 -29
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/optimization/habana.md +0 -71
- spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/README.md +0 -49
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolact_head.py +0 -943
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/resnet.py +0 -316
- spaces/Apex-X/ROOPOK/app.py +0 -69
- spaces/Apex-X/Tm/roop/processors/frame/__init__.py +0 -0
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/__init__.py +0 -0
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/openai.py +0 -156
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/pann_model.py +0 -704
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/roi_align.py +0 -74
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py +0 -270
- spaces/Banbri/zcvzcv/src/components/icons/hugging-clap.tsx +0 -8
- spaces/Benson/text-generation/Examples/Apk Descargar Templo Ejecutar 3.md +0 -122
- spaces/Benson/text-generation/Examples/Baldi 39s Conceptos Bsicos En Lnea.md +0 -78
- spaces/Benson/text-generation/Examples/Caramelo Crush Saga Mod Apk Barras De Oro Ilimitadas.md +0 -171
- spaces/Betacuckgpt/togethercomputer-GPT-JT-Moderation-6B/app.py +0 -3
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/req_command.py +0 -505
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py +0 -82
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/abc.py +0 -137
- spaces/Boadiwaa/Recipes/openai/api_resources/experimental/__init__.py +0 -3
- spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/caption_datasets.py +0 -85
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/__init__.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/box_head.py +0 -101
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/butd/model_cfgs.py +0 -17
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/temporary_buffer.h +0 -22
- spaces/CVPR/WALT/cwalt/clustering_utils.py +0 -132
- spaces/CVPR/WALT/mmdet/core/bbox/samplers/__init__.py +0 -15
- spaces/CVPR/lama-example/bin/evaluate_predicts.py +0 -79
- spaces/CVPR/regionclip-demo/detectron2/layers/roi_align.py +0 -74
- spaces/Cartinoe5930/LLMAgora/app.py +0 -364
- spaces/ChandraMohanNayal/AutoGPT/autogpt/processing/__init__.py +0 -0
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/index.js +0 -161
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Vinnaithaandi Varuvaaya Bluray 1080p) - Watch the Romantic Tamil Movie in High Quality.md
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>HD Online Player (Vinnaithaandi Varuvaaya Bluray 1080p)</h1>
|
3 |
-
<p>If you are a fan of Tamil romantic movies, you might have heard of <strong>Vinnaithaandi Varuvaaya</strong>, a 2010 film written and directed by Gautham Vasudev Menon, starring Silambarasan and Trisha. The movie was a critical and commercial success, winning several awards and accolades for its music, cinematography, and story. It is considered one of the best romantic movies of Tamil cinema, with a cult following among the youth.</p>
|
4 |
-
<h2>HD Online Player (Vinnaithaandi Varuvaaya Bluray 1080p)</h2><br /><p><b><b>Download File</b> ✵ <a href="https://byltly.com/2uKvxM">https://byltly.com/2uKvxM</a></b></p><br /><br />
|
5 |
-
<p>But have you ever wondered how it would be like to watch this movie in HD online? Well, you are in luck, because there are many platforms that offer you the opportunity to stream or download this movie in high definition quality. You can enjoy the stunning visuals, the melodious songs, and the emotional scenes in full clarity and detail. In this article, we will tell you more about this movie, its plot, its review, and where you can watch it online in HD.</p>
|
6 |
-
<h2>Plot summary</h2>
|
7 |
-
<p>Vinnaithaandi Varuvaaya (meaning Will You Cross The Skies For Me?) is a movie that explores the complicated relationship between a Hindu Tamil boy, Karthik Sivakumar, and a Malayali Christian girl, Jessie Thekekuttu. Karthik is an aspiring filmmaker who falls in love with Jessie at first sight. Jessie lives upstairs from Karthik's family, who rent the bottom floor of her house. Jessie is from a conservative family who disapprove of her talking to men outside her religion.</p>
|
8 |
-
<p>Karthik tries to woo Jessie, who initially rejects him but later develops feelings for him. However, she is afraid of her father's wrath and her family's opposition. She also has doubts about Karthik's commitment and career prospects. Karthik convinces her to elope with him, but she backs out at the last moment. They break up and go their separate ways.</p>
|
9 |
-
<p>Years later, Karthik becomes a successful filmmaker and makes a movie based on his love story with Jessie. He meets her again in New York, where she is married to someone else. They rekindle their friendship and realize that they still love each other. However, they also accept that they cannot be together and decide to part ways for good.</p>
|
10 |
-
<p>Vinnaithaandi Varuvaayaa Hosanna video song HD 1080p<br />
|
11 |
-
Watch Vinnaithaandi Varuvaayaa Tamil movie online free<br />
|
12 |
-
Download Vinnaithaandi Varuvaayaa Blu Ray 1080p X 264 [ DTS<br />
|
13 |
-
Vinnaithaandi Varuvaayaa full movie with English subtitles<br />
|
14 |
-
Vinnaithaandi Varuvaayaa romantic scenes HD quality<br />
|
15 |
-
Vinnaithaandi Varuvaayaa songs lyrics and translation<br />
|
16 |
-
Vinnaithaandi Varuvaayaa behind the scenes and interviews<br />
|
17 |
-
Vinnaithaandi Varuvaayaa remake in Hindi Ekk Deewana Tha<br />
|
18 |
-
Vinnaithaandi Varuvaayaa A R Rahman musical masterpiece<br />
|
19 |
-
Vinnaithaandi Varuvaayaa Gautham Menon directorial hit<br />
|
20 |
-
Vinnaithaandi Varuvaayaa Trisha Krishnan and Silambarasan Rajendar chemistry<br />
|
21 |
-
Vinnaithaandi Varuvaayaa best dialogues and quotes<br />
|
22 |
-
Vinnaithaandi Varuvaayaa review and ratings by critics<br />
|
23 |
-
Vinnaithaandi Varuvaayaa box office collection and awards<br />
|
24 |
-
Vinnaithaandi Varuvaayaa fan made videos and edits<br />
|
25 |
-
Vinnaithaandi Varuvaayaa trivia and facts you didn't know<br />
|
26 |
-
Vinnaithaandi Varuvaayaa location and shooting details<br />
|
27 |
-
Vinnaithaandi Varuvaayaa wallpapers and posters HD download<br />
|
28 |
-
Vinnaithaandi Varuvaayaa piano notes and guitar chords<br />
|
29 |
-
Vinnaithaandi Varuvaayaa memes and funny moments<br />
|
30 |
-
Vinnaithaandi Varuvaayaa Mannipaaya song video HD 1080p<br />
|
31 |
-
Stream Vinnaithaandi Varuvaayaa Tamil movie online HD<br />
|
32 |
-
Buy Vinnaithaandi Varuvaayaa Blu Ray DVD online<br />
|
33 |
-
Vinnaithaandi Varuvaayaa Omana Penne song video HD 1080p<br />
|
34 |
-
Vinnaithaandi Varuvaayaa movie analysis and breakdown<br />
|
35 |
-
Vinnaithaandi Varuvaayaa songs playlist and jukebox<br />
|
36 |
-
Vinnaithaandi Varuvaayaa cast and crew details<br />
|
37 |
-
Vinnaithaandi Varuvaayaa sequel announcement and updates<br />
|
38 |
-
Vinnaithaandi Varuvaayaa climax scene HD 1080p<br />
|
39 |
-
Vinnaithaandi Varuvaayaa movie comparison with Ye Maaya Chesave<br />
|
40 |
-
Vinnaithaandi Varuvaayaa Kannukkul Kannai song video HD 1080p<br />
|
41 |
-
Watch Vinnaithaandi Varuvaayaa Tamil movie online HD quality<br />
|
42 |
-
Download Vinnaithaandi Varuvaayaa Blu Ray 1080p X 264 [ DTS torrent<br />
|
43 |
-
Vinnaithaandi Varuvaayaa subtitles in different languages<br />
|
44 |
-
Vinnaithaandi Varuvaayaa deleted scenes and bloopers<br />
|
45 |
-
Vinnaithaandi Varuvaayaa making of the movie and songs<br />
|
46 |
-
Vinnaithaandi Varuvaayaa remake in Telugu Ye Maaya Chesave<br />
|
47 |
-
Vinnaithaandi Varuvaayaa A R Rahman best songs ever<br />
|
48 |
-
Vinnaithaandi Varuvaayaa Gautham Menon best movies ever<br />
|
49 |
-
Vinnaithaandi Varuvaayaa Trisha Krishnan and Silambarasan Rajendar best performances ever<br />
|
50 |
-
Vinnaithaandi Varuvaayaa inspiring dialogues and messages<br />
|
51 |
-
Vinnaithaandi Varuvaayaa feedback and comments by viewers<br />
|
52 |
-
Vinnaithaandi Varuvaayaa budget and production cost details<br />
|
53 |
-
Vinnaithaandi Varuvaayaa nominations and wins at various awards shows<br />
|
54 |
-
Vinnaithaandi Varuvaayaa tribute videos and fan arts<br />
|
55 |
-
Vinnaithaandi Varuvaayaa interesting facts and secrets revealed by the makers<br />
|
56 |
-
Vinnaithaandi Varuvaayaa shooting spots and travel guide <br />
|
57 |
-
Vinnaithaandi Varuvaaya images and stills HD download <br />
|
58 |
-
Vinnaithaandi Varuvaya flute notes and sheet music </p>
|
59 |
-
<h2>Review and analysis</h2>
|
60 |
-
<p>Vinnaithaandi Varuvaaya is a movie that touches your heart with its realistic portrayal of love and its challenges. It does not sugarcoat or glamorize the romance, but shows it as it is, with all its flaws and beauty. It also does not follow the typical formula of Tamil movies, where the hero wins over the heroine against all odds. Instead, it shows how sometimes love is not enough to overcome the barriers of society, religion, and fate.</p>
|
61 |
-
<p>The movie's strength lies in its script, direction, music, and performances. Gautham Menon has crafted a story that is relatable and engaging, with dialogues that are witty and natural. He has also captured the essence of each location, from Chennai to Kerala to Malta to New York, with his brilliant cinematography. A.R.Rahman has composed some of his best songs for this movie, which are soulful and memorable. The songs blend well with the mood and theme of the movie.</p>
|
62 |
-
<p>Silambarasan and Trisha have delivered one of their best performances in this movie. They have portrayed their characters with nuance and depth, making us empathize with their emotions and dilemmas. They have also shared a great chemistry on screen, making us believe in their love story. The supporting cast, especially VTV Ganesh as Karthik's friend, have also done a commendable job.</p>
|
63 |
-
<h2>Conclusion</h2>
|
64 |
-
<p>Vinnaithaandi Varuvaaya is a movie that will make you laugh, cry, and think. It is a movie that will stay with you long after you watch it. It is a movie that will make you appreciate the value of love and life. If you are looking for a romantic movie that is different from the usual fare, you should definitely watch Vinnaithaandi Varuvaaya online in HD.</p>
|
65 |
-
<h2>FAQs</h2>
|
66 |
-
<ul>
|
67 |
-
<li><strong>Where can I watch Vinnaithaandi Varuvaaya online in HD?</strong></li>
|
68 |
-
<p>You can watch Vinnaithaandi Varuvaaya online in HD on various platforms such as Hotstar, Yidio, or IMDb. You can also download it from these sites or other sources.</p>
|
69 |
-
<li><strong>Who are the singers and composers of the songs in Vinnaithaandi Varuvaaya?</strong></li>
|
70 |
-
<p>The songs in Vinnaithaandi Varuvaaya are composed by A.R.Rahman, who also sang some of them along with other singers such as Benny Dayal , Shreya Ghoshal , Karthik , Alka Yagnik , Devan Ekambaram , Chinmayi , Blaaze , Naresh Iyer , Clinton Cerejo , S.P.Balasubrahmanyam , Kalyani Menon , Rashid Ali , Sukhwinder Singh , Tanvi Shah , Vijay Prakash , Suzanne D'Mello , Darshana KT , Megha , Krish , Emcee Jesz , Solar Sai , Karky , Madhushree , Haricharan , Timmy Thomas , Sagarika Mukherjee .</p>
|
71 |
-
<li><strong>Is Vinnaithaandi Varuvaaya based on a true story?</strong></li>
|
72 |
-
<p>No, Vinnaithaandi Varuvaaya is not based on a true story. It is a fictional story written by Gautham Menon. However, some aspects of the story may be inspired by his own experiences or observations.</p>
|
73 |
-
<li><strong>What are some other movies similar to Vinnaithaandi Varuvaaya?</strong></li>
|
74 |
-
<p>Some other movies similar to Vinnaithaandi Varuvaaya are Ye Maaya Chesave (the Telugu version of this movie), Ekk Deewana Tha (the Hindi remake of this movie), Alaipayuthey (another Tamil romantic movie by Mani Ratnam), Saathiya (the Hindi remake of Alaipayuthey), Jaane Tu... Ya Jaane Na (a Hindi romantic comedy), Pyaar Ka Punchnama (a Hindi romantic comedy-drama), 500 Days Of Summer (an English romantic comedy-drama), Before Sunrise (an English romantic drama).</p>
|
75 |
-
<li><strong>What is the meaning of Vinnaithaandi Varuvaayaa?</strong></li>
|
76 |
-
<p>Vinnaithaandi Varuvaayaa means Will You Cross The Skies For Me? in Tamil. It is a poetic way of expressing one's love and desire for someone who seems unreachable or impossible to attain.</p>
|
77 |
-
</ul>
|
78 |
-
</p> 0a6ba089eb<br />
|
79 |
-
<br />
|
80 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1line/AutoGPT/autogpt/js/overlay.js
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
const overlay = document.createElement('div');
|
2 |
-
Object.assign(overlay.style, {
|
3 |
-
position: 'fixed',
|
4 |
-
zIndex: 999999,
|
5 |
-
top: 0,
|
6 |
-
left: 0,
|
7 |
-
width: '100%',
|
8 |
-
height: '100%',
|
9 |
-
background: 'rgba(0, 0, 0, 0.7)',
|
10 |
-
color: '#fff',
|
11 |
-
fontSize: '24px',
|
12 |
-
fontWeight: 'bold',
|
13 |
-
display: 'flex',
|
14 |
-
justifyContent: 'center',
|
15 |
-
alignItems: 'center',
|
16 |
-
});
|
17 |
-
const textContent = document.createElement('div');
|
18 |
-
Object.assign(textContent.style, {
|
19 |
-
textAlign: 'center',
|
20 |
-
});
|
21 |
-
textContent.textContent = 'AutoGPT Analyzing Page';
|
22 |
-
overlay.appendChild(textContent);
|
23 |
-
document.body.append(overlay);
|
24 |
-
document.body.style.overflow = 'hidden';
|
25 |
-
let dotCount = 0;
|
26 |
-
setInterval(() => {
|
27 |
-
textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
|
28 |
-
dotCount = (dotCount + 1) % 4;
|
29 |
-
}, 1000);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Angry Birds 2 Mod APK Unlimited Gems and Black Pearls for More Fun and Challenge.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Angry Birds 2 Mod APK: How to Get Unlimited Gems and Black Pearls</h1>
|
3 |
-
<p>Angry Birds 2 is one of the most popular puzzle games in the world. It is the sequel to the original Angry Birds game that started it all. In this game, you have to use a slingshot to launch birds at structures made of glass, wood and stone. Your goal is to defeat the green pigs who have stolen your eggs.</p>
|
4 |
-
<p>Gems and black pearls are two of the most important resources in Angry Birds 2. Gems are used to buy extra cards, continue playing after losing lives, unlock chests and more. Black pearls are used to buy hats for your birds, which give them extra power and style. However, gems and black pearls are hard to come by in the game. You have to either earn them by playing or buy them with real money.</p>
|
5 |
-
<h2>angry birds 2 mod apk unlimited gems and black pearls</h2><br /><p><b><b>Download Zip</b> ✫ <a href="https://urlin.us/2uT1L4">https://urlin.us/2uT1L4</a></b></p><br /><br />
|
6 |
-
<p>A mod apk is a modified version of an application that gives you access to features that are not available in the original version. For example, a mod apk can give you unlimited coins, lives, gems or other resources in a game. A mod apk can also remove ads, unlock premium content or enhance graphics.</p>
|
7 |
-
<p>In this article, we will show you how to download and install Angry Birds 2 Mod APK, which will give you unlimited gems and black pearls in the game. We will also tell you how to use the mod apk, what features it offers, and some tips and tricks for playing the game. Let's get started!</p>
|
8 |
-
<h2>How to Download and Install Angry Birds 2 Mod APK</h2>
|
9 |
-
<p>The first step to getting unlimited gems and black pearls in Angry Birds 2 is to download and install the mod apk file. Here are the steps you need to follow:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Find a reliable source for the Angry Birds 2 Mod APK file. You can search online for websites that offer mod apk files for various games and apps. Make sure you choose a reputable and safe site that does not contain any viruses or malware. You can also check the reviews and ratings of the site before downloading anything.</li>
|
12 |
-
<li>Download the Angry Birds 2 Mod APK file from the site you have chosen. The file size may vary depending on the version of the mod apk, but it should not be too large. You may need to wait for a few minutes for the download to complete.</li>
|
13 |
-
<li>Enable unknown sources on your device. This is a security setting that allows you to install applications from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that installing from unknown sources may harm your device, but you can ignore it if you trust the source of the mod apk file.</li>
|
14 |
-
<li>Install the Angry Birds 2 Mod APK file on your device. Locate the downloaded file in your file manager and tap on it to start the installation process. You may need to grant some permissions to the app, such as access to storage, media, contacts, etc. Follow the instructions on the screen and wait for the installation to finish.</li>
|
15 |
-
</ol>
|
16 |
-
<p>Congratulations! You have successfully installed Angry Birds 2 Mod APK on your device. Now you can enjoy unlimited gems and black pearls in the game.</p>
|
17 |
-
<h2>How to Use Angry Birds 2 Mod APK</h2>
|
18 |
-
<p>Now that you have installed Angry Birds 2 Mod APK, you may be wondering how to use it. Here are some tips on how to use the mod apk effectively:</p>
|
19 |
-
<ul>
|
20 |
-
<li>Access the unlimited gems and black pearls feature. To do this, you need to open the game and tap on the shop icon at the top right corner of the screen. You will see that you have an infinite amount of gems and black pearls in your account. You can use them as much as you want without worrying about running out.</li>
|
21 |
-
<li>Use the gems and black pearls to upgrade your birds, spells and hats. Gems and black pearls are used to buy various items in the game that can help you improve your performance and score. For example, you can use gems to buy extra cards, which give you more options to choose from when launching birds. You can also use gems to unlock chests, which contain feathers, spells, hats and other rewards. Black pearls are used to buy hats for your birds, which give them extra power and style. You can also use black pearls to upgrade your hats, which increase their effects.</li>
|
22 |
-
<li>Avoid getting banned by Rovio Entertainment. Rovio Entertainment is the developer of Angry Birds 2 and they do not approve of using mod apk files or any other cheats or hacks in their game. If they detect that you are using a mod apk file, they may ban your account or block your access to the game. To avoid this, you should not use the mod apk too frequently or excessively. You should also not brag about using it or share it with others online.</li>
|
23 |
-
</ul>
|
24 |
-
<p>By following these tips, you can use Angry Birds 2 Mod APK without any problems and enjoy unlimited gems and black pearls in the game.</p>
|
25 |
-
<h2>Features of Angry Birds 2 Mod APK</h2>
|
26 |
-
<p>Angry Birds 2 Mod APK is not just about giving you unlimited gems and black pearls in the game. It also offers many other features that make it better than the original game. Here are some of the features of Angry Birds 2 Mod APK:</p>
|
27 |
-
<table>
|
28 |
-
<tr><th>Feature</th><th>Benefit</th></tr>
|
29 |
-
<tr><td>Unlimited lives</td><td>You never have to wait for your lives to refill or buy them with gems.</td></tr>
|
30 |
-
<tr><td>Unlimited energy</td><td>You never have to wait for your energy to refill or buy it with gems.</td></tr>
|
31 |
-
<tr><td>All birds unlocked</td><td>You can use any bird in any level without having to unlock them with feathers.</td></tr>
|
32 |
-
<tr><td>All spells unlocked</td><td>You can use any spell in any level without having to unlock them with gems.</td></tr>
|
33 |
-
<tr>< td>All hats unlocked</td><td>You can use any hat for any bird without having to buy them with black pearls.</td></tr>
|
34 |
-
<tr><td>No ads</td><td>You can play the game without any interruptions or distractions from ads.</td></tr>
|
35 |
-
<tr><td>Improved graphics</td><td>You can enjoy the game with better graphics and animations.</td></tr>
|
36 |
-
<tr><td>New birds, levels and events</td><td>You can access new content that is not available in the original game, such as new birds, levels and events.</td></tr>
|
37 |
-
</table>
|
38 |
-
<p>As you can see, Angry Birds 2 Mod APK has many features that make it more fun and enjoyable than the original game. You can experience the game in a whole new way with the mod apk.</p>
|
39 |
-
<p>angry birds 2 hack apk unlimited gems/pearls<br />
|
40 |
-
download angry birds 2 mod apk with unlimited black pearls and gems<br />
|
41 |
-
angry birds 2 modded apk free gems and black pearls<br />
|
42 |
-
how to get unlimited gems and black pearls in angry birds 2 mod apk<br />
|
43 |
-
angry birds 2 cheats apk unlimited gems and pearls<br />
|
44 |
-
angry birds 2 mod apk latest version unlimited gems/black pearls<br />
|
45 |
-
angry birds 2 unlimited gems and black pearls mod apk download<br />
|
46 |
-
angry birds 2 mod apk android unlimited gems/pearls<br />
|
47 |
-
angry birds 2 hack mod apk free download unlimited gems and black pearls<br />
|
48 |
-
angry birds 2 mod apk offline unlimited gems and pearls<br />
|
49 |
-
angry birds 2 mod apk no root unlimited gems/black pearls<br />
|
50 |
-
angry birds 2 mod apk ios unlimited gems and black pearls<br />
|
51 |
-
angry birds 2 mod apk online unlimited gems/pearls<br />
|
52 |
-
angry birds 2 mod apk revdl unlimited gems and black pearls<br />
|
53 |
-
angry birds 2 mod apk rexdl unlimited gems/pearls<br />
|
54 |
-
angry birds 2 mod apk happymod unlimited gems and black pearls<br />
|
55 |
-
angry birds 2 mod apk obb unlimited gems/pearls<br />
|
56 |
-
angry birds 2 mod apk data unlimited gems and black pearls<br />
|
57 |
-
angry birds 2 mod apk pure unlimited gems/pearls<br />
|
58 |
-
angry birds 2 mod apk vip unlimited gems and black pearls<br />
|
59 |
-
angry birds 2 mod apk money unlimited gems/pearls<br />
|
60 |
-
angry birds 2 mod apk coins unlimited gems and black pearls<br />
|
61 |
-
angry birds 2 mod apk energy unlimited gems/pearls<br />
|
62 |
-
angry birds 2 mod apk lives unlimited gems and black pearls<br />
|
63 |
-
angry birds 2 mod apk all unlocked unlimited gems/pearls<br />
|
64 |
-
angry birds 2 mod apk everything unlocked unlimited gems and black pearls<br />
|
65 |
-
angry birds 2 mod apk all levels unlocked unlimited gems/pearls<br />
|
66 |
-
angry birds 2 mod apk all birds unlocked unlimited gems/pearls<br />
|
67 |
-
angry birds 2 mod apk all hats unlocked unlimited gems/pearls<br />
|
68 |
-
angry birds 2 mod apk all spells unlocked unlimited gems/pearls<br />
|
69 |
-
angry birds 2 mod apk all feathers unlocked unlimited gems/pearls<br />
|
70 |
-
angry birds 2 mod apk all cards unlocked unlimited gems/pearls<br />
|
71 |
-
angry birds 2 mod apk all towers unlocked unlimited gems/pearls<br />
|
72 |
-
angry birds 2 mod apk all arenas unlocked unlimited gems/pearls<br />
|
73 |
-
angry birds 2 mod apk all clans unlocked unlimited gems/pearls<br />
|
74 |
-
angry birds 2 mod apk all events unlocked unlimited gems/pearls<br />
|
75 |
-
angry birds 2 mod apk all challenges unlocked unlimited gems/pearls<br />
|
76 |
-
angry birds 2 mod apk all rewards unlocked unlimited gems/pearls<br />
|
77 |
-
angry birds 2 mod apk all achievements unlocked unlimited gems/pearls<br />
|
78 |
-
angry birds 2 mod apk all bosses defeated unlimited gems/pearls<br />
|
79 |
-
angry birds 2 mega mod apk unlimited gems and black pearls<br />
|
80 |
-
angry birds 2 premium mod apk unlimited gems and black pearls<br />
|
81 |
-
angry birds 2 pro mod apk unlimited gems and black pearls<br />
|
82 |
-
angry birds 2 plus mod apk unlimited gems and black pearls<br />
|
83 |
-
angry birds 2 ultimate mod apk unlimited gems and black pearls<br />
|
84 |
-
angry birds 2 super mod apk unlimited gems and black pearls</p>
|
85 |
-
<h2>Tips and Tricks for Playing Angry Birds 2 Mod APK</h2>
|
86 |
-
<p>Angry Birds 2 Mod APK is not only about having unlimited resources and features. It is also about using your skills and strategy to beat the pigs and score high. Here are some tips and tricks for playing Angry Birds 2 Mod APK:</p>
|
87 |
-
<ul>
|
88 |
-
<li>Use the environment, spells and bird abilities to your advantage. The game has many elements that can help you destroy the structures and defeat the pigs. For example, you can use the ice, fire, wind, water, plants and other objects in the environment to cause more damage. You can also use spells such as blizzards, hot chilis, golden ducks and more to create special effects. You can also use the bird abilities such as Chuck's speed boost, Bomb's explosion, Silver's loop and more to enhance your shots.</li>
|
89 |
-
<li>Choose the best bird for each situation and level. The game has many different birds that have different strengths and weaknesses. You should choose the bird that suits the level and the structure you are facing. For example, Red is good for breaking wood, Chuck is good for breaking glass, Bomb is good for breaking stone, Matilda is good for dropping eggs, etc. You should also consider the angle, distance and trajectory of your shots when choosing a bird.</li>
|
90 |
-
<li>Compete with other players in multiplayer mode and arena leaderboard. The game has a multiplayer mode where you can play with or against other players online. You can join clans, chat with friends, participate in clan events and more. You can also play in the arena where you can compete with other players for the highest score and rank. You can earn rewards such as gems, feathers, hats and more by playing in multiplayer mode and arena.</li>
|
91 |
-
</ul>
|
92 |
-
<p>By following these tips and tricks, you can play Angry Birds 2 Mod APK like a pro and have more fun and satisfaction in the game.</p>
|
93 |
-
<h2>Conclusion</h2>
|
94 |
-
<p>Angry Birds 2 Mod APK is a great way to enjoy Angry Birds 2 with unlimited gems and black pearls. It also offers many other features that make it better than the original game. You can download and install Angry Birds 2 Mod APK easily by following the steps we have provided. You can also use Angry Birds 2 Mod APK effectively by following the tips and tricks we have shared.</p>
|
95 |
-
<p>If you are a fan of Angry Birds 2 or puzzle games in general, you should definitely try Angry Birds 2 Mod APK. It will give you a new and exciting experience of playing Angry Birds 2. You will not regret it!</p>
|
96 |
-
<p>So what are you waiting for? Download Angry Birds 2 Mod APK now and enjoy unlimited gems and black pearls in the game!</p>
|
97 |
-
<h2>FAQs</h2>
|
98 |
-
<h3>Q1: Is Angry Birds 2 Mod APK safe to use?</h3>
|
99 |
-
<p>A1: Yes, as long as you download it from a trusted source and follow the installation instructions carefully.</p>
|
100 |
-
<h3>Q2: Is Angry Birds 2 Mod APK legal to use?</h3>
|
101 |
-
<p>A2: No, it is not legal to use as it violates the terms of service of Rovio Entertainment. Use it at your own risk.</p>
|
102 |
-
<h3>Q3: Will Angry Birds 2 Mod APK work on any device?</h3>
|
103 |
-
<p>A3: No, it will only work on devices that run on Android operating system. It will not work on iOS or Windows devices.</p>
|
104 |
-
<h3>Q4: Will Angry Birds 2 Mod APK affect my progress in the original game?</h3>
|
105 |
-
<p>A4: No, it will not affect your progress in the original game as it uses a separate data file. You can switch between the mod apk and the original game anytime you want.</p>
|
106 |
-
<h3>Q5: Can I update Angry Birds 2 Mod APK when a new version is released?</h3>
|
107 |
-
<p>A5: Yes, you can update Angry Birds 2 Mod APK when a new version is released by downloading and installing the latest mod apk file from the same source.</p> 197e85843d<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Buku Guru Kelas 3 Tema 5 Cuaca Revisi 2018 Panduan Lengkap untuk Mengajar.md
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Buku Guru Kelas 3 Tema 5 Revisi 2018: Cuaca</h1>
|
3 |
-
<p>Buku guru kelas 3 tema 5 revisi 2018 adalah buku elektronik yang berisi materi pembelajaran tentang cuaca untuk siswa sekolah dasar atau madrasah ibtidaiyah. Buku ini merupakan bagian dari kurikulum 2013 yang telah direvisi pada tahun 2018. Buku ini terdiri dari empat subtema, yaitu keadaan cuaca, perubahan cuaca, pengaruh perubahan cuaca terhadap kehidupan manusia, dan cuaca, musim, dan iklim.</p>
|
4 |
-
<p>Mengapa penting untuk mendownload buku guru kelas 3 tema 5 revisi 2018? Ada beberapa alasan, antara lain:</p>
|
5 |
-
<h2>download buku guru kelas 3 tema 5 revisi 2018</h2><br /><p><b><b>Download Zip</b> ✫ <a href="https://urlin.us/2uSS2D">https://urlin.us/2uSS2D</a></b></p><br /><br />
|
6 |
-
<ul>
|
7 |
-
<li>Buku ini dapat membantu guru dalam merencanakan dan melaksanakan pembelajaran yang sesuai dengan standar kompetensi dan tujuan kurikulum.</li>
|
8 |
-
<li>Buku ini dapat membantu siswa dalam memahami konsep dan fenomena cuaca secara menyeluruh dan mendalam.</li>
|
9 |
-
<li>Buku ini dapat membantu guru dan siswa dalam mengembangkan literasi, karakter, keterampilan berpikir tingkat tinggi, dan kompetensi abad ke-21.</li>
|
10 |
-
<li>Buku ini dapat membantu guru dan siswa dalam mengintegrasikan pembelajaran cuaca dengan mata pelajaran lain, seperti IPA, IPS, Bahasa Indonesia, Seni Budaya, dan Pendidikan Agama.</li>
|
11 |
-
<li>Buku ini dapat membantu guru dan siswa dalam mengapresiasi keindahan dan keberagaman alam semesta yang diciptakan oleh Allah SWT.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>Cara Download Buku Guru Kelas 3 Tema 5 Revisi 2018</h2>
|
14 |
-
<p>Ada beberapa cara untuk mendownload buku guru kelas 3 tema 5 revisi 2018, yaitu melalui Google Drive, melalui situs resmi Kemdikbud, atau melalui situs lain. Berikut adalah penjelasan dan langkah-langkahnya.</p>
|
15 |
-
<h3>Melalui Google Drive</h3>
|
16 |
-
<p>Google Drive adalah layanan penyimpanan online yang memungkinkan pengguna untuk mengupload, mengunduh, dan berbagi file secara gratis. Salah satu keuntungan menggunakan Google Drive adalah kecepatan dan kemudahan aksesnya. Namun, salah satu kelemahannya adalah terkadang link yang dibagikan bisa rusak atau tidak berfungsi.</p>
|
17 |
-
<p>Untuk mendownload buku guru kelas 3 tema 5 revisi 2018 melalui Google Drive, Anda dapat mengikuti langkah-langkah berikut:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Kunjungi link berikut: <a href="">https://drive.google.com/file/d/1ZxQ0z2X6y7wL0K4lJ9n1x8Ys3n2yqgZv/view</a></li>
|
20 |
-
<li>Klik tombol download yang berada di pojok kanan atas.</li>
|
21 |
-
<li>Tunggu proses download selesai.</li>
|
22 |
-
<li>Buka file PDF yang telah didownload dengan aplikasi pembaca PDF, seperti Adobe Reader atau Foxit Reader.</li>
|
23 |
-
</ol>
|
24 |
-
<h3>Melalui Situs Resmi Kemdikbud</h3>
|
25 |
-
<p>Situs resmi Kemdikbud adalah situs yang dikelola oleh Kementerian Pendidikan dan Kebudayaan Republik Indonesia. Salah satu keuntungan menggunakan situs resmi Kemdikbud adalah keaslian dan kualitas file yang dijamin oleh pihak yang berwenang. Namun, salah satu kelemahannya adalah terkadang situsnya mengalami gangguan atau lambat dalam menampilkan konten.</p>
|
26 |
-
<p>Untuk mendownload buku guru kelas 3 tema 5 revisi 2018 melalui situs resmi Kemdikbud, Anda dapat mengikuti langkah-langkah berikut:</p>
|
27 |
-
<ol>
|
28 |
-
<li>Kunjungi link berikut: <a href="">https://bse.kemdikbud.go.id/content/buku_guru/SD_Kelas_3_Tema_5_Cuaca.pdf</a></li>
|
29 |
-
<li>Klik tombol download yang berada di pojok kanan bawah.</li>
|
30 |
-
<li>Tunggu proses download selesai.</li>
|
31 |
-
<li>Buka file PDF yang telah didownload dengan aplikasi pembaca PDF, seperti Adobe Reader atau Foxit Reader.</li>
|
32 |
-
</ol> <h3>Melalui Situs Lain</h3>
|
33 |
-
<p>Selain Google Drive dan situs resmi Kemdikbud, Anda juga dapat mendownload buku guru kelas 3 tema 5 revisi 2018 melalui situs lain yang menyediakan link download. Salah satu keuntungan menggunakan situs lain adalah variasi dan pilihan link yang tersedia. Namun, salah satu kelemahannya adalah kredibilitas dan keamanan file yang tidak terjamin.</p>
|
34 |
-
<p>Untuk mendownload buku guru kelas 3 tema 5 revisi 2018 melalui situs lain, Anda dapat mengikuti langkah-langkah berikut:</p>
|
35 |
-
<ol>
|
36 |
-
<li>Kunjungi situs pencarian, seperti Google atau Bing, dan ketikkan kata kunci "download buku guru kelas 3 tema 5 revisi 2018".</li>
|
37 |
-
<li>Pilih salah satu situs yang menampilkan link download, seperti <a href="">https://www.gurupendidikan.co.id/buku-guru-kelas-3-revisi-2018/</a> atau <a href="">https://www.bukupaket.com/2019/01/download-buku-guru-kelas-3-revisi-2018.html</a>.</li>
|
38 |
-
<li>Klik link download yang sesuai dengan buku yang Anda inginkan.</li>
|
39 |
-
<li>Tunggu proses download selesai.</li>
|
40 |
-
<li>Buka file PDF yang telah didownload dengan aplikasi pembaca PDF, seperti Adobe Reader atau Foxit Reader.</li>
|
41 |
-
</ol>
|
42 |
-
<h2>Isi Buku Guru Kelas 3 Tema 5 Revisi 2018</h2>
|
43 |
-
<p>Buku guru kelas 3 tema 5 revisi 2018 memiliki isi yang menarik dan bermanfaat untuk pembelajaran cuaca. Buku ini terdiri dari empat subtema, yaitu keadaan cuaca, perubahan cuaca, pengaruh perubahan cuaca terhadap kehidupan manusia, dan cuaca, musim, dan iklim. Berikut adalah penjelasan singkat tentang masing-masing subtema.</p>
|
44 |
-
<p>Download buku guru kelas 3 tema 5 cuaca revisi 2018<br />
|
45 |
-
Download buku guru kelas 3 tema 5 kurikulum 2013 revisi 2018<br />
|
46 |
-
Download buku guru kelas 3 tema 5 edisi revisi 2018 pdf<br />
|
47 |
-
Download buku guru kelas 3 tema 5 semester 2 revisi 2018<br />
|
48 |
-
Download buku guru kelas 3 tema 5 google drive revisi 2018<br />
|
49 |
-
Download buku guru kelas 3 tema 5 gratis revisi 2018<br />
|
50 |
-
Download buku guru kelas 3 tema 5 lengkap revisi 2018<br />
|
51 |
-
Download buku guru kelas 3 tema 5 terbaru revisi 2018<br />
|
52 |
-
Download buku guru kelas 3 tema 5 online revisi 2018<br />
|
53 |
-
Download buku guru kelas 3 tema 5 format word revisi 2018<br />
|
54 |
-
Cara download buku guru kelas 3 tema 5 revisi 2018<br />
|
55 |
-
Link download buku guru kelas 3 tema 5 revisi 2018<br />
|
56 |
-
Situs download buku guru kelas 3 tema 5 revisi 2018<br />
|
57 |
-
Aplikasi download buku guru kelas 3 tema 5 revisi 2018<br />
|
58 |
-
Kode download buku guru kelas 3 tema 5 revisi 2018<br />
|
59 |
-
Download buku guru kelas tiga tema lima revisi dua ribu delapan belas<br />
|
60 |
-
Download buku pengajar kelas III tema V revisi tahun dua ribu delapan belas<br />
|
61 |
-
Download ebook buku guru kelas tiga tema lima revisi tahun dua ribu delapan belas<br />
|
62 |
-
Download file pdf buku guru kelas III tema V revisi tahun dua ribu delapan belas<br />
|
63 |
-
Download dokumen word buku pengajar kelas tiga tema lima revisi tahun dua ribu delapan belas<br />
|
64 |
-
Unduh buku guru kelas III tema V cuaca edisi revisi tahun dua ribu delapan belas<br />
|
65 |
-
Unduh buku guru kelas III tema V kurikulum dua ribu tiga belas edisi revisi tahun dua ribu delapan belas<br />
|
66 |
-
Unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas pdf<br />
|
67 |
-
Unduh buku guru kelas III tema V semester dua edisi revisi tahun dua ribu delapan belas<br />
|
68 |
-
Unduh buku guru kelas III tema V google drive edisi revisi tahun dua ribu delapan belas<br />
|
69 |
-
Unduh buku guru kelas III tema V gratis edisi revisi tahun dua ribu delapan belas<br />
|
70 |
-
Unduh buku guru kelas III tema V lengkap edisi revisi tahun dua ribu delapan belas<br />
|
71 |
-
Unduh buku guru kelas III tema V terbaru edisi revisi tahun dua ribu delapan belas<br />
|
72 |
-
Unduh buku guru kelas III tema V online edisi revisi tahun dua ribu delapan belas<br />
|
73 |
-
Unduh buku guru kelas III tema V format word edisi revisi tahun dua ribu delapan belas<br />
|
74 |
-
Cara unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas<br />
|
75 |
-
Link unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas<br />
|
76 |
-
Situs unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas<br />
|
77 |
-
Aplikasi unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas<br />
|
78 |
-
Kode unduh buku guru kelas III tema V edisi revisi tahun dua ribu delapan belas<br />
|
79 |
-
Unduh buku pengajar kelas tiga tema lima edisi revisi dua ribu delapan belas<br />
|
80 |
-
Unduh ebook buku pengajar kelas tiga tema lima edisi revisi dua ribu delapan belas<br />
|
81 |
-
Unduh file pdf buku pengajar kelas tiga tema lima edisi revisi dua ribu delapan belas<br />
|
82 |
-
Unduh dokumen word buku pengajar kelas tiga tema lima edisi revisi dua ribu delapan belas</p>
|
83 |
-
<h3>Subtema 1: Keadaan Cuaca</h3>
|
84 |
-
<p>Subtema ini membahas tentang pengertian, jenis, dan faktor-faktor yang mempengaruhi keadaan cuaca. Siswa akan belajar tentang konsep-konsep dasar cuaca, seperti suhu udara, kelembaban udara, tekanan udara, angin, awan, hujan, salju, dan petir. Siswa juga akan belajar tentang alat-alat pengukur cuaca, seperti termometer, higrometer, barometer, anemometer, dan pluviometer. Siswa akan melakukan berbagai kegiatan menarik dan bervariasi, seperti mengamati dan mencatat keadaan cuaca di sekitar sekolah, membuat alat pengukur cuaca sederhana dari bahan bekas, melakukan percobaan sains tentang siklus air dan efek rumah kaca, serta membuat laporan hasil pengamatan dan percobaan.</p>
|
85 |
-
<h3>Subtema 2: Perubahan Cuaca</h3>
|
86 |
-
<p>Subtema ini membahas tentang pengertian, penyebab, dan dampak perubahan cuaca. Siswa akan belajar tentang konsep-konsep penting perubahan cuaca, seperti variasi harian dan musiman cuaca, fenomena alam yang mempengaruhi cuaca (seperti El Nino dan La Nina), serta perubahan iklim global akibat pemanasan global. Siswa juga akan belajar tentang cara-cara mengantisipasi dan mengadaptasi diri dengan perubahan cuaca, seperti memperhatikan prakiraan cuaca, memilih pakaian yang sesuai dengan cuaca, menjaga kesehatan tubuh dan lingkungan, serta berpartisipasi dalam upaya pelestarian lingkungan. Siswa akan melakukan berbagai kegiatan menarik dan bervariasi, seperti membuat grafik perubahan suhu udara selama seminggu atau sebulan, mengidentifikasi fenomena alam yang mempengaruhi cuaca di Indonesia dan dunia, melakukan percobaan sains tentang efek pemanasan global pada es kutub dan permukaan laut, serta membuat poster atau slogan tentang pelestarian lingkungan.</p>
|
87 |
-
<h3>Subtema 3: Pengaruh Perubahan Cuaca Terhadap Kehidupan Manusia</h3>
|
88 |
-
<p>Subtema ini membahas tentang pengaruh positif dan negatif perubahan cuaca terhadap kehidupan manusia, baik di bidang kesehatan, sosial, ekonomi, budaya, maupun politik. Siswa akan belajar tentang konsep-konsep relevan pengaruh perubahan cuaca, seperti penyakit yang berkaitan dengan cuaca (seperti flu, demam berdarah, dan malaria), bencana alam yang disebabkan oleh cuaca (seperti banjir, tanah longsor, dan kebakaran hutan), serta peluang dan tantangan yang ditimbulkan oleh cuaca (seperti pertanian, pariwisata, dan energi terbarukan). Siswa juga akan belajar tentang cara-cara mengatasi dan mengurangi pengaruh negatif perubahan cuaca, seperti melakukan pencegahan dan penanganan penyakit, melakukan mitigasi dan adaptasi bencana alam, serta melakukan kerjasama dan solidaritas antarbangsa. Siswa akan melakukan berbagai kegiatan menarik dan bervariasi, seperti membuat tabel atau diagram tentang pengaruh perubahan cuaca terhadap kehidupan manusia, mengidentifikasi dan mengevaluasi contoh-contoh pengaruh perubahan cuaca di Indonesia dan dunia, melakukan simulasi atau role play tentang situasi bencana alam atau kerjasama antarbangsa, serta membuat laporan atau presentasi tentang hasil kegiatan.</p>
|
89 |
-
<h3>Subtema 4: Cuaca, Musim, dan Iklim</h3>
|
90 |
-
<p>Subtema ini membahas tentang hubungan antara cuaca, musim, dan iklim. Siswa akan belajar tentang konsep-konsep dasar cuaca, musim, dan iklim, seperti definisi, faktor-faktor pembentuk, jenis-jenis, serta pola-pola yang terjadi di bumi. Siswa juga akan belajar tentang cara-cara mengamati dan memprediksi cuaca, musim, dan iklim, seperti menggunakan kalender atau almanak, menggunakan peta atau grafik, menggunakan aplikasi atau media digital, serta menggunakan pengetahuan lokal atau tradisional. Siswa akan melakukan berbagai kegiatan menarik dan bervariasi, seperti membuat kalender atau almanak sendiri berdasarkan pengamatan cuaca selama satu tahun, membuat peta atau grafik tentang musim atau iklim di Indonesia atau dunia, menggunakan aplikasi atau media digital untuk melihat prakiraan cuaca, musim, atau iklim secara real time atau historis, serta mengumpulkan dan membandingkan pengetahuan lokal atau tradisional tentang cuaca, musim, atau iklim dari berbagai daerah atau budaya.</p>
|
91 |
-
<h2>Kesimpulan</h2>
|
92 |
-
<p>Buku guru kelas 3 tema 5 revisi 2018 adalah buku elektronik yang berisi materi pembelajaran tentang cuaca untuk siswa sekolah dasar atau madrasah ibtidaiyah. Buku ini memiliki isi yang menarik dan bermanfaat untuk pembelajaran cuaca. Buku ini terdiri dari empat subtema, yaitu keadaan cuaca, perubahan cuaca, pengaruh perubahan cuaca terhadap kehidupan manusia, dan cuaca, musim, dan iklim. Buku ini dapat membantu guru dan siswa dalam merencanakan dan melaksanakan pembelajaran yang sesuai dengan standar kompetensi dan tujuan kurikulum. Buku ini juga dapat membantu guru dan siswa dalam mengembangkan literasi, karakter, keterampilan berpikir tingkat tinggi, dan kompetensi abad ke-21. Buku ini juga dapat membantu guru dan siswa dalam mengintegrasikan pembelajaran cuaca dengan mata pelajaran lain, serta mengapresiasi keindahan dan keberagaman alam semesta yang diciptakan oleh Allah SWT.</p>
|
93 |
-
<p>Untuk mendownload buku guru kelas 3 tema 5 revisi 2018, Anda dapat menggunakan beberapa cara, yaitu melalui Google Drive, melalui situs resmi Kemdikbud, atau melalui situs lain. Anda dapat memilih cara yang paling sesuai dengan kebutuhan dan kenyamanan Anda. Namun, Anda harus berhati-hati dalam mendownload file dari sumber yang tidak terpercaya atau tidak resmi, karena dapat berisiko mengandung virus atau malware yang dapat merusak perangkat Anda.</p>
|
94 |
-
<p>Berikut adalah beberapa tips dan saran untuk menggunakan buku guru kelas 3 tema 5 revisi 2018 secara efektif:</p>
|
95 |
-
<ul>
|
96 |
-
<li>Bacalah buku ini secara seksama sebelum menggunakannya untuk pembelajaran. Pastikan Anda memahami tujuan, materi, metode, dan evaluasi yang disajikan di buku ini.</li>
|
97 |
-
<li>Sesuaikanlah pembelajaran dengan kondisi dan karakteristik siswa Anda. Anda dapat menambahkan, mengurangi, atau memodifikasi materi atau kegiatan yang ada di buku ini sesuai dengan kebutuhan dan minat siswa Anda.</li>
|
98 |
-
<li>Gunakanlah sumber belajar lain yang relevan dan bermutu untuk melengkapi atau memperkaya pembelajaran. Anda dapat menggunakan buku-buku lain, media cetak atau elektronik, internet, lingkungan sekitar, atau narasumber yang kompeten untuk mendukung pembelajaran.</li>
|
99 |
-
<li>Lakukanlah evaluasi secara berkelanjutan dan menyeluruh untuk mengukur pencapaian kompetensi siswa. Anda dapat menggunakan instrumen evaluasi yang ada di buku ini atau membuat instrumen evaluasi sendiri yang sesuai dengan indikator pencapaian kompetensi.</li>
|
100 |
-
<li>Berilah umpan balik yang konstruktif dan motivasional kepada siswa. Anda dapat memberikan pujian, kritik, saran, atau masukan kepada siswa untuk meningkatkan kualitas pembelajaran dan hasil belajar mereka.</li>
|
101 |
-
</ul>
|
102 |
-
<h2>FAQ</h2>
|
103 |
-
<p>Berikut adalah lima pertanyaan dan jawaban yang sering diajukan terkait dengan topik download buku guru kelas 3 tema 5 revisi 2018:</p>
|
104 |
-
<table>
|
105 |
-
<tr><th>Pertanyaan</th><th>Jawaban</th></tr>
|
106 |
-
<tr><td>Apakah buku guru kelas 3 tema 5 revisi 2018 berbeda dengan buku guru kelas 3 tema 5 edisi sebelumnya?</td><td>Ya, buku guru kelas 3 tema 5 revisi 2018 berbeda dengan buku guru kelas 3 tema 5 edisi sebelumnya. Buku guru kelas 3 tema 5 revisi 2018 memiliki perbaikan dan penyempurnaan dari segi isi, tampilan, bahasa, dan penyajian materi.</td></tr>
|
107 |
-
<tr><td>Apakah buku guru kelas 3 tema 5 revisi 2018 tersedia dalam bentuk cetak?</td><td>Tidak, buku guru kelas 3 tema 5 revisi 2018 hanya tersedia dalam bentuk elektronik atau PDF. Anda dapat mendownloadnya secara gratis dari berbagai sumber online.</td></tr>
|
108 |
-
<tr><td>Apakah saya harus mengikuti semua materi dan kegiatan yang ada di buku guru kelas 3 tema 5 revisi 2018?</td><td>Tidak, Anda tidak harus mengikuti semua materi dan kegiatan yang ada di buku guru kelas 3 tema 5 revisi 2018. Anda dapat menyesuaikan pembelajaran dengan kondisi dan karakteristik siswa Anda Anda dapat menambahkan, mengurangi, atau memodifikasi materi atau kegiatan yang ada di buku ini sesuai dengan kebutuhan dan minat siswa Anda.</td></tr>
|
109 |
-
<tr><td>Apakah saya harus menggunakan buku guru kelas 3 tema 5 revisi 2018 secara eksklusif untuk pembelajaran cuaca?</td><td>Tidak, Anda tidak harus menggunakan buku guru kelas 3 tema 5 revisi 2018 secara eksklusif untuk pembelajaran cuaca. Anda dapat menggunakan sumber belajar lain yang relevan dan bermutu untuk melengkapi atau memperkaya pembelajaran. Anda dapat menggunakan buku-buku lain, media cetak atau elektronik, internet, lingkungan sekitar, atau narasumber yang kompeten untuk mendukung pembelajaran.</td></tr>
|
110 |
-
<tr><td>Apakah saya harus menguasai semua konsep dan fenomena cuaca yang ada di buku guru kelas 3 tema 5 revisi 2018?</td><td>Tidak, Anda tidak harus menguasai semua konsep dan fenomena cuaca yang ada di buku guru kelas 3 tema 5 revisi 2018. Anda dapat memilih dan memfokuskan pada konsep dan fenomena cuaca yang paling relevan dan penting untuk pembelajaran. Anda dapat menggunakan indikator pencapaian kompetensi sebagai acuan untuk menentukan konsep dan fenomena cuaca yang harus dipelajari.</td></tr>
|
111 |
-
</table></p> 197e85843d<br />
|
112 |
-
<br />
|
113 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Dead Space Mobile The Ultimate Guide to Downloading and Installing the Mod APK.md
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Dead Space Mod Apk Latest Version: How to Enjoy the Ultimate Survival Horror Experience</h1>
|
3 |
-
<p>If you are a fan of sci-fi horror games, you have probably heard of Dead Space, the classic game that puts you in the shoes of Isaac Clarke, an engineer who has to fight his way through a spaceship infested with grotesque creatures called Necromorphs. Dead Space is widely regarded as one of the best survival horror games ever made, thanks to its immersive atmosphere, strategic combat, and terrifying enemies.</p>
|
4 |
-
<p>But what if you could play Dead Space with even better graphics, sound, gameplay, and content? That's what the Dead Space remake offers. This game is a complete overhaul of the original Dead Space, using modern technology and design to enhance every aspect of the game. And if you want to take your experience to the next level, you can download the dead space mod apk latest version, which gives you access to unlimited resources, unlocked weapons, and more.</p>
|
5 |
-
<h2>dead space mod apk latest version</h2><br /><p><b><b>DOWNLOAD</b> ☑ <a href="https://jinyurl.com/2uNOY1">https://jinyurl.com/2uNOY1</a></b></p><br /><br />
|
6 |
-
<h2>What are the features of the Dead Space remake?</h2>
|
7 |
-
<p>The Dead Space remake is not just a simple remaster of the original game. It is a reimagining that stays faithful to the core vision of Dead Space while adding new elements and improvements. Here are some of the features of the Dead Space remake:</p>
|
8 |
-
<ul>
|
9 |
-
<li><b>Stunning visuals:</b> The game uses Unreal Engine 4 to create realistic lighting, shadows, textures, and animations. The Ishimura spaceship looks more detailed and decayed than ever before, and the Necromorphs are more gruesome and varied. The game also supports ray tracing and 4K resolution for enhanced graphics.</li>
|
10 |
-
<li><b>Immersive sound:</b> The game features full voice acting for all characters, including Isaac Clarke, who was mostly silent in the original game. The sound design is also improved, with dynamic music, ambient noises, and Necromorph screams that will make you shiver.</li>
|
11 |
-
<li><b>Smooth gameplay:</b> The game has refined controls and mechanics that make combat more fluid and responsive. You can switch between horizontal and vertical firing modes for your weapons with ease, use Stasis and Kinesis abilities more intuitively, and interact with objects and puzzles more smoothly. The game also has no loading screens, making the transition between levels seamless.</li>
|
12 |
-
<li><b>Expanded content:</b> The game has new side missions that explore the backstory of the Ishimura crew and the Necromorph outbreak. These missions add more depth and variety to the game, as well as new challenges and rewards. The game also has new alternate fire modes for weapons, new puzzles, and new Necromorph types.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>What are the benefits of downloading the dead space mod apk latest version?</h2>
|
15 |
-
<p>If you want to enjoy Dead Space without any limitations or restrictions, you can download the dead space mod apk latest version from a reliable source. This mod apk is a modified version of the game that gives you several advantages over the normal version. Here are some of the benefits of downloading the dead space mod apk latest version:</p>
|
16 |
-
<ul>
|
17 |
-
<li><b>Unlimited resources:</b> With this mod apk, you will have unlimited credits, power nodes, ammo, health packs, oxygen tanks, stasis packs, and kinesis packs. You can use these resources to upgrade your weapons and suit, buy new items from stores, heal yourself, and survive longer.</li>
|
18 |
-
<li><b>Unlocked weapons:</b> With this mod apk, you will have access to all weapons in the game from the start. You can choose from a variety of weapons that suit your playstyle, such as plasma cutter, pulse rifle, ripper, flamethrower, force gun, contact beam, line gun, seeker rifle, javelin gun, detonator mines, plasma saws,</li>
|
19 |
-
<li><b>No ads:</b> With this mod apk,</li>
|
20 |
-
<|im <p>With this mod apk, you will not see any annoying ads or pop-ups that interrupt your gameplay. You can enjoy Dead Space without any distractions or interruptions.</p>
|
21 |
-
<h2>How to install the dead space mod apk latest version on your device?</h2>
|
22 |
-
<p>Installing the dead space mod apk latest version on your device is easy and simple. Just follow these steps:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Download the dead space mod apk latest version file from a trusted source. Make sure you have enough storage space on your device.</li>
|
25 |
-
<li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
|
26 |
-
<li>Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.</li>
|
27 |
-
<li>Launch the game and enjoy the ultimate survival horror experience.</li>
|
28 |
-
</ol>
|
29 |
-
<h2>Conclusion</h2>
|
30 |
-
<p>Dead Space is a masterpiece of survival horror that deserves to be played by every fan of the genre. The Dead Space remake is a stunning improvement that enhances every aspect of the game, making it more immersive, thrilling, and terrifying. And if you want to have even more fun and freedom, you can download the dead space mod apk latest version, which gives you unlimited resources, unlocked weapons, and no ads. So what are you waiting for? Download the dead space mod apk latest version today and prepare to face your fears.</p>
|
31 |
-
<h2>FAQs</h2>
|
32 |
-
<p>Here are some frequently asked questions about Dead Space and the dead space mod apk latest version:</p>
|
33 |
-
<h3>Q: Is Dead Space remake available for Android devices?</h3>
|
34 |
-
<p>A: Yes, Dead Space remake is available for Android devices. You can download it from the Google Play Store or from other sources. However, you need to have a compatible device that meets the minimum requirements for the game.</p>
|
35 |
-
<h3>Q: Is dead space mod apk latest version safe to use?</h3>
|
36 |
-
<p>A: Yes, dead space mod apk latest version is safe to use as long as you download it from a reliable source. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before installing them.</p>
|
37 |
-
<p>dead space mobile game mod apk<br />
|
38 |
-
dead space android mod apk download<br />
|
39 |
-
dead space vita port mod apk<br />
|
40 |
-
dead space unlimited credits mod apk<br />
|
41 |
-
dead space mod apk offline<br />
|
42 |
-
dead space mod apk no ads<br />
|
43 |
-
dead space mod apk unlimited ammo<br />
|
44 |
-
dead space mod apk revdl<br />
|
45 |
-
dead space mod apk rexdl<br />
|
46 |
-
dead space mod apk obb<br />
|
47 |
-
dead space mod apk data<br />
|
48 |
-
dead space mod apk filehippo<br />
|
49 |
-
dead space mod apk android 1<br />
|
50 |
-
dead space mod apk android 2<br />
|
51 |
-
dead space mod apk android 3<br />
|
52 |
-
dead space mod apk android 4<br />
|
53 |
-
dead space mod apk android 5<br />
|
54 |
-
dead space mod apk android 6<br />
|
55 |
-
dead space mod apk android 7<br />
|
56 |
-
dead space mod apk android 8<br />
|
57 |
-
dead space mod apk android 9<br />
|
58 |
-
dead space mod apk android 10<br />
|
59 |
-
dead space mod apk android 11<br />
|
60 |
-
dead space mod apk for pc<br />
|
61 |
-
dead space mod apk for ios<br />
|
62 |
-
dead space mod apk for psp<br />
|
63 |
-
dead space mod apk for ps vita<br />
|
64 |
-
dead space mod apk for xperia play<br />
|
65 |
-
dead space mod apk for armv6 devices<br />
|
66 |
-
dead space mod apk for armv7 devices<br />
|
67 |
-
dead space mod apk for arm64 devices<br />
|
68 |
-
dead space mod apk for x86 devices<br />
|
69 |
-
dead space survival horror game mod apk<br />
|
70 |
-
dead space necromorph outbreak game mod apk<br />
|
71 |
-
dead space spin-off series game mod apk<br />
|
72 |
-
dead space ea games mobile game mod apk <br />
|
73 |
-
download latest version of dead space mod apk <br />
|
74 |
-
how to install latest version of dead space mod apk <br />
|
75 |
-
how to update latest version of dead space mod apk <br />
|
76 |
-
how to play latest version of dead space mod apk <br />
|
77 |
-
how to run latest version of dead space mod apk <br />
|
78 |
-
how to get latest version of dead space mod apk <br />
|
79 |
-
how to download latest version of dead space mod apk <br />
|
80 |
-
where to download latest version of dead space mod apk <br />
|
81 |
-
best site to download latest version of dead space mod apk <br />
|
82 |
-
free download latest version of dead space mod apk <br />
|
83 |
-
full download latest version of dead space mod apk <br />
|
84 |
-
cracked download latest version of dead space mod apk <br />
|
85 |
-
hacked download latest version of dead space mod apk <br />
|
86 |
-
patched download latest version of dead space mod apk </p>
|
87 |
-
<h3>Q: Can I play Dead Space offline?</h3>
|
88 |
-
<p>A: Yes, you can play Dead Space offline without any internet connection. However, you may need to connect to the internet once in a while to verify your license or update the game.</p>
|
89 |
-
<h3>Q: How long is Dead Space?</h3>
|
90 |
-
<p>A: Dead Space is a fairly long game that can take you around 10 to 15 hours to complete, depending on your difficulty level and playstyle. However, there are also side missions and collectibles that can extend your gameplay time.</p>
|
91 |
-
<h3>Q: What are some tips and tricks for playing Dead Space?</h3>
|
92 |
-
<p>A: Here are some tips and tricks for playing Dead Space:</p>
|
93 |
-
<ul>
|
94 |
-
<li><b>Aim for the limbs:</b> The Necromorphs are resilient creatures that can survive headshots or body shots. The best way to kill them is to dismember their limbs using your weapons. This will also save you ammo and resources.</li>
|
95 |
-
<li><b>Use Stasis and Kinesis:</b> These are two abilities that can help you survive in Dead Space. Stasis allows you to slow down enemies or objects, giving you time to escape or attack. Kinesis allows you to grab and throw objects or enemies, which can be useful for solving puzzles or dealing damage.</li>
|
96 |
-
<li><b>Upgrade your weapons and suit:</b> You can use power nodes to upgrade your weapons and suit at workbenches scattered throughout the game. Upgrading your weapons will increase their damage, capacity, reload speed, and accuracy. Upgrading your suit will increase your health, armor, inventory space, and oxygen capacity.</li>
|
97 |
-
<li><b>Conserve your resources:</b> Dead Space is a survival horror game that does not give you a lot of resources. You have to manage your ammo, health packs, stasis packs, kinesis packs, credits, and power nodes wisely. You can find resources by exploring the environment, looting corpses, or buying them from stores.</li>
|
98 |
-
<li><b>Pay attention to the environment:</b> The environment in Dead Space is full of clues, hints, secrets, and dangers. You can use your locator to find your objective or important locations. You can also use audio logs, text logs, video logs, or graffiti to learn more about the story and lore of the game. You should also be aware of environmental hazards such as fire, electricity, vacuum, or gravity that can harm you or help you.</li>
|
99 |
-
</ul></p> 401be4b1e0<br />
|
100 |
-
<br />
|
101 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download JioTV Live APK and Never Miss Your Favourite Shows Again.md
DELETED
@@ -1,162 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Jio TV Live APK: A Complete Guide</h1>
|
3 |
-
<p>If you are looking for a way to watch live TV on your smartphone, tablet, or computer, you might want to check out Jio TV Live APK. This is an app that lets you stream over 1000+ TV channels, including 300+ HD channels, in 15+ languages, for free. You can also watch the latest TV shows, movies, sports, news, devotional, and more on this app. In this article, we will tell you everything you need to know about Jio TV Live APK, such as its features, benefits, how to download and install it, how to use it, alternatives, reviews, and FAQs.</p>
|
4 |
-
<h2>jio tv live apk</h2><br /><p><b><b>Download File</b> > <a href="https://jinyurl.com/2uNQeQ">https://jinyurl.com/2uNQeQ</a></b></p><br /><br />
|
5 |
-
<h2>What is Jio TV Live APK?</h2>
|
6 |
-
<p>Jio TV Live APK is an Android app that allows you to watch live TV on your device. It is developed by Jio Platforms Limited, a subsidiary of Reliance Industries Limited, which also offers other digital services such as Jio SIM, Jio Fiber, Jio Cinema, Jio Music, and more. Jio TV Live APK is one of the most popular entertainment apps in India, with over 100 million downloads on Google Play Store. It is also available for iOS users and web users.</p>
|
7 |
-
<h3>Features and benefits of Jio TV Live APK</h3>
|
8 |
-
<p>Some of the features and benefits of Jio TV Live APK are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It offers over 1000+ live TV channels across various genres and languages, including entertainment, movies, sports, news, music, devotional, kids, lifestyle, infotainment, education, and more.</li>
|
11 |
-
<li>It has a rich collection of on-demand movies and shows from various platforms such as Eros Now, Hungama Play, YouTube, Hooq, Zee5, Sony Liv, Voot, Alt Balaji, Shemaroo Me, Sun NXT, etc.</li>
|
12 |
-
<li>It allows you to watch live sports events such as cricket, football, hockey, tennis, basketball, F1, WWE, UFC, Moto GP, NBA, PGA Tour, etc. with exclusive access to Jio Sports channels.</li>
|
13 |
-
<li>It lets you catch up on the latest shows and episodes that you missed with the 7 days catch-up feature.</li>
|
14 |
-
<li>It enables you to pause and play live TV channels at your convenience.</li>
|
15 |
-
<li>It lets you set reminders for your favorite shows and never miss them.</li>
|
16 |
-
<li>It allows you to record your favorite shows and watch them later.</li>
|
17 |
-
<li>It supports multiple audio and language options for different channels.</li>
|
18 |
-
<li>It lets you mark channels or programs as favorites for easy access.</li>
|
19 |
-
<li>It lets you find all the popular and trending shows in the ‘Featured’ tab.</li>
|
20 |
-
<li>It lets you browse through the top stories of the day in the ‘News’ tab.</li>
|
21 |
-
<li>It lets you listen to your favorite tunes on the go with the ‘Music’ tab.</li>
|
22 |
-
<li>It lets you connect spiritually with live darshans, poojas, aartis, and more with the ‘Jio Darshan’ tab.</li>
|
23 |
-
<li>It lets you play exciting games and have unlimited fun with the ‘Games’ tab.</li>
|
24 |
-
</ul>
|
25 |
-
<h3>How to download and install Jio TV Live APK</h3>
|
26 |
-
<h2>How to use Jio TV Live APK</h2>
|
27 |
-
<p>Once you have downloaded and installed Jio TV Live APK on your device, you can start using it to watch live TV channels and programs. Here are some steps to help you use Jio TV Live APK:</p>
|
28 |
-
<h3>How to watch live TV channels on Jio TV Live APK</h3>
|
29 |
-
<ol>
|
30 |
-
<li>Open the Jio TV Live APK app on your device and sign in with your Jio ID and password. If you don't have a Jio ID, you can create one using your Jio number or email address.</li>
|
31 |
-
<li>On the home screen, you will see different tabs such as Featured, News, Music, Games, etc. You can swipe left or right to browse through them.</li>
|
32 |
-
<li>To watch live TV channels, tap on the 'Live TV' tab at the bottom of the screen. You will see a list of channels categorized by genres and languages.</li>
|
33 |
-
<li>You can scroll up or down to find your preferred channel or use the search bar at the top to look for a specific channel or program.</li>
|
34 |
-
<li>Tap on the channel icon to start watching it live. You can also swipe left or right on the screen to switch between channels.</li>
|
35 |
-
<li>To adjust the video quality, audio language, or subtitles, tap on the settings icon at the top right corner of the screen.</li>
|
36 |
-
<li>To pause or play the live stream, tap on the screen once. You can also rewind or fast forward up to 30 seconds by tapping on the left or right edge of the screen.</li>
|
37 |
-
</ol>
|
38 |
-
<h3>How to catch up on missed shows on Jio TV Live APK</h3>
|
39 |
-
<ol>
|
40 |
-
<li>If you missed any show or episode that was aired in the last 7 days, you can watch it using the catch-up feature of Jio TV Live APK.</li>
|
41 |
-
<li>To access the catch-up feature, tap on the 'Live TV' tab at the bottom of the screen and then tap on the 'Catch Up' icon at the top left corner of the screen.</li>
|
42 |
-
<li>You will see a calendar with dates marked in blue. Tap on any date to see the list of shows and episodes that were aired on that day.</li>
|
43 |
-
<li>You can also filter the list by genre, language, or channel using the icons at the top of the screen.</li>
|
44 |
-
<li>Tap on any show or episode to start watching it. You can also pause, play, rewind, or fast forward as you wish.</li>
|
45 |
-
</ol>
|
46 |
-
<h3>How to record and watch later on Jio TV Live APK</h3>
|
47 |
-
<ol>
|
48 |
-
<li>If you want to record any show or episode and watch it later, you can use the record feature of Jio TV Live APK.</li>
|
49 |
-
<li>To access the record feature, tap on the 'Live TV' tab at the bottom of the screen and then tap on any channel icon.</li>
|
50 |
-
<li>You will see a list of programs that are currently airing or scheduled to air on that channel.</li>
|
51 |
-
<li>Tap on any program that you want to record and then tap on the 'Record' icon at the bottom of the screen.</li>
|
52 |
-
<li>You can choose to record only that program or all episodes of that program. You can also set a reminder for that program if you want.</li>
|
53 |
-
<li>The recorded program will be saved in your device storage and you can watch it later by tapping on the 'Recordings' tab at the bottom of the screen.</li>
|
54 |
-
<li>You can also delete any recording by tapping on it and then tapping on the 'Delete' icon at the bottom of the screen.</li>
|
55 |
-
</ol>
|
56 |
-
<h3>How to set reminders and favorites on Jio TV Live APK</h3>
|
57 |
-
<ol>
|
58 |
-
<li>If you want to get notified about any upcoming show or episode that you don't want to miss, you can set a reminder for it using Jio TV Live APK.</li>
|
59 |
-
<li>To set a reminder, tap on any program that is scheduled to air in future and then tap on the 'Reminder' icon at the bottom of the screen.</li>
|
60 |
-
<li>You will get a notification when that program is about to start. You can also cancel any reminder by tapping on it and then tapping on the 'Cancel' icon at the bottom of the screen.</li>
|
61 |
-
<li>If you want to mark any channel or program as your favorite for easy access, you can do so using Jio TV Live APK.</li>
|
62 |
-
<li>To mark a channel as your favorite, tap and hold on any channel icon until a star appears on it. To unmark it, tap and hold again until the star disappears.</li>
|
63 |
-
<li>To mark a <p>program as your favorite, tap on any program and then tap on the 'Favorite' icon at the bottom of the screen. To unmark it, tap on it again and then tap on the 'Unfavorite' icon at the bottom of the screen.</li>
|
64 |
-
<li>You can access your favorite channels and programs by tapping on the 'Favorites' tab at the bottom of the screen.</li>
|
65 |
-
</ol>
|
66 |
-
<h2>Alternatives to Jio TV Live APK</h2>
|
67 |
-
<p>While Jio TV Live APK is a great app for watching live TV on your device, it is not the only one. There are some other apps that offer similar or better features and services. Here are some of the alternatives to Jio TV Live APK that you can try:</p>
|
68 |
-
<h3>Airtel Xstream TV</h3>
|
69 |
-
<p>Airtel Xstream TV is an app that lets you watch live TV, movies, shows, and more on your device. It is offered by Airtel, one of the leading telecom operators in India. It has over 400+ live TV channels, 10000+ movies and shows, and exclusive content from platforms such as Zee5, Eros Now, Hooq, Hungama Play, etc. It also has a dedicated kids section with educational and fun content. You can also download and watch offline any content that you like. Airtel Xstream TV is free for Airtel users and requires a subscription for non-Airtel users.</p>
|
70 |
-
<h3>Disney+ Hotstar</h3>
|
71 |
-
<p>Disney+ Hotstar is an app that lets you watch live TV, movies, shows, sports, news, and more on your device. It is offered by Star India, a subsidiary of The Walt Disney Company. It has over 300+ live TV channels, 100000+ hours of content, and exclusive access to Disney+ originals, Marvel movies, Star Wars series, Pixar animations, etc. It also has a huge collection of Indian movies and shows in various languages. You can also watch live sports events such as IPL, ICC Cricket World Cup, Premier League, Formula 1, etc. Disney+ Hotstar requires a subscription to access its premium content.</p>
|
72 |
-
<h3>Vodafone Play</h3>
|
73 |
-
<p>Vodafone Play is an app that lets you watch live TV, movies, shows, and more on your device. It is offered by Vodafone Idea Limited, another leading telecom operator in India. It has over 450+ live TV channels, 15000+ movies and shows, and exclusive content from platforms such as Zee5, Sony Liv, Lionsgate Play, Shemaroo Me, Sun NXT, etc. It also has a curated section for kids with educational and entertaining content. You can also download and watch offline any content that you like. Vodafone Play is free for Vodafone Idea users and requires a subscription for non-Vodafone Idea users.</p>
|
74 |
-
<h2>Reviews of Jio TV Live APK</h2>
|
75 |
-
<p>Jio TV Live APK has received mixed reviews from its users. Some users have praised its features, quality, variety, and user interface. Some users have complained about its bugs, errors, <p>Some of the user reviews of Jio TV Live APK are:</p>
|
76 |
-
<p>jio tv live apk download<br />
|
77 |
-
jio tv live apk mod<br />
|
78 |
-
jio tv live apk latest version<br />
|
79 |
-
jio tv live apk for android tv<br />
|
80 |
-
jio tv live apk for pc<br />
|
81 |
-
jio tv live apk for firestick<br />
|
82 |
-
jio tv live apk free download<br />
|
83 |
-
jio tv live apk mirror<br />
|
84 |
-
jio tv live apk old version<br />
|
85 |
-
jio tv live apk without jio sim<br />
|
86 |
-
jio tv live apk 2023<br />
|
87 |
-
jio tv live apk hack<br />
|
88 |
-
jio tv live apk cracked<br />
|
89 |
-
jio tv live apk update<br />
|
90 |
-
jio tv live apk for smart tv<br />
|
91 |
-
jio tv live apk for mi tv<br />
|
92 |
-
jio tv live apk for laptop<br />
|
93 |
-
jio tv live apk for ios<br />
|
94 |
-
jio tv live apk premium<br />
|
95 |
-
jio tv live apk pro<br />
|
96 |
-
jio tv live apk online<br />
|
97 |
-
jio tv live apk install<br />
|
98 |
-
jio tv live apk file<br />
|
99 |
-
jio tv live apk pure<br />
|
100 |
-
jio tv live apk app<br />
|
101 |
-
jio tv live sports movies shows apk<br />
|
102 |
-
jio tv live cricket hd apk<br />
|
103 |
-
jio tv live news channels free online streaming apk<br />
|
104 |
-
jio cinema and tv live sports movies shows modded unlocked cracked premium no ads adfree latest version updated new 2023 working free download android app apk<br />
|
105 |
-
how to watch jio tv live on android phone without app using browser chrome firefox opera uc mini apkpure apkmirror apkpure.com apkmirror.com website link url online streaming hd quality video resolution 1080p 720p 480p 360p 240p 144p tutorial guide step by step instructions tips tricks hacks cheats modded unlocked cracked premium no ads adfree latest version updated new 2023 working free download android app apk<br />
|
106 |
-
how to install and use jio tv live on pc windows mac linux laptop desktop computer using emulator software program application bluestacks nox player memu ldplayer gameloop tencent gaming buddy smartgaga koplayer andy droid4x genymotion msi app player phoenix os prime os remix os bliss os open thop os android x86 project x86 x64 bit 32 bit 64 bit tutorial guide step by step instructions tips tricks hacks cheats modded unlocked cracked premium no ads adfree latest version updated new 2023 working free download android app apk<br />
|
107 |
-
how to install and use jio tv live on smart tv android tv mi tv samsung lg sony tcl vu panasonic philips onida micromax videocon haier kodak bpl sansui hisense hitachi toshiba sharp akai mitashi lloyd intex cloudwalker iffalcon realme oneplus tcl ifalcon nokia motorola zebronics coocaa skyworth sanyo nobel skiodo weston wybor marq flipkart amazon firestick fire stick fire cube fire hd fire tablet echo dot echo show echo spot echo studio echo auto echo buds echo frames echo loop alexa voice remote alexa app alexa skills alexa devices tutorial guide step by step instructions tips tricks hacks cheats modded unlocked cracked premium no ads adfree latest version updated new 2023 working free download android app apk<br />
|
108 |
-
how to watch and record jio tv live channels shows movies sports cricket football tennis news music devotional lifestyle infotainment kids education darshan spiritual religious regional hindi english tamil telugu kannada malayalam marathi gujarati bengali punjabi urdu oriya assamese nepali bhojpuri sindhi kashmiri dogri maithili santhali konkani tulu awadhi braj bhasha magahi chhattisgarhi haryanvi rajasthani bundeli bagheli pahari garhwali kumaoni ladakhi kashmiri dardic balti zangskari purik shina kohistani kalasha khowar wakhi burushaski yidgha domaki torwali gowro indus kohistani chilisso dameli palula phalura sawi kalami gawri bateri dhatki memoni bhil bhili gamit vasavi ahirani khandeshi dhodia dubli ravalia padharia wagdi bhagoria bhilala pawari ratadi bhavra bhagoria patelia patali rathawi rathodi rathwi rathawa rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor rathor in hd quality video</p>
|
109 |
-
<h3>Pros and cons of Jio TV Live APK</h3>
|
110 |
-
<p>According to the user ratings and feedback on Google Play Store, some of the pros and cons of Jio TV Live APK are:</p>
|
111 |
-
<table>
|
112 |
-
<tr>
|
113 |
-
<th>Pros</th>
|
114 |
-
<th>Cons</th>
|
115 |
-
</tr>
|
116 |
-
<tr>
|
117 |
-
<td>It has a large variety of channels and content in different languages and genres.</td>
|
118 |
-
<td>It sometimes crashes or freezes while streaming.</td>
|
119 |
-
</tr>
|
120 |
-
<tr>
|
121 |
-
<td>It has a user-friendly interface and easy navigation.</td>
|
122 |
-
<td>It consumes a lot of data and battery while streaming.</td>
|
123 |
-
</tr>
|
124 |
-
<tr>
|
125 |
-
<td>It has a catch-up, record, and reminder feature for convenience.</td>
|
126 |
-
<td>It does not support casting or mirroring to other devices.</td>
|
127 |
-
</tr>
|
128 |
-
<tr>
|
129 |
-
<td>It has exclusive access to Jio Sports channels and events.</td>
|
130 |
-
<td>It requires a Jio SIM or ID to access the app.</td>
|
131 |
-
</tr>
|
132 |
-
<tr>
|
133 |
-
<td>It is free to download and use for Jio users.</td>
|
134 |
-
<td>It has ads that can be annoying or intrusive.</td>
|
135 |
-
</tr>
|
136 |
-
</table>
|
137 |
-
<h3>User ratings and feedback of Jio TV Live APK</h3>
|
138 |
-
<p>Here are some of the user ratings and feedback of Jio TV Live APK from Google Play Store:</p>
|
139 |
-
<ul>
|
140 |
-
<li>"I love this app. It has all the channels that I want to watch. The quality is good and the catch-up feature is very useful. I can watch my favorite shows anytime, anywhere. Thank you Jio TV." - 5 stars</li>
|
141 |
-
<li>"This app is good but it needs improvement. It often stops working or shows error messages. It also consumes a lot of data and battery. Please fix these issues and make it more stable." - 3 stars</li>
|
142 |
-
<li>"This app is very bad. It does not work properly on my device. It always buffers or shows a black screen. It also does not have some of the channels that I like. It is a waste of time and space." - 1 star</li>
|
143 |
-
<li>"This app is amazing. It has a lot of channels and content in different languages and genres. The interface is simple and easy to use. The record and reminder feature is very helpful. I enjoy watching live sports on this app." - 4 stars</li>
|
144 |
-
<li>"This app is okay but it could be better. It does not support casting or mirroring to other devices. It also has ads that are annoying or intrusive. It should have more options for video quality, audio language, and subtitles." - 2 stars</li>
|
145 |
-
</ul>
|
146 |
-
<h2>Conclusion</h2>
|
147 |
-
<p>Jio TV Live APK is an app that lets you watch live TV on your device. It has over 1000+ live TV channels, 10000+ movies and shows, and exclusive content from various platforms. It also has features such as catch-up, record, reminder, favorites, etc. It is free for Jio users and requires a subscription for non-Jio users. It has received mixed reviews from its users, with some praising its features, quality, variety, and user interface, and some complaining about its bugs, errors, data consumption, battery drain, casting support, ads, etc. Overall, it is a good app for watching live TV on your device, but it needs improvement in some areas.</p>
|
148 |
-
<h2>FAQs</h2>
|
149 |
-
<ol>
|
150 |
-
<li>What are the requirements to use Jio TV Live APK?</li>
|
151 |
-
<p>To use Jio TV Live APK, you need an Android device with Android 4.4 or above, a Jio SIM or ID, and an internet connection.</p>
|
152 |
-
<li>How can I watch Jio TV Live APK on my PC or laptop?</li>
|
153 |
-
<p>To watch Jio TV Live APK on your PC or laptop, you need to visit the official website of Jio TV and sign in with your Jio ID and password. You can also use an Android emulator such as BlueStacks or Nox Player to run the app on your PC or laptop.</p>
|
154 |
-
<li>How can I watch Jio TV Live APK on my smart TV?</li>
|
155 |
-
<p>To watch Jio TV Live APK on your smart TV, you need to connect your device with the app to your smart TV using a Chromecast device or a similar device that supports screen mirroring or casting. You can also use an HDMI cable to connect your device with the app to your smart TV.</p>
|
156 |
-
<li>How can I update Jio TV Live APK?</li>
|
157 |
-
<p>To update Jio TV Live APK, you need to visit the Google Play Store or the official website of Jio TV <p>and download the latest version of the app. You can also enable the auto-update option in the settings of the app to get the updates automatically.</p>
|
158 |
-
<li>How can I contact the customer support of Jio TV Live APK?</li>
|
159 |
-
<p>To contact the customer support of Jio TV Live APK, you can call the toll-free number 1800-889-9999 or email at [email protected]. You can also visit the help and support section of the app or the website to get answers to your queries.</p>
|
160 |
-
</ol></p> 401be4b1e0<br />
|
161 |
-
<br />
|
162 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r50.py
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
from easydict import EasyDict as edict
|
2 |
-
|
3 |
-
# make training faster
|
4 |
-
# our RAM is 256G
|
5 |
-
# mount -t tmpfs -o size=140G tmpfs /train_tmp
|
6 |
-
|
7 |
-
config = edict()
|
8 |
-
config.loss = "arcface"
|
9 |
-
config.network = "r50"
|
10 |
-
config.resume = False
|
11 |
-
config.output = None
|
12 |
-
config.embedding_size = 512
|
13 |
-
config.sample_rate = 1.0
|
14 |
-
config.fp16 = True
|
15 |
-
config.momentum = 0.9
|
16 |
-
config.weight_decay = 5e-4
|
17 |
-
config.batch_size = 128
|
18 |
-
config.lr = 0.1 # batch size is 512
|
19 |
-
|
20 |
-
config.rec = "/train_tmp/ms1m-retinaface-t1"
|
21 |
-
config.num_classes = 93431
|
22 |
-
config.num_image = 5179510
|
23 |
-
config.num_epoch = 25
|
24 |
-
config.warmup_epoch = -1
|
25 |
-
config.decay_epoch = [10, 16, 22]
|
26 |
-
config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/StyleGANEX/models/__init__.py
DELETED
File without changes
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/distributions/__init__.py
DELETED
File without changes
|
spaces/AISuperheroes/05GR-Image-To-Multilingual-OCR/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: 05GR Image To Multilingual OCR
|
3 |
-
emoji: 🐢
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.6
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIZero2HeroBootcamp/MultiPDF-QA-ChatGPT-Langchain/htmlTemplates.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
css = '''
|
2 |
-
<style>
|
3 |
-
.chat-message {
|
4 |
-
padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
|
5 |
-
}
|
6 |
-
.chat-message.user {
|
7 |
-
background-color: #2b313e
|
8 |
-
}
|
9 |
-
.chat-message.bot {
|
10 |
-
background-color: #475063
|
11 |
-
}
|
12 |
-
.chat-message .avatar {
|
13 |
-
width: 20%;
|
14 |
-
}
|
15 |
-
.chat-message .avatar img {
|
16 |
-
max-width: 78px;
|
17 |
-
max-height: 78px;
|
18 |
-
border-radius: 50%;
|
19 |
-
object-fit: cover;
|
20 |
-
}
|
21 |
-
.chat-message .message {
|
22 |
-
width: 80%;
|
23 |
-
padding: 0 1.5rem;
|
24 |
-
color: #fff;
|
25 |
-
}
|
26 |
-
'''
|
27 |
-
|
28 |
-
bot_template = '''
|
29 |
-
<div class="chat-message bot">
|
30 |
-
<div class="avatar">
|
31 |
-
<img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
|
32 |
-
</div>
|
33 |
-
<div class="message">{{MSG}}</div>
|
34 |
-
</div>
|
35 |
-
'''
|
36 |
-
|
37 |
-
user_template = '''
|
38 |
-
<div class="chat-message user">
|
39 |
-
<div class="avatar">
|
40 |
-
<img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
|
41 |
-
</div>
|
42 |
-
<div class="message">{{MSG}}</div>
|
43 |
-
</div>
|
44 |
-
'''
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/client/css/message-input.css
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
#message-input {
|
2 |
-
margin-right: 30px;
|
3 |
-
height: 64px;
|
4 |
-
}
|
5 |
-
|
6 |
-
#message-input::-webkit-scrollbar {
|
7 |
-
width: 5px;
|
8 |
-
}
|
9 |
-
|
10 |
-
#message-input::-webkit-scrollbar-track {
|
11 |
-
background: #f1f1f1;
|
12 |
-
}
|
13 |
-
|
14 |
-
#message-input::-webkit-scrollbar-thumb {
|
15 |
-
background: #c7a2ff;
|
16 |
-
}
|
17 |
-
|
18 |
-
#message-input::-webkit-scrollbar-thumb:hover {
|
19 |
-
background: #8b3dff;
|
20 |
-
}
|
21 |
-
|
22 |
-
@media screen and (max-width: 360px) {
|
23 |
-
#message-input {
|
24 |
-
margin: 0;
|
25 |
-
}
|
26 |
-
}
|
27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/SwapPage.js
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
import Container from '../../container/Container.js';
|
2 |
-
|
3 |
-
const ContainerSetChildVisible = Container.prototype.setChildVisible;
|
4 |
-
|
5 |
-
var SwapPage = function (key, fadeInDuration) {
|
6 |
-
this._previousKey = this._currentKey;
|
7 |
-
var prevoiusPage = this.previousPage;
|
8 |
-
if (prevoiusPage) {
|
9 |
-
if (this.swapMode === 0) { // Invisible
|
10 |
-
ContainerSetChildVisible.call(this, prevoiusPage, false);
|
11 |
-
this.emit('pageinvisible', prevoiusPage, this._previousKey, this);
|
12 |
-
} else { // Destroy
|
13 |
-
prevoiusPage.destroy();
|
14 |
-
}
|
15 |
-
}
|
16 |
-
|
17 |
-
if (key && !this.sizerChildren.hasOwnProperty(key)) {
|
18 |
-
this.emit('createpage', key, this);
|
19 |
-
}
|
20 |
-
|
21 |
-
this._currentKey = key;
|
22 |
-
var currentPage = this.currentPage;
|
23 |
-
if (currentPage) {
|
24 |
-
ContainerSetChildVisible.call(this, currentPage, true);
|
25 |
-
this.emit('pagevisible', currentPage, this._currentKey, this);
|
26 |
-
|
27 |
-
if (fadeInDuration === undefined) {
|
28 |
-
fadeInDuration = this.fadeInDuration;
|
29 |
-
}
|
30 |
-
|
31 |
-
if (fadeInDuration > 0) {
|
32 |
-
currentPage.setAlpha(0).fadeIn(fadeInDuration, 1);
|
33 |
-
}
|
34 |
-
}
|
35 |
-
return this;
|
36 |
-
}
|
37 |
-
|
38 |
-
export default SwapPage;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AiMimicry/sovits-models/modules/attentions.py
DELETED
@@ -1,349 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import math
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from torch.nn import functional as F
|
7 |
-
|
8 |
-
import modules.commons as commons
|
9 |
-
import modules.modules as modules
|
10 |
-
from modules.modules import LayerNorm
|
11 |
-
|
12 |
-
|
13 |
-
class FFT(nn.Module):
|
14 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
|
15 |
-
proximal_bias=False, proximal_init=True, **kwargs):
|
16 |
-
super().__init__()
|
17 |
-
self.hidden_channels = hidden_channels
|
18 |
-
self.filter_channels = filter_channels
|
19 |
-
self.n_heads = n_heads
|
20 |
-
self.n_layers = n_layers
|
21 |
-
self.kernel_size = kernel_size
|
22 |
-
self.p_dropout = p_dropout
|
23 |
-
self.proximal_bias = proximal_bias
|
24 |
-
self.proximal_init = proximal_init
|
25 |
-
|
26 |
-
self.drop = nn.Dropout(p_dropout)
|
27 |
-
self.self_attn_layers = nn.ModuleList()
|
28 |
-
self.norm_layers_0 = nn.ModuleList()
|
29 |
-
self.ffn_layers = nn.ModuleList()
|
30 |
-
self.norm_layers_1 = nn.ModuleList()
|
31 |
-
for i in range(self.n_layers):
|
32 |
-
self.self_attn_layers.append(
|
33 |
-
MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
|
34 |
-
proximal_init=proximal_init))
|
35 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
36 |
-
self.ffn_layers.append(
|
37 |
-
FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
38 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
39 |
-
|
40 |
-
def forward(self, x, x_mask):
|
41 |
-
"""
|
42 |
-
x: decoder input
|
43 |
-
h: encoder output
|
44 |
-
"""
|
45 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
46 |
-
x = x * x_mask
|
47 |
-
for i in range(self.n_layers):
|
48 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
49 |
-
y = self.drop(y)
|
50 |
-
x = self.norm_layers_0[i](x + y)
|
51 |
-
|
52 |
-
y = self.ffn_layers[i](x, x_mask)
|
53 |
-
y = self.drop(y)
|
54 |
-
x = self.norm_layers_1[i](x + y)
|
55 |
-
x = x * x_mask
|
56 |
-
return x
|
57 |
-
|
58 |
-
|
59 |
-
class Encoder(nn.Module):
|
60 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
|
61 |
-
super().__init__()
|
62 |
-
self.hidden_channels = hidden_channels
|
63 |
-
self.filter_channels = filter_channels
|
64 |
-
self.n_heads = n_heads
|
65 |
-
self.n_layers = n_layers
|
66 |
-
self.kernel_size = kernel_size
|
67 |
-
self.p_dropout = p_dropout
|
68 |
-
self.window_size = window_size
|
69 |
-
|
70 |
-
self.drop = nn.Dropout(p_dropout)
|
71 |
-
self.attn_layers = nn.ModuleList()
|
72 |
-
self.norm_layers_1 = nn.ModuleList()
|
73 |
-
self.ffn_layers = nn.ModuleList()
|
74 |
-
self.norm_layers_2 = nn.ModuleList()
|
75 |
-
for i in range(self.n_layers):
|
76 |
-
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
77 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
78 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
79 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
80 |
-
|
81 |
-
def forward(self, x, x_mask):
|
82 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
83 |
-
x = x * x_mask
|
84 |
-
for i in range(self.n_layers):
|
85 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
86 |
-
y = self.drop(y)
|
87 |
-
x = self.norm_layers_1[i](x + y)
|
88 |
-
|
89 |
-
y = self.ffn_layers[i](x, x_mask)
|
90 |
-
y = self.drop(y)
|
91 |
-
x = self.norm_layers_2[i](x + y)
|
92 |
-
x = x * x_mask
|
93 |
-
return x
|
94 |
-
|
95 |
-
|
96 |
-
class Decoder(nn.Module):
|
97 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
|
98 |
-
super().__init__()
|
99 |
-
self.hidden_channels = hidden_channels
|
100 |
-
self.filter_channels = filter_channels
|
101 |
-
self.n_heads = n_heads
|
102 |
-
self.n_layers = n_layers
|
103 |
-
self.kernel_size = kernel_size
|
104 |
-
self.p_dropout = p_dropout
|
105 |
-
self.proximal_bias = proximal_bias
|
106 |
-
self.proximal_init = proximal_init
|
107 |
-
|
108 |
-
self.drop = nn.Dropout(p_dropout)
|
109 |
-
self.self_attn_layers = nn.ModuleList()
|
110 |
-
self.norm_layers_0 = nn.ModuleList()
|
111 |
-
self.encdec_attn_layers = nn.ModuleList()
|
112 |
-
self.norm_layers_1 = nn.ModuleList()
|
113 |
-
self.ffn_layers = nn.ModuleList()
|
114 |
-
self.norm_layers_2 = nn.ModuleList()
|
115 |
-
for i in range(self.n_layers):
|
116 |
-
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
|
117 |
-
self.norm_layers_0.append(LayerNorm(hidden_channels))
|
118 |
-
self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
|
119 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
120 |
-
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
|
121 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
122 |
-
|
123 |
-
def forward(self, x, x_mask, h, h_mask):
|
124 |
-
"""
|
125 |
-
x: decoder input
|
126 |
-
h: encoder output
|
127 |
-
"""
|
128 |
-
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
|
129 |
-
encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
130 |
-
x = x * x_mask
|
131 |
-
for i in range(self.n_layers):
|
132 |
-
y = self.self_attn_layers[i](x, x, self_attn_mask)
|
133 |
-
y = self.drop(y)
|
134 |
-
x = self.norm_layers_0[i](x + y)
|
135 |
-
|
136 |
-
y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
|
137 |
-
y = self.drop(y)
|
138 |
-
x = self.norm_layers_1[i](x + y)
|
139 |
-
|
140 |
-
y = self.ffn_layers[i](x, x_mask)
|
141 |
-
y = self.drop(y)
|
142 |
-
x = self.norm_layers_2[i](x + y)
|
143 |
-
x = x * x_mask
|
144 |
-
return x
|
145 |
-
|
146 |
-
|
147 |
-
class MultiHeadAttention(nn.Module):
    """Multi-head attention over [batch, channels, time] tensors.

    Optionally applies relative positional embeddings (``window_size``),
    a local band mask (``block_length``), and a proximal bias / proximal
    initialization scheme intended for self-attention.
    """

    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        # Each head gets channels // n_heads key channels.
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size  # half-window of the relative position embeddings
        self.heads_share = heads_share  # share relative embeddings across heads
        self.block_length = block_length  # width of the local attention band, if any
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None  # last attention weights, stored by forward() for inspection

        self.k_channels = channels // n_heads
        # 1x1 convolutions act as per-timestep linear projections.
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            # Learned relative-position embeddings for keys and values,
            # one row per relative offset in [-window_size, window_size].
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            # Initialize keys identical to queries so attention initially
            # favors nearby positions.
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        """x: query source [b, channels, t_t]; c: key/value source [b, channels, t_s]."""
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        # Attention weights are kept on the module for later inspection.
        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        # Scaled dot-product scores: [b, n_h, t_t, t_s].
        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            # Add content-to-relative-position scores.
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            # -1e4 (rather than -inf) keeps the masked softmax numerically safe.
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                # Restrict attention to a band of +/- block_length positions.
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            # Add relative-position contribution to the values as well.
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        # Slice (or zero-pad) the learned table to exactly 2*length-1 offsets.
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # padd along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
301 |
-
|
302 |
-
|
303 |
-
class FFN(nn.Module):
    """Position-wise feed-forward block: conv -> activation -> dropout -> conv.

    Operates on [batch, channels, time] tensors and re-applies the time mask
    around every convolution so padded frames stay zero.
    """

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size,
                 p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        # Causal padding keeps each output frame from seeing future frames.
        self.padding = self._causal_padding if causal else self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        hidden = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            # Sigmoid-based GELU approximation.
            hidden = hidden * torch.sigmoid(1.702 * hidden)
        else:
            hidden = torch.relu(hidden)
        hidden = self.drop(hidden)
        hidden = self.conv_2(self.padding(hidden * x_mask))
        return hidden * x_mask

    def _causal_padding(self, x):
        # Left-pad only, so the convolution is strictly causal in time.
        if self.kernel_size == 1:
            return x
        pad_spec = [[0, 0], [0, 0], [self.kernel_size - 1, 0]]
        return F.pad(x, commons.convert_pad_shape(pad_spec))

    def _same_padding(self, x):
        # Symmetric padding preserving the sequence length.
        if self.kernel_size == 1:
            return x
        left = (self.kernel_size - 1) // 2
        right = self.kernel_size // 2
        return F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [left, right]]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWang/lama/saicinpainting/__init__.py
DELETED
File without changes
|
spaces/Alican/pixera/util/image_pool.py
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
import random
|
2 |
-
import torch
|
3 |
-
|
4 |
-
|
5 |
-
class ImagePool():
    """History buffer of previously generated images.

    Lets a discriminator train against a mix of fresh and older generator
    outputs instead of only the most recent batch.
    """

    def __init__(self, pool_size):
        """Create a pool holding at most ``pool_size`` images (0 disables it)."""
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch of images drawn from the pool.

        While the pool is filling, each incoming image is stored and returned
        as-is. Once full, each image is swapped with a stored one 50% of the
        time; otherwise it is returned directly.
        """
        if self.pool_size == 0:
            # Buffering disabled: hand the input batch straight back.
            return images
        selected = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:
                # Pool not full yet: keep the new image and also return it.
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                selected.append(image)
            elif random.uniform(0, 1) > 0.5:
                # Swap: return a stored image, keep the new one in its place.
                idx = random.randint(0, self.pool_size - 1)  # randint is inclusive
                stored = self.images[idx].clone()
                self.images[idx] = image
                selected.append(stored)
            else:
                # Pass-through: return the current image unchanged.
                selected.append(image)
        return torch.cat(selected, 0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/longcode/jpge.cpp
DELETED
@@ -1,1049 +0,0 @@
|
|
1 |
-
// jpge.cpp - C++ class for JPEG compression.
|
2 |
-
// Public domain, Rich Geldreich <[email protected]>
|
3 |
-
// v1.01, Dec. 18, 2010 - Initial release
|
4 |
-
// v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
|
5 |
-
// v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
|
6 |
-
// Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
|
7 |
-
// v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
|
8 |
-
// Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
|
9 |
-
// Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
|
10 |
-
|
11 |
-
#include "jpge.h"
|
12 |
-
|
13 |
-
#include <stdlib.h>
|
14 |
-
#include <string.h>
|
15 |
-
#if PLATFORM_WINDOWS
|
16 |
-
#include <malloc.h>
|
17 |
-
#endif
|
18 |
-
|
19 |
-
#define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
|
20 |
-
#define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
|
21 |
-
|
22 |
-
namespace jpge {
|
23 |
-
|
24 |
-
// Thin wrappers routing all of the encoder's allocations through the
// engine's allocator (FMemory). Fixes a stray double semicolon in jpge_free.
static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
static inline void jpge_free(void *p) { FMemory::Free(p); }
|
26 |
-
|
27 |
-
// Various JPEG enums and tables.
|
28 |
-
enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
|
29 |
-
enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };
|
30 |
-
|
31 |
-
static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
|
32 |
-
static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
|
33 |
-
static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
|
34 |
-
static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
|
35 |
-
static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
|
36 |
-
static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
|
37 |
-
static uint8 s_ac_lum_val[AC_LUM_CODES] =
|
38 |
-
{
|
39 |
-
0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
|
40 |
-
0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
|
41 |
-
0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
|
42 |
-
0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
|
43 |
-
0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
|
44 |
-
0xf9,0xfa
|
45 |
-
};
|
46 |
-
static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
|
47 |
-
static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
|
48 |
-
static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
|
49 |
-
static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
|
50 |
-
{
|
51 |
-
0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
|
52 |
-
0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
|
53 |
-
0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
|
54 |
-
0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
|
55 |
-
0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
|
56 |
-
0xf9,0xfa
|
57 |
-
};
|
58 |
-
|
59 |
-
// Low-level helper functions.

// Zero-fill an object in place; only valid for trivially-copyable types.
template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
|
61 |
-
|
62 |
-
const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
|
63 |
-
// Saturate an int into [0, 255]. The unsigned cast makes a single range
// test detect both negative values and values above 255.
static inline uint8 clamp(int i)
{
  if (static_cast<uint>(i) > 255U)
    i = (i < 0) ? 0 : 255;
  return static_cast<uint8>(i);
}
|
64 |
-
|
65 |
-
// Convert packed RGB24 pixels to YCbCr (fixed-point 16-bit coefficients).
static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for (int i = num_pixels; i; i--, pDst += 3, pSrc += 3)
  {
    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
    const int y = (r * YR + g * YG + b * YB + 32768) >> 16;
    pDst[0] = static_cast<uint8>(y);
    // Chroma is centered on 128 and clamped to the byte range.
    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
  }
}
|
75 |
-
|
76 |
-
// Convert packed RGB24 pixels to 8-bit luma only.
static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for (int i = num_pixels; i; i--, pDst++, pSrc += 3)
    *pDst = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
}
|
81 |
-
|
82 |
-
// Convert packed RGBA32 pixels to YCbCr; the alpha byte is ignored.
static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for (int i = num_pixels; i; i--, pDst += 3, pSrc += 4)
  {
    const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
    const int y = (r * YR + g * YG + b * YB + 32768) >> 16;
    pDst[0] = static_cast<uint8>(y);
    pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
    pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
  }
}
|
92 |
-
|
93 |
-
// Convert packed RGBA32 pixels to 8-bit luma only; alpha is ignored.
static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
{
  for (int i = num_pixels; i; i--, pDst++, pSrc += 4)
    *pDst = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
}
|
98 |
-
|
99 |
-
// Expand grayscale to YCbCr by setting both chroma channels to neutral 128.
static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
{
  for (int i = num_pixels; i; i--, pSrc++, pDst += 3)
  {
    pDst[0] = *pSrc;
    pDst[1] = 128;
    pDst[2] = 128;
  }
}
|
103 |
-
|
104 |
-
// Forward DCT - DCT derived from jfdctint.
|
105 |
-
#define CONST_BITS 13
|
106 |
-
#define ROW_BITS 2
|
107 |
-
#define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
|
108 |
-
#define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
|
109 |
-
#define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
|
110 |
-
int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
|
111 |
-
int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
|
112 |
-
int32 u1 = DCT_MUL(t12 + t13, 4433); \
|
113 |
-
s2 = u1 + DCT_MUL(t13, 6270); \
|
114 |
-
s6 = u1 + DCT_MUL(t12, -15137); \
|
115 |
-
u1 = t4 + t7; \
|
116 |
-
int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
|
117 |
-
int32 z5 = DCT_MUL(u3 + u4, 9633); \
|
118 |
-
t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
|
119 |
-
t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
|
120 |
-
u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
|
121 |
-
u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
|
122 |
-
u3 += z5; u4 += z5; \
|
123 |
-
s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
|
124 |
-
|
125 |
-
// In-place 2-D forward DCT on an 8x8 block: 8 row passes followed by
// 8 column passes of the 1-D DCT1D macro (derived from jfdctint).
static void DCT2D(int32 *p)
{
  int32 c, *q = p;
  for (c = 7; c >= 0; c--, q += 8)
  {
    int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
    // Row pass keeps ROW_BITS extra fractional bits for precision.
    q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
    q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
  }
  for (q = p, c = 7; c >= 0; c--, q++)
  {
    int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
    DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
    // Column pass removes the extra row bits plus a final 3-bit (divide-by-8) scale.
    q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
    q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
  }
}
|
143 |
-
|
144 |
-
struct sym_freq { uint m_key, m_sym_index; };
|
145 |
-
|
146 |
-
// Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
{
  const uint cMaxPasses = 4;
  // One 256-entry histogram per byte of the 32-bit key.
  uint32 hist[256 * cMaxPasses]; clear_obj(hist);
  for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
  sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
  // Skip high-byte passes where every key has a zero byte (nothing to sort).
  uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
  for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
  {
    const uint32* pHist = &hist[pass << 8];
    // Prefix-sum the histogram into starting offsets for a stable scatter.
    uint offsets[256], cur_ofs = 0;
    for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
    for (uint i = 0; i < num_syms; i++)
      pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
    // Ping-pong the two buffers between passes.
    sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
  }
  return pCur_syms;
}
|
165 |
-
|
166 |
-
// calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
// In-place computation of minimum-redundancy (Huffman) code lengths: on entry
// A[i].m_key holds symbol frequencies sorted in increasing order; on exit it
// holds each symbol's code length.
static void calculate_minimum_redundancy(sym_freq *A, int n)
{
  int root, leaf, next, avbl, used, dpth;
  if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
  // Phase 1: build the Huffman tree in place; keys become parent indices.
  A[0].m_key += A[1].m_key; root = 0; leaf = 2;
  for (next=1; next < n-1; next++)
  {
    if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
    if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
  }
  // Phase 2: convert parent indices into internal-node depths.
  A[n-2].m_key = 0;
  for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
  // Phase 3: convert internal-node depths into leaf code lengths.
  avbl = 1; used = dpth = 0; root = n-2; next = n-1;
  while (avbl>0)
  {
    while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
    while (avbl>used) { A[next--].m_key = dpth; avbl--; }
    avbl = 2*used; dpth++; used = 0;
  }
}
|
187 |
-
|
188 |
-
// Limits canonical Huffman code table's max code size to max_code_size.
static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
{
  if (code_list_len <= 1) return;

  // Fold all codes longer than the limit into the longest allowed length.
  for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];

  // Kraft sum scaled by 2^max_code_size; a complete prefix code must total
  // exactly 1 << max_code_size.
  uint32 total = 0;
  for (int i = max_code_size; i > 0; i--)
    total += (((uint32)pNum_codes[i]) << (max_code_size - i));

  // Repair over-subscription: repeatedly drop one max-length code and split a
  // shorter code into two longer ones until the Kraft constraint is met.
  while (total != (1UL << max_code_size))
  {
    pNum_codes[max_code_size]--;
    for (int i = max_code_size - 1; i > 0; i--)
    {
      if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
    }
    total--;
  }
}
|
209 |
-
|
210 |
-
// Generates an optimized Huffman table.
void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
{
  sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
  syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
  int num_used_syms = 1;
  const uint32 *pSym_count = &m_huff_count[table_num][0];
  // Collect only symbols that actually occur; m_sym_index is offset by 1
  // because index 0 is reserved for the dummy symbol.
  for (int i = 0; i < table_len; i++)
    if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
  sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
  calculate_minimum_redundancy(pSyms, num_used_syms);

  // Count the # of symbols of each code size.
  int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
  for (int i = 0; i < num_used_syms; i++)
    num_codes[pSyms[i].m_key]++;

  const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
  huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);

  // Compute m_huff_bits array, which contains the # of symbols per code size.
  clear_obj(m_huff_bits[table_num]);
  for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
    m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);

  // Remove the dummy symbol added above, which must be in largest bucket.
  for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
  {
    if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
  }

  // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
  for (int i = num_used_syms - 1; i >= 1; i--)
    m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
}
|
245 |
-
|
246 |
-
// JPEG marker generation.

// Write one byte to the output stream, latching any stream failure so
// callers can check m_all_stream_writes_succeeded once at the end.
void jpeg_encoder::emit_byte(uint8 i)
{
  m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
}
|
251 |
-
|
252 |
-
// Write a 16-bit value in big-endian order, as required by JPEG.
void jpeg_encoder::emit_word(uint i)
{
  emit_byte(uint8(i >> 8));
  emit_byte(uint8(i & 0xFF));
}
|
256 |
-
|
257 |
-
void jpeg_encoder::emit_marker(int marker)
|
258 |
-
{
|
259 |
-
emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
|
260 |
-
}
|
261 |
-
|
262 |
-
// Emit JFIF marker
void jpeg_encoder::emit_jfif_app0()
{
  emit_marker(M_APP0);
  emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1); // segment length
  emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
  emit_byte(0); // identifier NUL terminator
  emit_byte(1); /* Major version */
  emit_byte(1); /* Minor version */
  emit_byte(0); /* Density unit */
  emit_word(1); // horizontal pixel density
  emit_word(1); // vertical pixel density
  emit_byte(0); /* No thumbnail image */
  emit_byte(0);
}
|
277 |
-
|
278 |
-
// Emit quantization tables
|
279 |
-
void jpeg_encoder::emit_dqt()
|
280 |
-
{
|
281 |
-
for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
|
282 |
-
{
|
283 |
-
emit_marker(M_DQT);
|
284 |
-
emit_word(64 + 1 + 2);
|
285 |
-
emit_byte(static_cast<uint8>(i));
|
286 |
-
for (int j = 0; j < 64; j++)
|
287 |
-
emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
|
288 |
-
}
|
289 |
-
}
|
290 |
-
|
291 |
-
// Emit start of frame marker
void jpeg_encoder::emit_sof()
{
  emit_marker(M_SOF0); /* baseline */
  emit_word(3 * m_num_components + 2 + 5 + 1); // segment length
  emit_byte(8); /* precision */
  emit_word(m_image_y);
  emit_word(m_image_x);
  emit_byte(m_num_components);
  for (int i = 0; i < m_num_components; i++)
  {
    emit_byte(static_cast<uint8>(i + 1)); /* component ID */
    emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
    emit_byte(i > 0); /* quant. table num: 0 for luma, 1 for both chroma components */
  }
}
|
307 |
-
|
308 |
-
// Emit Huffman table.
|
309 |
-
void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
|
310 |
-
{
|
311 |
-
emit_marker(M_DHT);
|
312 |
-
|
313 |
-
int length = 0;
|
314 |
-
for (int i = 1; i <= 16; i++)
|
315 |
-
length += bits[i];
|
316 |
-
|
317 |
-
emit_word(length + 2 + 1 + 16);
|
318 |
-
emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
|
319 |
-
|
320 |
-
for (int i = 1; i <= 16; i++)
|
321 |
-
emit_byte(bits[i]);
|
322 |
-
|
323 |
-
for (int i = 0; i < length; i++)
|
324 |
-
emit_byte(val[i]);
|
325 |
-
}
|
326 |
-
|
327 |
-
// Emit all Huffman tables.
|
328 |
-
void jpeg_encoder::emit_dhts()
|
329 |
-
{
|
330 |
-
emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
|
331 |
-
emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
|
332 |
-
if (m_num_components == 3)
|
333 |
-
{
|
334 |
-
emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
|
335 |
-
emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
|
336 |
-
}
|
337 |
-
}
|
338 |
-
|
339 |
-
// emit start of scan
|
340 |
-
void jpeg_encoder::emit_sos()
|
341 |
-
{
|
342 |
-
emit_marker(M_SOS);
|
343 |
-
emit_word(2 * m_num_components + 2 + 1 + 3);
|
344 |
-
emit_byte(m_num_components);
|
345 |
-
for (int i = 0; i < m_num_components; i++)
|
346 |
-
{
|
347 |
-
emit_byte(static_cast<uint8>(i + 1));
|
348 |
-
if (i == 0)
|
349 |
-
emit_byte((0 << 4) + 0);
|
350 |
-
else
|
351 |
-
emit_byte((1 << 4) + 1);
|
352 |
-
}
|
353 |
-
emit_byte(0); /* spectral selection */
|
354 |
-
emit_byte(63);
|
355 |
-
emit_byte(0);
|
356 |
-
}
|
357 |
-
|
358 |
-
// Emit all markers at beginning of image file.
|
359 |
-
void jpeg_encoder::emit_markers()
|
360 |
-
{
|
361 |
-
emit_marker(M_SOI);
|
362 |
-
emit_jfif_app0();
|
363 |
-
emit_dqt();
|
364 |
-
emit_sof();
|
365 |
-
emit_dhts();
|
366 |
-
emit_sos();
|
367 |
-
}
|
368 |
-
|
369 |
-
// Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
|
370 |
-
void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
|
371 |
-
{
|
372 |
-
int i, l, last_p, si;
|
373 |
-
uint8 huff_size[257];
|
374 |
-
uint huff_code[257];
|
375 |
-
uint code;
|
376 |
-
|
377 |
-
int p = 0;
|
378 |
-
for (l = 1; l <= 16; l++)
|
379 |
-
for (i = 1; i <= bits[l]; i++)
|
380 |
-
huff_size[p++] = (char)l;
|
381 |
-
|
382 |
-
huff_size[p] = 0; last_p = p; // write sentinel
|
383 |
-
|
384 |
-
code = 0; si = huff_size[0]; p = 0;
|
385 |
-
|
386 |
-
while (huff_size[p])
|
387 |
-
{
|
388 |
-
while (huff_size[p] == si)
|
389 |
-
huff_code[p++] = code++;
|
390 |
-
code <<= 1;
|
391 |
-
si++;
|
392 |
-
}
|
393 |
-
|
394 |
-
memset(codes, 0, sizeof(codes[0])*256);
|
395 |
-
memset(code_sizes, 0, sizeof(code_sizes[0])*256);
|
396 |
-
for (p = 0; p < last_p; p++)
|
397 |
-
{
|
398 |
-
codes[val[p]] = huff_code[p];
|
399 |
-
code_sizes[val[p]] = huff_size[p];
|
400 |
-
}
|
401 |
-
}
|
402 |
-
|
403 |
-
// Quantization table generation.
|
404 |
-
void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
|
405 |
-
{
|
406 |
-
int32 q;
|
407 |
-
if (m_params.m_quality < 50)
|
408 |
-
q = 5000 / m_params.m_quality;
|
409 |
-
else
|
410 |
-
q = 200 - m_params.m_quality * 2;
|
411 |
-
for (int i = 0; i < 64; i++)
|
412 |
-
{
|
413 |
-
int32 j = *pSrc++; j = (j * q + 50L) / 100L;
|
414 |
-
*pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
|
415 |
-
}
|
416 |
-
}
|
417 |
-
|
418 |
-
// Higher-level methods.
|
419 |
-
void jpeg_encoder::first_pass_init()
|
420 |
-
{
|
421 |
-
m_bit_buffer = 0; m_bits_in = 0;
|
422 |
-
memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
|
423 |
-
m_mcu_y_ofs = 0;
|
424 |
-
m_pass_num = 1;
|
425 |
-
}
|
426 |
-
|
427 |
-
bool jpeg_encoder::second_pass_init()
|
428 |
-
{
|
429 |
-
compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
|
430 |
-
compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
|
431 |
-
if (m_num_components > 1)
|
432 |
-
{
|
433 |
-
compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
|
434 |
-
compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
|
435 |
-
}
|
436 |
-
first_pass_init();
|
437 |
-
emit_markers();
|
438 |
-
m_pass_num = 2;
|
439 |
-
return true;
|
440 |
-
}
|
441 |
-
|
442 |
-
bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
|
443 |
-
{
|
444 |
-
m_num_components = 3;
|
445 |
-
switch (m_params.m_subsampling)
|
446 |
-
{
|
447 |
-
case Y_ONLY:
|
448 |
-
{
|
449 |
-
m_num_components = 1;
|
450 |
-
m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
|
451 |
-
m_mcu_x = 8; m_mcu_y = 8;
|
452 |
-
break;
|
453 |
-
}
|
454 |
-
case H1V1:
|
455 |
-
{
|
456 |
-
m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
|
457 |
-
m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
|
458 |
-
m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
|
459 |
-
m_mcu_x = 8; m_mcu_y = 8;
|
460 |
-
break;
|
461 |
-
}
|
462 |
-
case H2V1:
|
463 |
-
{
|
464 |
-
m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
|
465 |
-
m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
|
466 |
-
m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
|
467 |
-
m_mcu_x = 16; m_mcu_y = 8;
|
468 |
-
break;
|
469 |
-
}
|
470 |
-
case H2V2:
|
471 |
-
{
|
472 |
-
m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
|
473 |
-
m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
|
474 |
-
m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
|
475 |
-
m_mcu_x = 16; m_mcu_y = 16;
|
476 |
-
}
|
477 |
-
}
|
478 |
-
|
479 |
-
m_image_x = p_x_res; m_image_y = p_y_res;
|
480 |
-
m_image_bpp = src_channels;
|
481 |
-
m_image_bpl = m_image_x * src_channels;
|
482 |
-
m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
|
483 |
-
m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
|
484 |
-
m_image_bpl_xlt = m_image_x * m_num_components;
|
485 |
-
m_image_bpl_mcu = m_image_x_mcu * m_num_components;
|
486 |
-
m_mcus_per_row = m_image_x_mcu / m_mcu_x;
|
487 |
-
|
488 |
-
if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
|
489 |
-
for (int i = 1; i < m_mcu_y; i++)
|
490 |
-
m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
|
491 |
-
|
492 |
-
compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
|
493 |
-
compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);
|
494 |
-
|
495 |
-
m_out_buf_left = JPGE_OUT_BUF_SIZE;
|
496 |
-
m_pOut_buf = m_out_buf;
|
497 |
-
|
498 |
-
if (m_params.m_two_pass_flag)
|
499 |
-
{
|
500 |
-
clear_obj(m_huff_count);
|
501 |
-
first_pass_init();
|
502 |
-
}
|
503 |
-
else
|
504 |
-
{
|
505 |
-
memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES);
|
506 |
-
memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES);
|
507 |
-
memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES);
|
508 |
-
memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES);
|
509 |
-
if (!second_pass_init()) return false; // in effect, skip over the first pass
|
510 |
-
}
|
511 |
-
return m_all_stream_writes_succeeded;
|
512 |
-
}
|
513 |
-
|
514 |
-
void jpeg_encoder::load_block_8_8_grey(int x)
|
515 |
-
{
|
516 |
-
uint8 *pSrc;
|
517 |
-
sample_array_t *pDst = m_sample_array;
|
518 |
-
x <<= 3;
|
519 |
-
for (int i = 0; i < 8; i++, pDst += 8)
|
520 |
-
{
|
521 |
-
pSrc = m_mcu_lines[i] + x;
|
522 |
-
pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
|
523 |
-
pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
|
524 |
-
}
|
525 |
-
}
|
526 |
-
|
527 |
-
void jpeg_encoder::load_block_8_8(int x, int y, int c)
|
528 |
-
{
|
529 |
-
uint8 *pSrc;
|
530 |
-
sample_array_t *pDst = m_sample_array;
|
531 |
-
x = (x * (8 * 3)) + c;
|
532 |
-
y <<= 3;
|
533 |
-
for (int i = 0; i < 8; i++, pDst += 8)
|
534 |
-
{
|
535 |
-
pSrc = m_mcu_lines[y + i] + x;
|
536 |
-
pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
|
537 |
-
pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
|
538 |
-
}
|
539 |
-
}
|
540 |
-
|
541 |
-
void jpeg_encoder::load_block_16_8(int x, int c)
|
542 |
-
{
|
543 |
-
uint8 *pSrc1, *pSrc2;
|
544 |
-
sample_array_t *pDst = m_sample_array;
|
545 |
-
x = (x * (16 * 3)) + c;
|
546 |
-
int a = 0, b = 2;
|
547 |
-
for (int i = 0; i < 16; i += 2, pDst += 8)
|
548 |
-
{
|
549 |
-
pSrc1 = m_mcu_lines[i + 0] + x;
|
550 |
-
pSrc2 = m_mcu_lines[i + 1] + x;
|
551 |
-
pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
|
552 |
-
pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
|
553 |
-
pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
|
554 |
-
pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
|
555 |
-
int temp = a; a = b; b = temp;
|
556 |
-
}
|
557 |
-
}
|
558 |
-
|
559 |
-
void jpeg_encoder::load_block_16_8_8(int x, int c)
|
560 |
-
{
|
561 |
-
uint8 *pSrc1;
|
562 |
-
sample_array_t *pDst = m_sample_array;
|
563 |
-
x = (x * (16 * 3)) + c;
|
564 |
-
for (int i = 0; i < 8; i++, pDst += 8)
|
565 |
-
{
|
566 |
-
pSrc1 = m_mcu_lines[i + 0] + x;
|
567 |
-
pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
|
568 |
-
pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
|
569 |
-
pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
|
570 |
-
pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
|
571 |
-
}
|
572 |
-
}
|
573 |
-
|
574 |
-
void jpeg_encoder::load_quantized_coefficients(int component_num)
|
575 |
-
{
|
576 |
-
int32 *q = m_quantization_tables[component_num > 0];
|
577 |
-
int16 *pDst = m_coefficient_array;
|
578 |
-
for (int i = 0; i < 64; i++)
|
579 |
-
{
|
580 |
-
sample_array_t j = m_sample_array[s_zag[i]];
|
581 |
-
if (j < 0)
|
582 |
-
{
|
583 |
-
if ((j = -j + (*q >> 1)) < *q)
|
584 |
-
*pDst++ = 0;
|
585 |
-
else
|
586 |
-
*pDst++ = static_cast<int16>(-(j / *q));
|
587 |
-
}
|
588 |
-
else
|
589 |
-
{
|
590 |
-
if ((j = j + (*q >> 1)) < *q)
|
591 |
-
*pDst++ = 0;
|
592 |
-
else
|
593 |
-
*pDst++ = static_cast<int16>((j / *q));
|
594 |
-
}
|
595 |
-
q++;
|
596 |
-
}
|
597 |
-
}
|
598 |
-
|
599 |
-
void jpeg_encoder::flush_output_buffer()
|
600 |
-
{
|
601 |
-
if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
|
602 |
-
m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
|
603 |
-
m_pOut_buf = m_out_buf;
|
604 |
-
m_out_buf_left = JPGE_OUT_BUF_SIZE;
|
605 |
-
}
|
606 |
-
|
607 |
-
void jpeg_encoder::put_bits(uint bits, uint len)
|
608 |
-
{
|
609 |
-
m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
|
610 |
-
while (m_bits_in >= 8)
|
611 |
-
{
|
612 |
-
uint8 c;
|
613 |
-
#define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
|
614 |
-
JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
|
615 |
-
if (c == 0xFF) JPGE_PUT_BYTE(0);
|
616 |
-
m_bit_buffer <<= 8;
|
617 |
-
m_bits_in -= 8;
|
618 |
-
}
|
619 |
-
}
|
620 |
-
|
621 |
-
void jpeg_encoder::code_coefficients_pass_one(int component_num)
|
622 |
-
{
|
623 |
-
if (component_num >= 3) return; // just to shut up static analysis
|
624 |
-
int i, run_len, nbits, temp1;
|
625 |
-
int16 *src = m_coefficient_array;
|
626 |
-
uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
|
627 |
-
|
628 |
-
temp1 = src[0] - m_last_dc_val[component_num];
|
629 |
-
m_last_dc_val[component_num] = src[0];
|
630 |
-
if (temp1 < 0) temp1 = -temp1;
|
631 |
-
|
632 |
-
nbits = 0;
|
633 |
-
while (temp1)
|
634 |
-
{
|
635 |
-
nbits++; temp1 >>= 1;
|
636 |
-
}
|
637 |
-
|
638 |
-
dc_count[nbits]++;
|
639 |
-
for (run_len = 0, i = 1; i < 64; i++)
|
640 |
-
{
|
641 |
-
if ((temp1 = m_coefficient_array[i]) == 0)
|
642 |
-
run_len++;
|
643 |
-
else
|
644 |
-
{
|
645 |
-
while (run_len >= 16)
|
646 |
-
{
|
647 |
-
ac_count[0xF0]++;
|
648 |
-
run_len -= 16;
|
649 |
-
}
|
650 |
-
if (temp1 < 0) temp1 = -temp1;
|
651 |
-
nbits = 1;
|
652 |
-
while (temp1 >>= 1) nbits++;
|
653 |
-
ac_count[(run_len << 4) + nbits]++;
|
654 |
-
run_len = 0;
|
655 |
-
}
|
656 |
-
}
|
657 |
-
if (run_len) ac_count[0]++;
|
658 |
-
}
|
659 |
-
|
660 |
-
void jpeg_encoder::code_coefficients_pass_two(int component_num)
|
661 |
-
{
|
662 |
-
int i, j, run_len, nbits, temp1, temp2;
|
663 |
-
int16 *pSrc = m_coefficient_array;
|
664 |
-
uint *codes[2];
|
665 |
-
uint8 *code_sizes[2];
|
666 |
-
|
667 |
-
if (component_num == 0)
|
668 |
-
{
|
669 |
-
codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
|
670 |
-
code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
|
671 |
-
}
|
672 |
-
else
|
673 |
-
{
|
674 |
-
codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
|
675 |
-
code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
|
676 |
-
}
|
677 |
-
|
678 |
-
temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
|
679 |
-
m_last_dc_val[component_num] = pSrc[0];
|
680 |
-
|
681 |
-
if (temp1 < 0)
|
682 |
-
{
|
683 |
-
temp1 = -temp1; temp2--;
|
684 |
-
}
|
685 |
-
|
686 |
-
nbits = 0;
|
687 |
-
while (temp1)
|
688 |
-
{
|
689 |
-
nbits++; temp1 >>= 1;
|
690 |
-
}
|
691 |
-
|
692 |
-
put_bits(codes[0][nbits], code_sizes[0][nbits]);
|
693 |
-
if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
|
694 |
-
|
695 |
-
for (run_len = 0, i = 1; i < 64; i++)
|
696 |
-
{
|
697 |
-
if ((temp1 = m_coefficient_array[i]) == 0)
|
698 |
-
run_len++;
|
699 |
-
else
|
700 |
-
{
|
701 |
-
while (run_len >= 16)
|
702 |
-
{
|
703 |
-
put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
|
704 |
-
run_len -= 16;
|
705 |
-
}
|
706 |
-
if ((temp2 = temp1) < 0)
|
707 |
-
{
|
708 |
-
temp1 = -temp1;
|
709 |
-
temp2--;
|
710 |
-
}
|
711 |
-
nbits = 1;
|
712 |
-
while (temp1 >>= 1)
|
713 |
-
nbits++;
|
714 |
-
j = (run_len << 4) + nbits;
|
715 |
-
put_bits(codes[1][j], code_sizes[1][j]);
|
716 |
-
put_bits(temp2 & ((1 << nbits) - 1), nbits);
|
717 |
-
run_len = 0;
|
718 |
-
}
|
719 |
-
}
|
720 |
-
if (run_len)
|
721 |
-
put_bits(codes[1][0], code_sizes[1][0]);
|
722 |
-
}
|
723 |
-
|
724 |
-
void jpeg_encoder::code_block(int component_num)
|
725 |
-
{
|
726 |
-
DCT2D(m_sample_array);
|
727 |
-
load_quantized_coefficients(component_num);
|
728 |
-
if (m_pass_num == 1)
|
729 |
-
code_coefficients_pass_one(component_num);
|
730 |
-
else
|
731 |
-
code_coefficients_pass_two(component_num);
|
732 |
-
}
|
733 |
-
|
734 |
-
void jpeg_encoder::process_mcu_row()
|
735 |
-
{
|
736 |
-
if (m_num_components == 1)
|
737 |
-
{
|
738 |
-
for (int i = 0; i < m_mcus_per_row; i++)
|
739 |
-
{
|
740 |
-
load_block_8_8_grey(i); code_block(0);
|
741 |
-
}
|
742 |
-
}
|
743 |
-
else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
|
744 |
-
{
|
745 |
-
for (int i = 0; i < m_mcus_per_row; i++)
|
746 |
-
{
|
747 |
-
load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
|
748 |
-
}
|
749 |
-
}
|
750 |
-
else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
|
751 |
-
{
|
752 |
-
for (int i = 0; i < m_mcus_per_row; i++)
|
753 |
-
{
|
754 |
-
load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
|
755 |
-
load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
|
756 |
-
}
|
757 |
-
}
|
758 |
-
else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
|
759 |
-
{
|
760 |
-
for (int i = 0; i < m_mcus_per_row; i++)
|
761 |
-
{
|
762 |
-
load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
|
763 |
-
load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
|
764 |
-
load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
|
765 |
-
}
|
766 |
-
}
|
767 |
-
}
|
768 |
-
|
769 |
-
bool jpeg_encoder::terminate_pass_one()
|
770 |
-
{
|
771 |
-
optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
|
772 |
-
if (m_num_components > 1)
|
773 |
-
{
|
774 |
-
optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
|
775 |
-
}
|
776 |
-
return second_pass_init();
|
777 |
-
}
|
778 |
-
|
779 |
-
bool jpeg_encoder::terminate_pass_two()
|
780 |
-
{
|
781 |
-
put_bits(0x7F, 7);
|
782 |
-
flush_output_buffer();
|
783 |
-
emit_marker(M_EOI);
|
784 |
-
m_pass_num++; // purposely bump up m_pass_num, for debugging
|
785 |
-
return true;
|
786 |
-
}
|
787 |
-
|
788 |
-
bool jpeg_encoder::process_end_of_image()
|
789 |
-
{
|
790 |
-
if (m_mcu_y_ofs)
|
791 |
-
{
|
792 |
-
if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
|
793 |
-
{
|
794 |
-
for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
|
795 |
-
memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
|
796 |
-
}
|
797 |
-
|
798 |
-
process_mcu_row();
|
799 |
-
}
|
800 |
-
|
801 |
-
if (m_pass_num == 1)
|
802 |
-
return terminate_pass_one();
|
803 |
-
else
|
804 |
-
return terminate_pass_two();
|
805 |
-
}
|
806 |
-
|
807 |
-
void jpeg_encoder::load_mcu(const void *pSrc)
|
808 |
-
{
|
809 |
-
const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
|
810 |
-
|
811 |
-
uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
|
812 |
-
|
813 |
-
if (m_num_components == 1)
|
814 |
-
{
|
815 |
-
if (m_image_bpp == 4)
|
816 |
-
RGBA_to_Y(pDst, Psrc, m_image_x);
|
817 |
-
else if (m_image_bpp == 3)
|
818 |
-
RGB_to_Y(pDst, Psrc, m_image_x);
|
819 |
-
else
|
820 |
-
memcpy(pDst, Psrc, m_image_x);
|
821 |
-
}
|
822 |
-
else
|
823 |
-
{
|
824 |
-
if (m_image_bpp == 4)
|
825 |
-
RGBA_to_YCC(pDst, Psrc, m_image_x);
|
826 |
-
else if (m_image_bpp == 3)
|
827 |
-
RGB_to_YCC(pDst, Psrc, m_image_x);
|
828 |
-
else
|
829 |
-
Y_to_YCC(pDst, Psrc, m_image_x);
|
830 |
-
}
|
831 |
-
|
832 |
-
// Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
|
833 |
-
if (m_num_components == 1)
|
834 |
-
memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
|
835 |
-
else
|
836 |
-
{
|
837 |
-
const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
|
838 |
-
uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
|
839 |
-
for (int i = m_image_x; i < m_image_x_mcu; i++)
|
840 |
-
{
|
841 |
-
*q++ = y; *q++ = cb; *q++ = cr;
|
842 |
-
}
|
843 |
-
}
|
844 |
-
|
845 |
-
if (++m_mcu_y_ofs == m_mcu_y)
|
846 |
-
{
|
847 |
-
process_mcu_row();
|
848 |
-
m_mcu_y_ofs = 0;
|
849 |
-
}
|
850 |
-
}
|
851 |
-
|
852 |
-
void jpeg_encoder::clear()
|
853 |
-
{
|
854 |
-
m_mcu_lines[0] = NULL;
|
855 |
-
m_pass_num = 0;
|
856 |
-
m_all_stream_writes_succeeded = true;
|
857 |
-
}
|
858 |
-
|
859 |
-
jpeg_encoder::jpeg_encoder()
|
860 |
-
{
|
861 |
-
clear();
|
862 |
-
}
|
863 |
-
|
864 |
-
jpeg_encoder::~jpeg_encoder()
|
865 |
-
{
|
866 |
-
deinit();
|
867 |
-
}
|
868 |
-
|
869 |
-
bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
|
870 |
-
{
|
871 |
-
deinit();
|
872 |
-
if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
|
873 |
-
m_pStream = pStream;
|
874 |
-
m_params = comp_params;
|
875 |
-
return jpg_open(width, height, src_channels);
|
876 |
-
}
|
877 |
-
|
878 |
-
void jpeg_encoder::deinit()
|
879 |
-
{
|
880 |
-
jpge_free(m_mcu_lines[0]);
|
881 |
-
clear();
|
882 |
-
}
|
883 |
-
|
884 |
-
bool jpeg_encoder::process_scanline(const void* pScanline)
|
885 |
-
{
|
886 |
-
if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
|
887 |
-
if (m_all_stream_writes_succeeded)
|
888 |
-
{
|
889 |
-
if (!pScanline)
|
890 |
-
{
|
891 |
-
if (!process_end_of_image()) return false;
|
892 |
-
}
|
893 |
-
else
|
894 |
-
{
|
895 |
-
load_mcu(pScanline);
|
896 |
-
}
|
897 |
-
}
|
898 |
-
return m_all_stream_writes_succeeded;
|
899 |
-
}
|
900 |
-
|
901 |
-
// Higher level wrappers/examples (optional).
|
902 |
-
#include <stdio.h>
|
903 |
-
|
904 |
-
class cfile_stream : public output_stream
|
905 |
-
{
|
906 |
-
cfile_stream(const cfile_stream &);
|
907 |
-
cfile_stream &operator= (const cfile_stream &);
|
908 |
-
|
909 |
-
FILE* m_pFile;
|
910 |
-
bool m_bStatus;
|
911 |
-
|
912 |
-
public:
|
913 |
-
cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
|
914 |
-
|
915 |
-
virtual ~cfile_stream()
|
916 |
-
{
|
917 |
-
close();
|
918 |
-
}
|
919 |
-
|
920 |
-
bool open(const char *pFilename)
|
921 |
-
{
|
922 |
-
close();
|
923 |
-
#if defined(_MSC_VER)
|
924 |
-
if (fopen_s(&m_pFile, pFilename, "wb") != 0)
|
925 |
-
{
|
926 |
-
return false;
|
927 |
-
}
|
928 |
-
#else
|
929 |
-
m_pFile = fopen(pFilename, "wb");
|
930 |
-
#endif
|
931 |
-
m_bStatus = (m_pFile != NULL);
|
932 |
-
return m_bStatus;
|
933 |
-
}
|
934 |
-
|
935 |
-
bool close()
|
936 |
-
{
|
937 |
-
if (m_pFile)
|
938 |
-
{
|
939 |
-
if (fclose(m_pFile) == EOF)
|
940 |
-
{
|
941 |
-
m_bStatus = false;
|
942 |
-
}
|
943 |
-
m_pFile = NULL;
|
944 |
-
}
|
945 |
-
return m_bStatus;
|
946 |
-
}
|
947 |
-
|
948 |
-
virtual bool put_buf(const void* pBuf, int64_t len)
|
949 |
-
{
|
950 |
-
m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
|
951 |
-
return m_bStatus;
|
952 |
-
}
|
953 |
-
|
954 |
-
uint get_size() const
|
955 |
-
{
|
956 |
-
return m_pFile ? ftell(m_pFile) : 0;
|
957 |
-
}
|
958 |
-
};
|
959 |
-
|
960 |
-
// Writes JPEG image to file.
|
961 |
-
bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
|
962 |
-
{
|
963 |
-
cfile_stream dst_stream;
|
964 |
-
if (!dst_stream.open(pFilename))
|
965 |
-
return false;
|
966 |
-
|
967 |
-
jpge::jpeg_encoder dst_image;
|
968 |
-
if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
|
969 |
-
return false;
|
970 |
-
|
971 |
-
for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
|
972 |
-
{
|
973 |
-
for (int64_t i = 0; i < height; i++)
|
974 |
-
{
|
975 |
-
// i, width, and num_channels are all 64bit
|
976 |
-
const uint8* pBuf = pImage_data + i * width * num_channels;
|
977 |
-
if (!dst_image.process_scanline(pBuf))
|
978 |
-
return false;
|
979 |
-
}
|
980 |
-
if (!dst_image.process_scanline(NULL))
|
981 |
-
return false;
|
982 |
-
}
|
983 |
-
|
984 |
-
dst_image.deinit();
|
985 |
-
|
986 |
-
return dst_stream.close();
|
987 |
-
}
|
988 |
-
|
989 |
-
class memory_stream : public output_stream
|
990 |
-
{
|
991 |
-
memory_stream(const memory_stream &);
|
992 |
-
memory_stream &operator= (const memory_stream &);
|
993 |
-
|
994 |
-
uint8 *m_pBuf;
|
995 |
-
uint64_t m_buf_size, m_buf_ofs;
|
996 |
-
|
997 |
-
public:
|
998 |
-
memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
|
999 |
-
|
1000 |
-
virtual ~memory_stream() { }
|
1001 |
-
|
1002 |
-
virtual bool put_buf(const void* pBuf, int64_t len)
|
1003 |
-
{
|
1004 |
-
uint64_t buf_remaining = m_buf_size - m_buf_ofs;
|
1005 |
-
if ((uint64_t)len > buf_remaining)
|
1006 |
-
return false;
|
1007 |
-
memcpy(m_pBuf + m_buf_ofs, pBuf, len);
|
1008 |
-
m_buf_ofs += len;
|
1009 |
-
return true;
|
1010 |
-
}
|
1011 |
-
|
1012 |
-
uint64_t get_size() const
|
1013 |
-
{
|
1014 |
-
return m_buf_ofs;
|
1015 |
-
}
|
1016 |
-
};
|
1017 |
-
|
1018 |
-
bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
|
1019 |
-
{
|
1020 |
-
if ((!pDstBuf) || (!buf_size))
|
1021 |
-
return false;
|
1022 |
-
|
1023 |
-
memory_stream dst_stream(pDstBuf, buf_size);
|
1024 |
-
|
1025 |
-
buf_size = 0;
|
1026 |
-
|
1027 |
-
jpge::jpeg_encoder dst_image;
|
1028 |
-
if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
|
1029 |
-
return false;
|
1030 |
-
|
1031 |
-
for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
|
1032 |
-
{
|
1033 |
-
for (int64_t i = 0; i < height; i++)
|
1034 |
-
{
|
1035 |
-
const uint8* pScanline = pImage_data + i * width * num_channels;
|
1036 |
-
if (!dst_image.process_scanline(pScanline))
|
1037 |
-
return false;
|
1038 |
-
}
|
1039 |
-
if (!dst_image.process_scanline(NULL))
|
1040 |
-
return false;
|
1041 |
-
}
|
1042 |
-
|
1043 |
-
dst_image.deinit();
|
1044 |
-
|
1045 |
-
buf_size = dst_stream.get_size();
|
1046 |
-
return true;
|
1047 |
-
}
|
1048 |
-
|
1049 |
-
} // namespace jpge
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/transformer2d.md
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
# Transformer2D
|
2 |
-
|
3 |
-
A Transformer model for image-like data from [CompVis](https://huggingface.co/CompVis) that is based on the [Vision Transformer](https://huggingface.co/papers/2010.11929) introduced by Dosovitskiy et al. The [`Transformer2DModel`] accepts discrete (classes of vector embeddings) or continuous (actual embeddings) inputs.
|
4 |
-
|
5 |
-
When the input is **continuous**:
|
6 |
-
|
7 |
-
1. Project the input and reshape it to `(batch_size, sequence_length, feature_dimension)`.
|
8 |
-
2. Apply the Transformer blocks in the standard way.
|
9 |
-
3. Reshape to image.
|
10 |
-
|
11 |
-
When the input is **discrete**:
|
12 |
-
|
13 |
-
<Tip>
|
14 |
-
|
15 |
-
It is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image don't contain a prediction for the masked pixel because the unnoised image cannot be masked.
|
16 |
-
|
17 |
-
</Tip>
|
18 |
-
|
19 |
-
1. Convert input (classes of latent pixels) to embeddings and apply positional embeddings.
|
20 |
-
2. Apply the Transformer blocks in the standard way.
|
21 |
-
3. Predict classes of unnoised image.
|
22 |
-
|
23 |
-
## Transformer2DModel
|
24 |
-
|
25 |
-
[[autodoc]] Transformer2DModel
|
26 |
-
|
27 |
-
## Transformer2DModelOutput
|
28 |
-
|
29 |
-
[[autodoc]] models.transformer_2d.Transformer2DModelOutput
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/optimization/habana.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Habana Gaudi에서 Stable Diffusion을 사용하는 방법
|
14 |
-
|
15 |
-
🤗 Diffusers는 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion)를 통해서 Habana Gaudi와 호환됩니다.
|
16 |
-
|
17 |
-
## 요구 사항
|
18 |
-
|
19 |
-
- Optimum Habana 1.4 또는 이후, [여기](https://huggingface.co/docs/optimum/habana/installation)에 설치하는 방법이 있습니다.
|
20 |
-
- SynapseAI 1.8.
|
21 |
-
|
22 |
-
|
23 |
-
## 추론 파이프라인
|
24 |
-
|
25 |
-
Gaudi에서 Stable Diffusion 1 및 2로 이미지를 생성하려면 두 인스턴스를 인스턴스화해야 합니다:
|
26 |
-
- [`GaudiStableDiffusionPipeline`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline)이 포함된 파이프라인. 이 파이프라인은 *텍스트-이미지 생성*을 지원합니다.
|
27 |
-
- [`GaudiDDIMScheduler`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline#optimum.habana.diffusers.GaudiDDIMScheduler)이 포함된 스케줄러. 이 스케줄러는 Habana Gaudi에 최적화되어 있습니다.
|
28 |
-
|
29 |
-
파이프라인을 초기화할 때, HPU에 배포하기 위해 `use_habana=True`를 지정해야 합니다.
|
30 |
-
또한 가능한 가장 빠른 생성을 위해 `use_hpu_graphs=True`로 **HPU 그래프**를 활성화해야 합니다.
|
31 |
-
마지막으로, [Hugging Face Hub](https://huggingface.co/Habana)에서 다운로드할 수 있는 [Gaudi configuration](https://huggingface.co/docs/optimum/habana/package_reference/gaudi_config)을 지정해야 합니다.
|
32 |
-
|
33 |
-
```python
|
34 |
-
from optimum.habana import GaudiConfig
|
35 |
-
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline
|
36 |
-
|
37 |
-
model_name = "stabilityai/stable-diffusion-2-base"
|
38 |
-
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
|
39 |
-
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
|
40 |
-
model_name,
|
41 |
-
scheduler=scheduler,
|
42 |
-
use_habana=True,
|
43 |
-
use_hpu_graphs=True,
|
44 |
-
gaudi_config="Habana/stable-diffusion",
|
45 |
-
)
|
46 |
-
```
|
47 |
-
|
48 |
-
파이프라인을 호출하여 하나 이상의 프롬프트에서 배치별로 이미지를 생성할 수 있습니다.
|
49 |
-
|
50 |
-
```python
|
51 |
-
outputs = pipeline(
|
52 |
-
prompt=[
|
53 |
-
"High quality photo of an astronaut riding a horse in space",
|
54 |
-
"Face of a yellow cat, high resolution, sitting on a park bench",
|
55 |
-
],
|
56 |
-
num_images_per_prompt=10,
|
57 |
-
batch_size=4,
|
58 |
-
)
|
59 |
-
```
|
60 |
-
|
61 |
-
더 많은 정보를 얻기 위해, Optimum Habana의 [문서](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion)와 공식 Github 저장소에 제공된 [예시](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion)를 확인하세요.
|
62 |
-
|
63 |
-
|
64 |
-
## 벤치마크
|
65 |
-
|
66 |
-
다음은 [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) Gaudi 구성(혼합 정밀도 bf16/fp32)을 사용하는 Habana first-generation Gaudi 및 Gaudi2의 지연 시간입니다:
|
67 |
-
|
68 |
-
| | Latency (배치 크기 = 1) | Throughput (배치 크기 = 8) |
|
69 |
-
| ---------------------- |:------------------------:|:---------------------------:|
|
70 |
-
| first-generation Gaudi | 4.29s | 0.283 images/s |
|
71 |
-
| Gaudi2 | 1.54s | 0.904 images/s |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/README.md
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
# Region Proposal by Guided Anchoring
|
2 |
-
|
3 |
-
## Introduction
|
4 |
-
|
5 |
-
[ALGORITHM]
|
6 |
-
|
7 |
-
We provide config files to reproduce the results in the CVPR 2019 paper for [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278).
|
8 |
-
|
9 |
-
```latex
|
10 |
-
@inproceedings{wang2019region,
|
11 |
-
title={Region Proposal by Guided Anchoring},
|
12 |
-
author={Jiaqi Wang and Kai Chen and Shuo Yang and Chen Change Loy and Dahua Lin},
|
13 |
-
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
|
14 |
-
year={2019}
|
15 |
-
}
|
16 |
-
```
|
17 |
-
|
18 |
-
## Results and Models
|
19 |
-
|
20 |
-
The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val).
|
21 |
-
|
22 |
-
| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR 1000 | Config | Download |
|
23 |
-
| :----: | :-------------: | :-----: | :-----: | :------: | :------------: | :-----: | :------: | :--------: |
|
24 |
-
| GA-RPN | R-50-FPN | caffe | 1x | 5.3 | 15.8 | 68.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531_011819.log.json) |
|
25 |
-
| GA-RPN | R-101-FPN | caffe | 1x | 7.3 | 13.0 | 69.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531_011812.log.json) |
|
26 |
-
| GA-RPN | X-101-32x4d-FPN | pytorch | 1x | 8.5 | 10.0 | 70.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220_221326.log.json) |
|
27 |
-
| GA-RPN | X-101-64x4d-FPN | pytorch | 1x | 7.1 | 7.5 | 71.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225_152704.log.json) |
|
28 |
-
|
29 |
-
| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|
30 |
-
| :------------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
|
31 |
-
| GA-Faster RCNN | R-50-FPN | caffe | 1x | 5.5 | | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718.log.json) |
|
32 |
-
| GA-Faster RCNN | R-101-FPN | caffe | 1x | 7.5 | | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_20200505_115528.log.json) |
|
33 |
-
| GA-Faster RCNN | X-101-32x4d-FPN | pytorch | 1x | 8.7 | 9.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215_184547.log.json) |
|
34 |
-
| GA-Faster RCNN | X-101-64x4d-FPN | pytorch | 1x | 11.8 | 7.3 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215_104455.log.json) |
|
35 |
-
| GA-RetinaNet | R-50-FPN | caffe | 1x | 3.5 | 16.8 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020_225450.log.json) |
|
36 |
-
| GA-RetinaNet | R-101-FPN | caffe | 1x | 5.5 | 12.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531_012847.log.json) |
|
37 |
-
| GA-RetinaNet | X-101-32x4d-FPN | pytorch | 1x | 6.9 | 10.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219_223025.log.json) |
|
38 |
-
| GA-RetinaNet | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 7.7 | 41.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226_221123.log.json) |
|
39 |
-
|
40 |
-
- In the Guided Anchoring paper, `score_thr` is set to 0.001 in Fast/Faster RCNN and 0.05 in RetinaNet for both baselines and Guided Anchoring.
|
41 |
-
|
42 |
-
- Performance on COCO test-dev benchmark are shown as follows.
|
43 |
-
|
44 |
-
| Method | Backbone | Style | Lr schd | Aug Train | Score thr | AP | AP_50 | AP_75 | AP_small | AP_medium | AP_large | Download |
|
45 |
-
| :------------: | :-------: | :---: | :-----: | :-------: | :-------: | :---: | :---: | :---: | :------: | :-------: | :------: | :------: |
|
46 |
-
| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | |
|
47 |
-
| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.001 | | | | | | | |
|
48 |
-
| GA-RetinaNet | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | |
|
49 |
-
| GA-RetinaNet | R-101-FPN | caffe | 2x | T | 0.05 | | | | | | | |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolact_head.py
DELETED
@@ -1,943 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
import torch.nn.functional as F
|
5 |
-
from mmcv.cnn import ConvModule, xavier_init
|
6 |
-
from mmcv.runner import force_fp32
|
7 |
-
|
8 |
-
from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
|
9 |
-
from ..builder import HEADS, build_loss
|
10 |
-
from .anchor_head import AnchorHead
|
11 |
-
|
12 |
-
|
13 |
-
@HEADS.register_module()
|
14 |
-
class YOLACTHead(AnchorHead):
|
15 |
-
"""YOLACT box head used in https://arxiv.org/abs/1904.02689.
|
16 |
-
|
17 |
-
Note that YOLACT head is a light version of RetinaNet head.
|
18 |
-
Four differences are described as follows:
|
19 |
-
|
20 |
-
1. YOLACT box head has three-times fewer anchors.
|
21 |
-
2. YOLACT box head shares the convs for box and cls branches.
|
22 |
-
3. YOLACT box head uses OHEM instead of Focal loss.
|
23 |
-
4. YOLACT box head predicts a set of mask coefficients for each box.
|
24 |
-
|
25 |
-
Args:
|
26 |
-
num_classes (int): Number of categories excluding the background
|
27 |
-
category.
|
28 |
-
in_channels (int): Number of channels in the input feature map.
|
29 |
-
anchor_generator (dict): Config dict for anchor generator
|
30 |
-
loss_cls (dict): Config of classification loss.
|
31 |
-
loss_bbox (dict): Config of localization loss.
|
32 |
-
num_head_convs (int): Number of the conv layers shared by
|
33 |
-
box and cls branches.
|
34 |
-
num_protos (int): Number of the mask coefficients.
|
35 |
-
use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
|
36 |
-
cls loss calculation. If false, ``loss_single`` will be used.
|
37 |
-
conv_cfg (dict): Dictionary to construct and config conv layer.
|
38 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
39 |
-
"""
|
40 |
-
|
41 |
-
def __init__(self,
|
42 |
-
num_classes,
|
43 |
-
in_channels,
|
44 |
-
anchor_generator=dict(
|
45 |
-
type='AnchorGenerator',
|
46 |
-
octave_base_scale=3,
|
47 |
-
scales_per_octave=1,
|
48 |
-
ratios=[0.5, 1.0, 2.0],
|
49 |
-
strides=[8, 16, 32, 64, 128]),
|
50 |
-
loss_cls=dict(
|
51 |
-
type='CrossEntropyLoss',
|
52 |
-
use_sigmoid=False,
|
53 |
-
reduction='none',
|
54 |
-
loss_weight=1.0),
|
55 |
-
loss_bbox=dict(
|
56 |
-
type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
|
57 |
-
num_head_convs=1,
|
58 |
-
num_protos=32,
|
59 |
-
use_ohem=True,
|
60 |
-
conv_cfg=None,
|
61 |
-
norm_cfg=None,
|
62 |
-
**kwargs):
|
63 |
-
self.num_head_convs = num_head_convs
|
64 |
-
self.num_protos = num_protos
|
65 |
-
self.use_ohem = use_ohem
|
66 |
-
self.conv_cfg = conv_cfg
|
67 |
-
self.norm_cfg = norm_cfg
|
68 |
-
super(YOLACTHead, self).__init__(
|
69 |
-
num_classes,
|
70 |
-
in_channels,
|
71 |
-
loss_cls=loss_cls,
|
72 |
-
loss_bbox=loss_bbox,
|
73 |
-
anchor_generator=anchor_generator,
|
74 |
-
**kwargs)
|
75 |
-
if self.use_ohem:
|
76 |
-
sampler_cfg = dict(type='PseudoSampler')
|
77 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
78 |
-
self.sampling = False
|
79 |
-
|
80 |
-
def _init_layers(self):
|
81 |
-
"""Initialize layers of the head."""
|
82 |
-
self.relu = nn.ReLU(inplace=True)
|
83 |
-
self.head_convs = nn.ModuleList()
|
84 |
-
for i in range(self.num_head_convs):
|
85 |
-
chn = self.in_channels if i == 0 else self.feat_channels
|
86 |
-
self.head_convs.append(
|
87 |
-
ConvModule(
|
88 |
-
chn,
|
89 |
-
self.feat_channels,
|
90 |
-
3,
|
91 |
-
stride=1,
|
92 |
-
padding=1,
|
93 |
-
conv_cfg=self.conv_cfg,
|
94 |
-
norm_cfg=self.norm_cfg))
|
95 |
-
self.conv_cls = nn.Conv2d(
|
96 |
-
self.feat_channels,
|
97 |
-
self.num_anchors * self.cls_out_channels,
|
98 |
-
3,
|
99 |
-
padding=1)
|
100 |
-
self.conv_reg = nn.Conv2d(
|
101 |
-
self.feat_channels, self.num_anchors * 4, 3, padding=1)
|
102 |
-
self.conv_coeff = nn.Conv2d(
|
103 |
-
self.feat_channels,
|
104 |
-
self.num_anchors * self.num_protos,
|
105 |
-
3,
|
106 |
-
padding=1)
|
107 |
-
|
108 |
-
def init_weights(self):
|
109 |
-
"""Initialize weights of the head."""
|
110 |
-
for m in self.head_convs:
|
111 |
-
xavier_init(m.conv, distribution='uniform', bias=0)
|
112 |
-
xavier_init(self.conv_cls, distribution='uniform', bias=0)
|
113 |
-
xavier_init(self.conv_reg, distribution='uniform', bias=0)
|
114 |
-
xavier_init(self.conv_coeff, distribution='uniform', bias=0)
|
115 |
-
|
116 |
-
def forward_single(self, x):
|
117 |
-
"""Forward feature of a single scale level.
|
118 |
-
|
119 |
-
Args:
|
120 |
-
x (Tensor): Features of a single scale level.
|
121 |
-
|
122 |
-
Returns:
|
123 |
-
tuple:
|
124 |
-
cls_score (Tensor): Cls scores for a single scale level \
|
125 |
-
the channels number is num_anchors * num_classes.
|
126 |
-
bbox_pred (Tensor): Box energies / deltas for a single scale \
|
127 |
-
level, the channels number is num_anchors * 4.
|
128 |
-
coeff_pred (Tensor): Mask coefficients for a single scale \
|
129 |
-
level, the channels number is num_anchors * num_protos.
|
130 |
-
"""
|
131 |
-
for head_conv in self.head_convs:
|
132 |
-
x = head_conv(x)
|
133 |
-
cls_score = self.conv_cls(x)
|
134 |
-
bbox_pred = self.conv_reg(x)
|
135 |
-
coeff_pred = self.conv_coeff(x).tanh()
|
136 |
-
return cls_score, bbox_pred, coeff_pred
|
137 |
-
|
138 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
|
139 |
-
def loss(self,
|
140 |
-
cls_scores,
|
141 |
-
bbox_preds,
|
142 |
-
gt_bboxes,
|
143 |
-
gt_labels,
|
144 |
-
img_metas,
|
145 |
-
gt_bboxes_ignore=None):
|
146 |
-
"""A combination of the func:``AnchorHead.loss`` and
|
147 |
-
func:``SSDHead.loss``.
|
148 |
-
|
149 |
-
When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,
|
150 |
-
otherwise, it follows ``AnchorHead.loss``. Besides, it additionally
|
151 |
-
returns ``sampling_results``.
|
152 |
-
|
153 |
-
Args:
|
154 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
155 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
156 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
157 |
-
level with shape (N, num_anchors * 4, H, W)
|
158 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
159 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
160 |
-
gt_labels (list[Tensor]): Class indices corresponding to each box
|
161 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
162 |
-
image size, scaling factor, etc.
|
163 |
-
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
|
164 |
-
boxes can be ignored when computing the loss. Default: None
|
165 |
-
|
166 |
-
Returns:
|
167 |
-
tuple:
|
168 |
-
dict[str, Tensor]: A dictionary of loss components.
|
169 |
-
List[:obj:``SamplingResult``]: Sampler results for each image.
|
170 |
-
"""
|
171 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
172 |
-
assert len(featmap_sizes) == self.anchor_generator.num_levels
|
173 |
-
|
174 |
-
device = cls_scores[0].device
|
175 |
-
|
176 |
-
anchor_list, valid_flag_list = self.get_anchors(
|
177 |
-
featmap_sizes, img_metas, device=device)
|
178 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
179 |
-
cls_reg_targets = self.get_targets(
|
180 |
-
anchor_list,
|
181 |
-
valid_flag_list,
|
182 |
-
gt_bboxes,
|
183 |
-
img_metas,
|
184 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
185 |
-
gt_labels_list=gt_labels,
|
186 |
-
label_channels=label_channels,
|
187 |
-
unmap_outputs=not self.use_ohem,
|
188 |
-
return_sampling_results=True)
|
189 |
-
if cls_reg_targets is None:
|
190 |
-
return None
|
191 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
192 |
-
num_total_pos, num_total_neg, sampling_results) = cls_reg_targets
|
193 |
-
|
194 |
-
if self.use_ohem:
|
195 |
-
num_images = len(img_metas)
|
196 |
-
all_cls_scores = torch.cat([
|
197 |
-
s.permute(0, 2, 3, 1).reshape(
|
198 |
-
num_images, -1, self.cls_out_channels) for s in cls_scores
|
199 |
-
], 1)
|
200 |
-
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
|
201 |
-
all_label_weights = torch.cat(label_weights_list,
|
202 |
-
-1).view(num_images, -1)
|
203 |
-
all_bbox_preds = torch.cat([
|
204 |
-
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
|
205 |
-
for b in bbox_preds
|
206 |
-
], -2)
|
207 |
-
all_bbox_targets = torch.cat(bbox_targets_list,
|
208 |
-
-2).view(num_images, -1, 4)
|
209 |
-
all_bbox_weights = torch.cat(bbox_weights_list,
|
210 |
-
-2).view(num_images, -1, 4)
|
211 |
-
|
212 |
-
# concat all level anchors to a single tensor
|
213 |
-
all_anchors = []
|
214 |
-
for i in range(num_images):
|
215 |
-
all_anchors.append(torch.cat(anchor_list[i]))
|
216 |
-
|
217 |
-
# check NaN and Inf
|
218 |
-
assert torch.isfinite(all_cls_scores).all().item(), \
|
219 |
-
'classification scores become infinite or NaN!'
|
220 |
-
assert torch.isfinite(all_bbox_preds).all().item(), \
|
221 |
-
'bbox predications become infinite or NaN!'
|
222 |
-
|
223 |
-
losses_cls, losses_bbox = multi_apply(
|
224 |
-
self.loss_single_OHEM,
|
225 |
-
all_cls_scores,
|
226 |
-
all_bbox_preds,
|
227 |
-
all_anchors,
|
228 |
-
all_labels,
|
229 |
-
all_label_weights,
|
230 |
-
all_bbox_targets,
|
231 |
-
all_bbox_weights,
|
232 |
-
num_total_samples=num_total_pos)
|
233 |
-
else:
|
234 |
-
num_total_samples = (
|
235 |
-
num_total_pos +
|
236 |
-
num_total_neg if self.sampling else num_total_pos)
|
237 |
-
|
238 |
-
# anchor number of multi levels
|
239 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
240 |
-
# concat all level anchors and flags to a single tensor
|
241 |
-
concat_anchor_list = []
|
242 |
-
for i in range(len(anchor_list)):
|
243 |
-
concat_anchor_list.append(torch.cat(anchor_list[i]))
|
244 |
-
all_anchor_list = images_to_levels(concat_anchor_list,
|
245 |
-
num_level_anchors)
|
246 |
-
losses_cls, losses_bbox = multi_apply(
|
247 |
-
self.loss_single,
|
248 |
-
cls_scores,
|
249 |
-
bbox_preds,
|
250 |
-
all_anchor_list,
|
251 |
-
labels_list,
|
252 |
-
label_weights_list,
|
253 |
-
bbox_targets_list,
|
254 |
-
bbox_weights_list,
|
255 |
-
num_total_samples=num_total_samples)
|
256 |
-
|
257 |
-
return dict(
|
258 |
-
loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results
|
259 |
-
|
260 |
-
def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
|
261 |
-
label_weights, bbox_targets, bbox_weights,
|
262 |
-
num_total_samples):
|
263 |
-
""""See func:``SSDHead.loss``."""
|
264 |
-
loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
|
265 |
-
|
266 |
-
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
|
267 |
-
pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(
|
268 |
-
as_tuple=False).reshape(-1)
|
269 |
-
neg_inds = (labels == self.num_classes).nonzero(
|
270 |
-
as_tuple=False).view(-1)
|
271 |
-
|
272 |
-
num_pos_samples = pos_inds.size(0)
|
273 |
-
if num_pos_samples == 0:
|
274 |
-
num_neg_samples = neg_inds.size(0)
|
275 |
-
else:
|
276 |
-
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
|
277 |
-
if num_neg_samples > neg_inds.size(0):
|
278 |
-
num_neg_samples = neg_inds.size(0)
|
279 |
-
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
|
280 |
-
loss_cls_pos = loss_cls_all[pos_inds].sum()
|
281 |
-
loss_cls_neg = topk_loss_cls_neg.sum()
|
282 |
-
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
|
283 |
-
if self.reg_decoded_bbox:
|
284 |
-
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
|
285 |
-
# is applied directly on the decoded bounding boxes, it
|
286 |
-
# decodes the already encoded coordinates to absolute format.
|
287 |
-
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
|
288 |
-
loss_bbox = self.loss_bbox(
|
289 |
-
bbox_pred,
|
290 |
-
bbox_targets,
|
291 |
-
bbox_weights,
|
292 |
-
avg_factor=num_total_samples)
|
293 |
-
return loss_cls[None], loss_bbox
|
294 |
-
|
295 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
|
296 |
-
def get_bboxes(self,
|
297 |
-
cls_scores,
|
298 |
-
bbox_preds,
|
299 |
-
coeff_preds,
|
300 |
-
img_metas,
|
301 |
-
cfg=None,
|
302 |
-
rescale=False):
|
303 |
-
""""Similiar to func:``AnchorHead.get_bboxes``, but additionally
|
304 |
-
processes coeff_preds.
|
305 |
-
|
306 |
-
Args:
|
307 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
308 |
-
with shape (N, num_anchors * num_classes, H, W)
|
309 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
310 |
-
level with shape (N, num_anchors * 4, H, W)
|
311 |
-
coeff_preds (list[Tensor]): Mask coefficients for each scale
|
312 |
-
level with shape (N, num_anchors * num_protos, H, W)
|
313 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
314 |
-
image size, scaling factor, etc.
|
315 |
-
cfg (mmcv.Config | None): Test / postprocessing configuration,
|
316 |
-
if None, test_cfg would be used
|
317 |
-
rescale (bool): If True, return boxes in original image space.
|
318 |
-
Default: False.
|
319 |
-
|
320 |
-
Returns:
|
321 |
-
list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
|
322 |
-
a 3-tuple. The first item is an (n, 5) tensor, where the
|
323 |
-
first 4 columns are bounding box positions
|
324 |
-
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
|
325 |
-
between 0 and 1. The second item is an (n,) tensor where each
|
326 |
-
item is the predicted class label of the corresponding box.
|
327 |
-
The third item is an (n, num_protos) tensor where each item
|
328 |
-
is the predicted mask coefficients of instance inside the
|
329 |
-
corresponding box.
|
330 |
-
"""
|
331 |
-
assert len(cls_scores) == len(bbox_preds)
|
332 |
-
num_levels = len(cls_scores)
|
333 |
-
|
334 |
-
device = cls_scores[0].device
|
335 |
-
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
|
336 |
-
mlvl_anchors = self.anchor_generator.grid_anchors(
|
337 |
-
featmap_sizes, device=device)
|
338 |
-
|
339 |
-
det_bboxes = []
|
340 |
-
det_labels = []
|
341 |
-
det_coeffs = []
|
342 |
-
for img_id in range(len(img_metas)):
|
343 |
-
cls_score_list = [
|
344 |
-
cls_scores[i][img_id].detach() for i in range(num_levels)
|
345 |
-
]
|
346 |
-
bbox_pred_list = [
|
347 |
-
bbox_preds[i][img_id].detach() for i in range(num_levels)
|
348 |
-
]
|
349 |
-
coeff_pred_list = [
|
350 |
-
coeff_preds[i][img_id].detach() for i in range(num_levels)
|
351 |
-
]
|
352 |
-
img_shape = img_metas[img_id]['img_shape']
|
353 |
-
scale_factor = img_metas[img_id]['scale_factor']
|
354 |
-
bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,
|
355 |
-
coeff_pred_list, mlvl_anchors,
|
356 |
-
img_shape, scale_factor, cfg,
|
357 |
-
rescale)
|
358 |
-
det_bboxes.append(bbox_res[0])
|
359 |
-
det_labels.append(bbox_res[1])
|
360 |
-
det_coeffs.append(bbox_res[2])
|
361 |
-
return det_bboxes, det_labels, det_coeffs
|
362 |
-
|
363 |
-
def _get_bboxes_single(self,
|
364 |
-
cls_score_list,
|
365 |
-
bbox_pred_list,
|
366 |
-
coeff_preds_list,
|
367 |
-
mlvl_anchors,
|
368 |
-
img_shape,
|
369 |
-
scale_factor,
|
370 |
-
cfg,
|
371 |
-
rescale=False):
|
372 |
-
""""Similiar to func:``AnchorHead._get_bboxes_single``, but
|
373 |
-
additionally processes coeff_preds_list and uses fast NMS instead of
|
374 |
-
traditional NMS.
|
375 |
-
|
376 |
-
Args:
|
377 |
-
cls_score_list (list[Tensor]): Box scores for a single scale level
|
378 |
-
Has shape (num_anchors * num_classes, H, W).
|
379 |
-
bbox_pred_list (list[Tensor]): Box energies / deltas for a single
|
380 |
-
scale level with shape (num_anchors * 4, H, W).
|
381 |
-
coeff_preds_list (list[Tensor]): Mask coefficients for a single
|
382 |
-
scale level with shape (num_anchors * num_protos, H, W).
|
383 |
-
mlvl_anchors (list[Tensor]): Box reference for a single scale level
|
384 |
-
with shape (num_total_anchors, 4).
|
385 |
-
img_shape (tuple[int]): Shape of the input image,
|
386 |
-
(height, width, 3).
|
387 |
-
scale_factor (ndarray): Scale factor of the image arange as
|
388 |
-
(w_scale, h_scale, w_scale, h_scale).
|
389 |
-
cfg (mmcv.Config): Test / postprocessing configuration,
|
390 |
-
if None, test_cfg would be used.
|
391 |
-
rescale (bool): If True, return boxes in original image space.
|
392 |
-
|
393 |
-
Returns:
|
394 |
-
tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,
|
395 |
-
where the first 4 columns are bounding box positions
|
396 |
-
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score between
|
397 |
-
0 and 1. The second item is an (n,) tensor where each item is
|
398 |
-
the predicted class label of the corresponding box. The third
|
399 |
-
item is an (n, num_protos) tensor where each item is the
|
400 |
-
predicted mask coefficients of instance inside the
|
401 |
-
corresponding box.
|
402 |
-
"""
|
403 |
-
cfg = self.test_cfg if cfg is None else cfg
|
404 |
-
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
|
405 |
-
mlvl_bboxes = []
|
406 |
-
mlvl_scores = []
|
407 |
-
mlvl_coeffs = []
|
408 |
-
for cls_score, bbox_pred, coeff_pred, anchors in \
|
409 |
-
zip(cls_score_list, bbox_pred_list,
|
410 |
-
coeff_preds_list, mlvl_anchors):
|
411 |
-
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
|
412 |
-
cls_score = cls_score.permute(1, 2,
|
413 |
-
0).reshape(-1, self.cls_out_channels)
|
414 |
-
if self.use_sigmoid_cls:
|
415 |
-
scores = cls_score.sigmoid()
|
416 |
-
else:
|
417 |
-
scores = cls_score.softmax(-1)
|
418 |
-
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
419 |
-
coeff_pred = coeff_pred.permute(1, 2,
|
420 |
-
0).reshape(-1, self.num_protos)
|
421 |
-
nms_pre = cfg.get('nms_pre', -1)
|
422 |
-
if nms_pre > 0 and scores.shape[0] > nms_pre:
|
423 |
-
# Get maximum scores for foreground classes.
|
424 |
-
if self.use_sigmoid_cls:
|
425 |
-
max_scores, _ = scores.max(dim=1)
|
426 |
-
else:
|
427 |
-
# remind that we set FG labels to [0, num_class-1]
|
428 |
-
# since mmdet v2.0
|
429 |
-
# BG cat_id: num_class
|
430 |
-
max_scores, _ = scores[:, :-1].max(dim=1)
|
431 |
-
_, topk_inds = max_scores.topk(nms_pre)
|
432 |
-
anchors = anchors[topk_inds, :]
|
433 |
-
bbox_pred = bbox_pred[topk_inds, :]
|
434 |
-
scores = scores[topk_inds, :]
|
435 |
-
coeff_pred = coeff_pred[topk_inds, :]
|
436 |
-
bboxes = self.bbox_coder.decode(
|
437 |
-
anchors, bbox_pred, max_shape=img_shape)
|
438 |
-
mlvl_bboxes.append(bboxes)
|
439 |
-
mlvl_scores.append(scores)
|
440 |
-
mlvl_coeffs.append(coeff_pred)
|
441 |
-
mlvl_bboxes = torch.cat(mlvl_bboxes)
|
442 |
-
if rescale:
|
443 |
-
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
|
444 |
-
mlvl_scores = torch.cat(mlvl_scores)
|
445 |
-
mlvl_coeffs = torch.cat(mlvl_coeffs)
|
446 |
-
if self.use_sigmoid_cls:
|
447 |
-
# Add a dummy background class to the backend when using sigmoid
|
448 |
-
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
|
449 |
-
# BG cat_id: num_class
|
450 |
-
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
|
451 |
-
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
|
452 |
-
det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,
|
453 |
-
mlvl_coeffs,
|
454 |
-
cfg.score_thr,
|
455 |
-
cfg.iou_thr, cfg.top_k,
|
456 |
-
cfg.max_per_img)
|
457 |
-
return det_bboxes, det_labels, det_coeffs
|
458 |
-
|
459 |
-
|
460 |
-
@HEADS.register_module()
|
461 |
-
class YOLACTSegmHead(nn.Module):
|
462 |
-
"""YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.
|
463 |
-
|
464 |
-
Apply a semantic segmentation loss on feature space using layers that are
|
465 |
-
only evaluated during training to increase performance with no speed
|
466 |
-
penalty.
|
467 |
-
|
468 |
-
Args:
|
469 |
-
in_channels (int): Number of channels in the input feature map.
|
470 |
-
num_classes (int): Number of categories excluding the background
|
471 |
-
category.
|
472 |
-
loss_segm (dict): Config of semantic segmentation loss.
|
473 |
-
"""
|
474 |
-
|
475 |
-
def __init__(self,
|
476 |
-
num_classes,
|
477 |
-
in_channels=256,
|
478 |
-
loss_segm=dict(
|
479 |
-
type='CrossEntropyLoss',
|
480 |
-
use_sigmoid=True,
|
481 |
-
loss_weight=1.0)):
|
482 |
-
super(YOLACTSegmHead, self).__init__()
|
483 |
-
self.in_channels = in_channels
|
484 |
-
self.num_classes = num_classes
|
485 |
-
self.loss_segm = build_loss(loss_segm)
|
486 |
-
self._init_layers()
|
487 |
-
self.fp16_enabled = False
|
488 |
-
|
489 |
-
def _init_layers(self):
|
490 |
-
"""Initialize layers of the head."""
|
491 |
-
self.segm_conv = nn.Conv2d(
|
492 |
-
self.in_channels, self.num_classes, kernel_size=1)
|
493 |
-
|
494 |
-
def init_weights(self):
|
495 |
-
"""Initialize weights of the head."""
|
496 |
-
xavier_init(self.segm_conv, distribution='uniform')
|
497 |
-
|
498 |
-
def forward(self, x):
|
499 |
-
"""Forward feature from the upstream network.
|
500 |
-
|
501 |
-
Args:
|
502 |
-
x (Tensor): Feature from the upstream network, which is
|
503 |
-
a 4D-tensor.
|
504 |
-
|
505 |
-
Returns:
|
506 |
-
Tensor: Predicted semantic segmentation map with shape
|
507 |
-
(N, num_classes, H, W).
|
508 |
-
"""
|
509 |
-
return self.segm_conv(x)
|
510 |
-
|
511 |
-
@force_fp32(apply_to=('segm_pred', ))
|
512 |
-
def loss(self, segm_pred, gt_masks, gt_labels):
|
513 |
-
"""Compute loss of the head.
|
514 |
-
|
515 |
-
Args:
|
516 |
-
segm_pred (list[Tensor]): Predicted semantic segmentation map
|
517 |
-
with shape (N, num_classes, H, W).
|
518 |
-
gt_masks (list[Tensor]): Ground truth masks for each image with
|
519 |
-
the same shape of the input image.
|
520 |
-
gt_labels (list[Tensor]): Class indices corresponding to each box.
|
521 |
-
|
522 |
-
Returns:
|
523 |
-
dict[str, Tensor]: A dictionary of loss components.
|
524 |
-
"""
|
525 |
-
loss_segm = []
|
526 |
-
num_imgs, num_classes, mask_h, mask_w = segm_pred.size()
|
527 |
-
for idx in range(num_imgs):
|
528 |
-
cur_segm_pred = segm_pred[idx]
|
529 |
-
cur_gt_masks = gt_masks[idx].float()
|
530 |
-
cur_gt_labels = gt_labels[idx]
|
531 |
-
segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,
|
532 |
-
cur_gt_labels)
|
533 |
-
if segm_targets is None:
|
534 |
-
loss = self.loss_segm(cur_segm_pred,
|
535 |
-
torch.zeros_like(cur_segm_pred),
|
536 |
-
torch.zeros_like(cur_segm_pred))
|
537 |
-
else:
|
538 |
-
loss = self.loss_segm(
|
539 |
-
cur_segm_pred,
|
540 |
-
segm_targets,
|
541 |
-
avg_factor=num_imgs * mask_h * mask_w)
|
542 |
-
loss_segm.append(loss)
|
543 |
-
return dict(loss_segm=loss_segm)
|
544 |
-
|
545 |
-
def get_targets(self, segm_pred, gt_masks, gt_labels):
|
546 |
-
"""Compute semantic segmentation targets for each image.
|
547 |
-
|
548 |
-
Args:
|
549 |
-
segm_pred (Tensor): Predicted semantic segmentation map
|
550 |
-
with shape (num_classes, H, W).
|
551 |
-
gt_masks (Tensor): Ground truth masks for each image with
|
552 |
-
the same shape of the input image.
|
553 |
-
gt_labels (Tensor): Class indices corresponding to each box.
|
554 |
-
|
555 |
-
Returns:
|
556 |
-
Tensor: Semantic segmentation targets with shape
|
557 |
-
(num_classes, H, W).
|
558 |
-
"""
|
559 |
-
if gt_masks.size(0) == 0:
|
560 |
-
return None
|
561 |
-
num_classes, mask_h, mask_w = segm_pred.size()
|
562 |
-
with torch.no_grad():
|
563 |
-
downsampled_masks = F.interpolate(
|
564 |
-
gt_masks.unsqueeze(0), (mask_h, mask_w),
|
565 |
-
mode='bilinear',
|
566 |
-
align_corners=False).squeeze(0)
|
567 |
-
downsampled_masks = downsampled_masks.gt(0.5).float()
|
568 |
-
segm_targets = torch.zeros_like(segm_pred, requires_grad=False)
|
569 |
-
for obj_idx in range(downsampled_masks.size(0)):
|
570 |
-
segm_targets[gt_labels[obj_idx] - 1] = torch.max(
|
571 |
-
segm_targets[gt_labels[obj_idx] - 1],
|
572 |
-
downsampled_masks[obj_idx])
|
573 |
-
return segm_targets
|
574 |
-
|
575 |
-
|
576 |
-
@HEADS.register_module()
|
577 |
-
class YOLACTProtonet(nn.Module):
|
578 |
-
"""YOLACT mask head used in https://arxiv.org/abs/1904.02689.
|
579 |
-
|
580 |
-
This head outputs the mask prototypes for YOLACT.
|
581 |
-
|
582 |
-
Args:
|
583 |
-
in_channels (int): Number of channels in the input feature map.
|
584 |
-
proto_channels (tuple[int]): Output channels of protonet convs.
|
585 |
-
proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.
|
586 |
-
include_last_relu (Bool): If keep the last relu of protonet.
|
587 |
-
num_protos (int): Number of prototypes.
|
588 |
-
num_classes (int): Number of categories excluding the background
|
589 |
-
category.
|
590 |
-
loss_mask_weight (float): Reweight the mask loss by this factor.
|
591 |
-
max_masks_to_train (int): Maximum number of masks to train for
|
592 |
-
each image.
|
593 |
-
"""
|
594 |
-
|
595 |
-
def __init__(self,
|
596 |
-
num_classes,
|
597 |
-
in_channels=256,
|
598 |
-
proto_channels=(256, 256, 256, None, 256, 32),
|
599 |
-
proto_kernel_sizes=(3, 3, 3, -2, 3, 1),
|
600 |
-
include_last_relu=True,
|
601 |
-
num_protos=32,
|
602 |
-
loss_mask_weight=1.0,
|
603 |
-
max_masks_to_train=100):
|
604 |
-
super(YOLACTProtonet, self).__init__()
|
605 |
-
self.in_channels = in_channels
|
606 |
-
self.proto_channels = proto_channels
|
607 |
-
self.proto_kernel_sizes = proto_kernel_sizes
|
608 |
-
self.include_last_relu = include_last_relu
|
609 |
-
self.protonet = self._init_layers()
|
610 |
-
|
611 |
-
self.loss_mask_weight = loss_mask_weight
|
612 |
-
self.num_protos = num_protos
|
613 |
-
self.num_classes = num_classes
|
614 |
-
self.max_masks_to_train = max_masks_to_train
|
615 |
-
self.fp16_enabled = False
|
616 |
-
|
617 |
-
def _init_layers(self):
|
618 |
-
"""A helper function to take a config setting and turn it into a
|
619 |
-
network."""
|
620 |
-
# Possible patterns:
|
621 |
-
# ( 256, 3) -> conv
|
622 |
-
# ( 256,-2) -> deconv
|
623 |
-
# (None,-2) -> bilinear interpolate
|
624 |
-
in_channels = self.in_channels
|
625 |
-
protonets = nn.ModuleList()
|
626 |
-
for num_channels, kernel_size in zip(self.proto_channels,
|
627 |
-
self.proto_kernel_sizes):
|
628 |
-
if kernel_size > 0:
|
629 |
-
layer = nn.Conv2d(
|
630 |
-
in_channels,
|
631 |
-
num_channels,
|
632 |
-
kernel_size,
|
633 |
-
padding=kernel_size // 2)
|
634 |
-
else:
|
635 |
-
if num_channels is None:
|
636 |
-
layer = InterpolateModule(
|
637 |
-
scale_factor=-kernel_size,
|
638 |
-
mode='bilinear',
|
639 |
-
align_corners=False)
|
640 |
-
else:
|
641 |
-
layer = nn.ConvTranspose2d(
|
642 |
-
in_channels,
|
643 |
-
num_channels,
|
644 |
-
-kernel_size,
|
645 |
-
padding=kernel_size // 2)
|
646 |
-
protonets.append(layer)
|
647 |
-
protonets.append(nn.ReLU(inplace=True))
|
648 |
-
in_channels = num_channels if num_channels is not None \
|
649 |
-
else in_channels
|
650 |
-
if not self.include_last_relu:
|
651 |
-
protonets = protonets[:-1]
|
652 |
-
return nn.Sequential(*protonets)
|
653 |
-
|
654 |
-
def init_weights(self):
|
655 |
-
"""Initialize weights of the head."""
|
656 |
-
for m in self.protonet:
|
657 |
-
if isinstance(m, nn.Conv2d):
|
658 |
-
xavier_init(m, distribution='uniform')
|
659 |
-
|
660 |
-
def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):
|
661 |
-
"""Forward feature from the upstream network to get prototypes and
|
662 |
-
linearly combine the prototypes, using masks coefficients, into
|
663 |
-
instance masks. Finally, crop the instance masks with given bboxes.
|
664 |
-
|
665 |
-
Args:
|
666 |
-
x (Tensor): Feature from the upstream network, which is
|
667 |
-
a 4D-tensor.
|
668 |
-
coeff_pred (list[Tensor]): Mask coefficients for each scale
|
669 |
-
level with shape (N, num_anchors * num_protos, H, W).
|
670 |
-
bboxes (list[Tensor]): Box used for cropping with shape
|
671 |
-
(N, num_anchors * 4, H, W). During training, they are
|
672 |
-
ground truth boxes. During testing, they are predicted
|
673 |
-
boxes.
|
674 |
-
img_meta (list[dict]): Meta information of each image, e.g.,
|
675 |
-
image size, scaling factor, etc.
|
676 |
-
sampling_results (List[:obj:``SamplingResult``]): Sampler results
|
677 |
-
for each image.
|
678 |
-
|
679 |
-
Returns:
|
680 |
-
list[Tensor]: Predicted instance segmentation masks.
|
681 |
-
"""
|
682 |
-
prototypes = self.protonet(x)
|
683 |
-
prototypes = prototypes.permute(0, 2, 3, 1).contiguous()
|
684 |
-
|
685 |
-
num_imgs = x.size(0)
|
686 |
-
# Training state
|
687 |
-
if self.training:
|
688 |
-
coeff_pred_list = []
|
689 |
-
for coeff_pred_per_level in coeff_pred:
|
690 |
-
coeff_pred_per_level = \
|
691 |
-
coeff_pred_per_level.permute(0, 2, 3, 1)\
|
692 |
-
.reshape(num_imgs, -1, self.num_protos)
|
693 |
-
coeff_pred_list.append(coeff_pred_per_level)
|
694 |
-
coeff_pred = torch.cat(coeff_pred_list, dim=1)
|
695 |
-
|
696 |
-
mask_pred_list = []
|
697 |
-
for idx in range(num_imgs):
|
698 |
-
cur_prototypes = prototypes[idx]
|
699 |
-
cur_coeff_pred = coeff_pred[idx]
|
700 |
-
cur_bboxes = bboxes[idx]
|
701 |
-
cur_img_meta = img_meta[idx]
|
702 |
-
|
703 |
-
# Testing state
|
704 |
-
if not self.training:
|
705 |
-
bboxes_for_cropping = cur_bboxes
|
706 |
-
else:
|
707 |
-
cur_sampling_results = sampling_results[idx]
|
708 |
-
pos_assigned_gt_inds = \
|
709 |
-
cur_sampling_results.pos_assigned_gt_inds
|
710 |
-
bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()
|
711 |
-
pos_inds = cur_sampling_results.pos_inds
|
712 |
-
cur_coeff_pred = cur_coeff_pred[pos_inds]
|
713 |
-
|
714 |
-
# Linearly combine the prototypes with the mask coefficients
|
715 |
-
mask_pred = cur_prototypes @ cur_coeff_pred.t()
|
716 |
-
mask_pred = torch.sigmoid(mask_pred)
|
717 |
-
|
718 |
-
h, w = cur_img_meta['img_shape'][:2]
|
719 |
-
bboxes_for_cropping[:, 0] /= w
|
720 |
-
bboxes_for_cropping[:, 1] /= h
|
721 |
-
bboxes_for_cropping[:, 2] /= w
|
722 |
-
bboxes_for_cropping[:, 3] /= h
|
723 |
-
|
724 |
-
mask_pred = self.crop(mask_pred, bboxes_for_cropping)
|
725 |
-
mask_pred = mask_pred.permute(2, 0, 1).contiguous()
|
726 |
-
mask_pred_list.append(mask_pred)
|
727 |
-
return mask_pred_list
|
728 |
-
|
729 |
-
@force_fp32(apply_to=('mask_pred', ))
|
730 |
-
def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):
|
731 |
-
"""Compute loss of the head.
|
732 |
-
|
733 |
-
Args:
|
734 |
-
mask_pred (list[Tensor]): Predicted prototypes with shape
|
735 |
-
(num_classes, H, W).
|
736 |
-
gt_masks (list[Tensor]): Ground truth masks for each image with
|
737 |
-
the same shape of the input image.
|
738 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
739 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
740 |
-
img_meta (list[dict]): Meta information of each image, e.g.,
|
741 |
-
image size, scaling factor, etc.
|
742 |
-
sampling_results (List[:obj:``SamplingResult``]): Sampler results
|
743 |
-
for each image.
|
744 |
-
|
745 |
-
Returns:
|
746 |
-
dict[str, Tensor]: A dictionary of loss components.
|
747 |
-
"""
|
748 |
-
loss_mask = []
|
749 |
-
num_imgs = len(mask_pred)
|
750 |
-
total_pos = 0
|
751 |
-
for idx in range(num_imgs):
|
752 |
-
cur_mask_pred = mask_pred[idx]
|
753 |
-
cur_gt_masks = gt_masks[idx].float()
|
754 |
-
cur_gt_bboxes = gt_bboxes[idx]
|
755 |
-
cur_img_meta = img_meta[idx]
|
756 |
-
cur_sampling_results = sampling_results[idx]
|
757 |
-
|
758 |
-
pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds
|
759 |
-
num_pos = pos_assigned_gt_inds.size(0)
|
760 |
-
# Since we're producing (near) full image masks,
|
761 |
-
# it'd take too much vram to backprop on every single mask.
|
762 |
-
# Thus we select only a subset.
|
763 |
-
if num_pos > self.max_masks_to_train:
|
764 |
-
perm = torch.randperm(num_pos)
|
765 |
-
select = perm[:self.max_masks_to_train]
|
766 |
-
cur_mask_pred = cur_mask_pred[select]
|
767 |
-
pos_assigned_gt_inds = pos_assigned_gt_inds[select]
|
768 |
-
num_pos = self.max_masks_to_train
|
769 |
-
total_pos += num_pos
|
770 |
-
|
771 |
-
gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]
|
772 |
-
|
773 |
-
mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,
|
774 |
-
pos_assigned_gt_inds)
|
775 |
-
if num_pos == 0:
|
776 |
-
loss = cur_mask_pred.sum() * 0.
|
777 |
-
elif mask_targets is None:
|
778 |
-
loss = F.binary_cross_entropy(cur_mask_pred,
|
779 |
-
torch.zeros_like(cur_mask_pred),
|
780 |
-
torch.zeros_like(cur_mask_pred))
|
781 |
-
else:
|
782 |
-
cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)
|
783 |
-
loss = F.binary_cross_entropy(
|
784 |
-
cur_mask_pred, mask_targets,
|
785 |
-
reduction='none') * self.loss_mask_weight
|
786 |
-
|
787 |
-
h, w = cur_img_meta['img_shape'][:2]
|
788 |
-
gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -
|
789 |
-
gt_bboxes_for_reweight[:, 0]) / w
|
790 |
-
gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -
|
791 |
-
gt_bboxes_for_reweight[:, 1]) / h
|
792 |
-
loss = loss.mean(dim=(1,
|
793 |
-
2)) / gt_bboxes_width / gt_bboxes_height
|
794 |
-
loss = torch.sum(loss)
|
795 |
-
loss_mask.append(loss)
|
796 |
-
|
797 |
-
if total_pos == 0:
|
798 |
-
total_pos += 1 # avoid nan
|
799 |
-
loss_mask = [x / total_pos for x in loss_mask]
|
800 |
-
|
801 |
-
return dict(loss_mask=loss_mask)
|
802 |
-
|
803 |
-
def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
|
804 |
-
"""Compute instance segmentation targets for each image.
|
805 |
-
|
806 |
-
Args:
|
807 |
-
mask_pred (Tensor): Predicted prototypes with shape
|
808 |
-
(num_classes, H, W).
|
809 |
-
gt_masks (Tensor): Ground truth masks for each image with
|
810 |
-
the same shape of the input image.
|
811 |
-
pos_assigned_gt_inds (Tensor): GT indices of the corresponding
|
812 |
-
positive samples.
|
813 |
-
Returns:
|
814 |
-
Tensor: Instance segmentation targets with shape
|
815 |
-
(num_instances, H, W).
|
816 |
-
"""
|
817 |
-
if gt_masks.size(0) == 0:
|
818 |
-
return None
|
819 |
-
mask_h, mask_w = mask_pred.shape[-2:]
|
820 |
-
gt_masks = F.interpolate(
|
821 |
-
gt_masks.unsqueeze(0), (mask_h, mask_w),
|
822 |
-
mode='bilinear',
|
823 |
-
align_corners=False).squeeze(0)
|
824 |
-
gt_masks = gt_masks.gt(0.5).float()
|
825 |
-
mask_targets = gt_masks[pos_assigned_gt_inds]
|
826 |
-
return mask_targets
|
827 |
-
|
828 |
-
def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
|
829 |
-
"""Resize, binarize, and format the instance mask predictions.
|
830 |
-
|
831 |
-
Args:
|
832 |
-
mask_pred (Tensor): shape (N, H, W).
|
833 |
-
label_pred (Tensor): shape (N, ).
|
834 |
-
img_meta (dict): Meta information of each image, e.g.,
|
835 |
-
image size, scaling factor, etc.
|
836 |
-
rescale (bool): If rescale is False, then returned masks will
|
837 |
-
fit the scale of imgs[0].
|
838 |
-
Returns:
|
839 |
-
list[ndarray]: Mask predictions grouped by their predicted classes.
|
840 |
-
"""
|
841 |
-
ori_shape = img_meta['ori_shape']
|
842 |
-
scale_factor = img_meta['scale_factor']
|
843 |
-
if rescale:
|
844 |
-
img_h, img_w = ori_shape[:2]
|
845 |
-
else:
|
846 |
-
img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)
|
847 |
-
img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)
|
848 |
-
|
849 |
-
cls_segms = [[] for _ in range(self.num_classes)]
|
850 |
-
if mask_pred.size(0) == 0:
|
851 |
-
return cls_segms
|
852 |
-
|
853 |
-
mask_pred = F.interpolate(
|
854 |
-
mask_pred.unsqueeze(0), (img_h, img_w),
|
855 |
-
mode='bilinear',
|
856 |
-
align_corners=False).squeeze(0) > 0.5
|
857 |
-
mask_pred = mask_pred.cpu().numpy().astype(np.uint8)
|
858 |
-
|
859 |
-
for m, l in zip(mask_pred, label_pred):
|
860 |
-
cls_segms[l].append(m)
|
861 |
-
return cls_segms
|
862 |
-
|
863 |
-
def crop(self, masks, boxes, padding=1):
|
864 |
-
"""Crop predicted masks by zeroing out everything not in the predicted
|
865 |
-
bbox.
|
866 |
-
|
867 |
-
Args:
|
868 |
-
masks (Tensor): shape [H, W, N].
|
869 |
-
boxes (Tensor): bbox coords in relative point form with
|
870 |
-
shape [N, 4].
|
871 |
-
|
872 |
-
Return:
|
873 |
-
Tensor: The cropped masks.
|
874 |
-
"""
|
875 |
-
h, w, n = masks.size()
|
876 |
-
x1, x2 = self.sanitize_coordinates(
|
877 |
-
boxes[:, 0], boxes[:, 2], w, padding, cast=False)
|
878 |
-
y1, y2 = self.sanitize_coordinates(
|
879 |
-
boxes[:, 1], boxes[:, 3], h, padding, cast=False)
|
880 |
-
|
881 |
-
rows = torch.arange(
|
882 |
-
w, device=masks.device, dtype=x1.dtype).view(1, -1,
|
883 |
-
1).expand(h, w, n)
|
884 |
-
cols = torch.arange(
|
885 |
-
h, device=masks.device, dtype=x1.dtype).view(-1, 1,
|
886 |
-
1).expand(h, w, n)
|
887 |
-
|
888 |
-
masks_left = rows >= x1.view(1, 1, -1)
|
889 |
-
masks_right = rows < x2.view(1, 1, -1)
|
890 |
-
masks_up = cols >= y1.view(1, 1, -1)
|
891 |
-
masks_down = cols < y2.view(1, 1, -1)
|
892 |
-
|
893 |
-
crop_mask = masks_left * masks_right * masks_up * masks_down
|
894 |
-
|
895 |
-
return masks * crop_mask.float()
|
896 |
-
|
897 |
-
def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
|
898 |
-
"""Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,
|
899 |
-
and x2 <= image_size. Also converts from relative to absolute
|
900 |
-
coordinates and casts the results to long tensors.
|
901 |
-
|
902 |
-
Warning: this does things in-place behind the scenes so
|
903 |
-
copy if necessary.
|
904 |
-
|
905 |
-
Args:
|
906 |
-
_x1 (Tensor): shape (N, ).
|
907 |
-
_x2 (Tensor): shape (N, ).
|
908 |
-
img_size (int): Size of the input image.
|
909 |
-
padding (int): x1 >= padding, x2 <= image_size-padding.
|
910 |
-
cast (bool): If cast is false, the result won't be cast to longs.
|
911 |
-
|
912 |
-
Returns:
|
913 |
-
tuple:
|
914 |
-
x1 (Tensor): Sanitized _x1.
|
915 |
-
x2 (Tensor): Sanitized _x2.
|
916 |
-
"""
|
917 |
-
x1 = x1 * img_size
|
918 |
-
x2 = x2 * img_size
|
919 |
-
if cast:
|
920 |
-
x1 = x1.long()
|
921 |
-
x2 = x2.long()
|
922 |
-
x1 = torch.min(x1, x2)
|
923 |
-
x2 = torch.max(x1, x2)
|
924 |
-
x1 = torch.clamp(x1 - padding, min=0)
|
925 |
-
x2 = torch.clamp(x2 + padding, max=img_size)
|
926 |
-
return x1, x2
|
927 |
-
|
928 |
-
|
929 |
-
class InterpolateModule(nn.Module):
|
930 |
-
"""This is a module version of F.interpolate.
|
931 |
-
|
932 |
-
Any arguments you give it just get passed along for the ride.
|
933 |
-
"""
|
934 |
-
|
935 |
-
def __init__(self, *args, **kwargs):
|
936 |
-
super().__init__()
|
937 |
-
|
938 |
-
self.args = args
|
939 |
-
self.kwargs = kwargs
|
940 |
-
|
941 |
-
def forward(self, x):
|
942 |
-
"""Forward features from the upstream network."""
|
943 |
-
return F.interpolate(x, *self.args, **self.kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/resnet.py
DELETED
@@ -1,316 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import logging
|
3 |
-
|
4 |
-
import torch.nn as nn
|
5 |
-
import torch.utils.checkpoint as cp
|
6 |
-
|
7 |
-
from .utils import constant_init, kaiming_init
|
8 |
-
|
9 |
-
|
10 |
-
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
|
11 |
-
"""3x3 convolution with padding."""
|
12 |
-
return nn.Conv2d(
|
13 |
-
in_planes,
|
14 |
-
out_planes,
|
15 |
-
kernel_size=3,
|
16 |
-
stride=stride,
|
17 |
-
padding=dilation,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False)
|
20 |
-
|
21 |
-
|
22 |
-
class BasicBlock(nn.Module):
|
23 |
-
expansion = 1
|
24 |
-
|
25 |
-
def __init__(self,
|
26 |
-
inplanes,
|
27 |
-
planes,
|
28 |
-
stride=1,
|
29 |
-
dilation=1,
|
30 |
-
downsample=None,
|
31 |
-
style='pytorch',
|
32 |
-
with_cp=False):
|
33 |
-
super(BasicBlock, self).__init__()
|
34 |
-
assert style in ['pytorch', 'caffe']
|
35 |
-
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
|
36 |
-
self.bn1 = nn.BatchNorm2d(planes)
|
37 |
-
self.relu = nn.ReLU(inplace=True)
|
38 |
-
self.conv2 = conv3x3(planes, planes)
|
39 |
-
self.bn2 = nn.BatchNorm2d(planes)
|
40 |
-
self.downsample = downsample
|
41 |
-
self.stride = stride
|
42 |
-
self.dilation = dilation
|
43 |
-
assert not with_cp
|
44 |
-
|
45 |
-
def forward(self, x):
|
46 |
-
residual = x
|
47 |
-
|
48 |
-
out = self.conv1(x)
|
49 |
-
out = self.bn1(out)
|
50 |
-
out = self.relu(out)
|
51 |
-
|
52 |
-
out = self.conv2(out)
|
53 |
-
out = self.bn2(out)
|
54 |
-
|
55 |
-
if self.downsample is not None:
|
56 |
-
residual = self.downsample(x)
|
57 |
-
|
58 |
-
out += residual
|
59 |
-
out = self.relu(out)
|
60 |
-
|
61 |
-
return out
|
62 |
-
|
63 |
-
|
64 |
-
class Bottleneck(nn.Module):
|
65 |
-
expansion = 4
|
66 |
-
|
67 |
-
def __init__(self,
|
68 |
-
inplanes,
|
69 |
-
planes,
|
70 |
-
stride=1,
|
71 |
-
dilation=1,
|
72 |
-
downsample=None,
|
73 |
-
style='pytorch',
|
74 |
-
with_cp=False):
|
75 |
-
"""Bottleneck block.
|
76 |
-
|
77 |
-
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
|
78 |
-
it is "caffe", the stride-two layer is the first 1x1 conv layer.
|
79 |
-
"""
|
80 |
-
super(Bottleneck, self).__init__()
|
81 |
-
assert style in ['pytorch', 'caffe']
|
82 |
-
if style == 'pytorch':
|
83 |
-
conv1_stride = 1
|
84 |
-
conv2_stride = stride
|
85 |
-
else:
|
86 |
-
conv1_stride = stride
|
87 |
-
conv2_stride = 1
|
88 |
-
self.conv1 = nn.Conv2d(
|
89 |
-
inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
|
90 |
-
self.conv2 = nn.Conv2d(
|
91 |
-
planes,
|
92 |
-
planes,
|
93 |
-
kernel_size=3,
|
94 |
-
stride=conv2_stride,
|
95 |
-
padding=dilation,
|
96 |
-
dilation=dilation,
|
97 |
-
bias=False)
|
98 |
-
|
99 |
-
self.bn1 = nn.BatchNorm2d(planes)
|
100 |
-
self.bn2 = nn.BatchNorm2d(planes)
|
101 |
-
self.conv3 = nn.Conv2d(
|
102 |
-
planes, planes * self.expansion, kernel_size=1, bias=False)
|
103 |
-
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
|
104 |
-
self.relu = nn.ReLU(inplace=True)
|
105 |
-
self.downsample = downsample
|
106 |
-
self.stride = stride
|
107 |
-
self.dilation = dilation
|
108 |
-
self.with_cp = with_cp
|
109 |
-
|
110 |
-
def forward(self, x):
|
111 |
-
|
112 |
-
def _inner_forward(x):
|
113 |
-
residual = x
|
114 |
-
|
115 |
-
out = self.conv1(x)
|
116 |
-
out = self.bn1(out)
|
117 |
-
out = self.relu(out)
|
118 |
-
|
119 |
-
out = self.conv2(out)
|
120 |
-
out = self.bn2(out)
|
121 |
-
out = self.relu(out)
|
122 |
-
|
123 |
-
out = self.conv3(out)
|
124 |
-
out = self.bn3(out)
|
125 |
-
|
126 |
-
if self.downsample is not None:
|
127 |
-
residual = self.downsample(x)
|
128 |
-
|
129 |
-
out += residual
|
130 |
-
|
131 |
-
return out
|
132 |
-
|
133 |
-
if self.with_cp and x.requires_grad:
|
134 |
-
out = cp.checkpoint(_inner_forward, x)
|
135 |
-
else:
|
136 |
-
out = _inner_forward(x)
|
137 |
-
|
138 |
-
out = self.relu(out)
|
139 |
-
|
140 |
-
return out
|
141 |
-
|
142 |
-
|
143 |
-
def make_res_layer(block,
|
144 |
-
inplanes,
|
145 |
-
planes,
|
146 |
-
blocks,
|
147 |
-
stride=1,
|
148 |
-
dilation=1,
|
149 |
-
style='pytorch',
|
150 |
-
with_cp=False):
|
151 |
-
downsample = None
|
152 |
-
if stride != 1 or inplanes != planes * block.expansion:
|
153 |
-
downsample = nn.Sequential(
|
154 |
-
nn.Conv2d(
|
155 |
-
inplanes,
|
156 |
-
planes * block.expansion,
|
157 |
-
kernel_size=1,
|
158 |
-
stride=stride,
|
159 |
-
bias=False),
|
160 |
-
nn.BatchNorm2d(planes * block.expansion),
|
161 |
-
)
|
162 |
-
|
163 |
-
layers = []
|
164 |
-
layers.append(
|
165 |
-
block(
|
166 |
-
inplanes,
|
167 |
-
planes,
|
168 |
-
stride,
|
169 |
-
dilation,
|
170 |
-
downsample,
|
171 |
-
style=style,
|
172 |
-
with_cp=with_cp))
|
173 |
-
inplanes = planes * block.expansion
|
174 |
-
for _ in range(1, blocks):
|
175 |
-
layers.append(
|
176 |
-
block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
|
177 |
-
|
178 |
-
return nn.Sequential(*layers)
|
179 |
-
|
180 |
-
|
181 |
-
class ResNet(nn.Module):
|
182 |
-
"""ResNet backbone.
|
183 |
-
|
184 |
-
Args:
|
185 |
-
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
|
186 |
-
num_stages (int): Resnet stages, normally 4.
|
187 |
-
strides (Sequence[int]): Strides of the first block of each stage.
|
188 |
-
dilations (Sequence[int]): Dilation of each stage.
|
189 |
-
out_indices (Sequence[int]): Output from which stages.
|
190 |
-
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
|
191 |
-
layer is the 3x3 conv layer, otherwise the stride-two layer is
|
192 |
-
the first 1x1 conv layer.
|
193 |
-
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
|
194 |
-
not freezing any parameters.
|
195 |
-
bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
|
196 |
-
running stats (mean and var).
|
197 |
-
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
|
198 |
-
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
|
199 |
-
memory while slowing down the training speed.
|
200 |
-
"""
|
201 |
-
|
202 |
-
arch_settings = {
|
203 |
-
18: (BasicBlock, (2, 2, 2, 2)),
|
204 |
-
34: (BasicBlock, (3, 4, 6, 3)),
|
205 |
-
50: (Bottleneck, (3, 4, 6, 3)),
|
206 |
-
101: (Bottleneck, (3, 4, 23, 3)),
|
207 |
-
152: (Bottleneck, (3, 8, 36, 3))
|
208 |
-
}
|
209 |
-
|
210 |
-
def __init__(self,
|
211 |
-
depth,
|
212 |
-
num_stages=4,
|
213 |
-
strides=(1, 2, 2, 2),
|
214 |
-
dilations=(1, 1, 1, 1),
|
215 |
-
out_indices=(0, 1, 2, 3),
|
216 |
-
style='pytorch',
|
217 |
-
frozen_stages=-1,
|
218 |
-
bn_eval=True,
|
219 |
-
bn_frozen=False,
|
220 |
-
with_cp=False):
|
221 |
-
super(ResNet, self).__init__()
|
222 |
-
if depth not in self.arch_settings:
|
223 |
-
raise KeyError(f'invalid depth {depth} for resnet')
|
224 |
-
assert num_stages >= 1 and num_stages <= 4
|
225 |
-
block, stage_blocks = self.arch_settings[depth]
|
226 |
-
stage_blocks = stage_blocks[:num_stages]
|
227 |
-
assert len(strides) == len(dilations) == num_stages
|
228 |
-
assert max(out_indices) < num_stages
|
229 |
-
|
230 |
-
self.out_indices = out_indices
|
231 |
-
self.style = style
|
232 |
-
self.frozen_stages = frozen_stages
|
233 |
-
self.bn_eval = bn_eval
|
234 |
-
self.bn_frozen = bn_frozen
|
235 |
-
self.with_cp = with_cp
|
236 |
-
|
237 |
-
self.inplanes = 64
|
238 |
-
self.conv1 = nn.Conv2d(
|
239 |
-
3, 64, kernel_size=7, stride=2, padding=3, bias=False)
|
240 |
-
self.bn1 = nn.BatchNorm2d(64)
|
241 |
-
self.relu = nn.ReLU(inplace=True)
|
242 |
-
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
|
243 |
-
|
244 |
-
self.res_layers = []
|
245 |
-
for i, num_blocks in enumerate(stage_blocks):
|
246 |
-
stride = strides[i]
|
247 |
-
dilation = dilations[i]
|
248 |
-
planes = 64 * 2**i
|
249 |
-
res_layer = make_res_layer(
|
250 |
-
block,
|
251 |
-
self.inplanes,
|
252 |
-
planes,
|
253 |
-
num_blocks,
|
254 |
-
stride=stride,
|
255 |
-
dilation=dilation,
|
256 |
-
style=self.style,
|
257 |
-
with_cp=with_cp)
|
258 |
-
self.inplanes = planes * block.expansion
|
259 |
-
layer_name = f'layer{i + 1}'
|
260 |
-
self.add_module(layer_name, res_layer)
|
261 |
-
self.res_layers.append(layer_name)
|
262 |
-
|
263 |
-
self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)
|
264 |
-
|
265 |
-
def init_weights(self, pretrained=None):
|
266 |
-
if isinstance(pretrained, str):
|
267 |
-
logger = logging.getLogger()
|
268 |
-
from ..runner import load_checkpoint
|
269 |
-
load_checkpoint(self, pretrained, strict=False, logger=logger)
|
270 |
-
elif pretrained is None:
|
271 |
-
for m in self.modules():
|
272 |
-
if isinstance(m, nn.Conv2d):
|
273 |
-
kaiming_init(m)
|
274 |
-
elif isinstance(m, nn.BatchNorm2d):
|
275 |
-
constant_init(m, 1)
|
276 |
-
else:
|
277 |
-
raise TypeError('pretrained must be a str or None')
|
278 |
-
|
279 |
-
def forward(self, x):
|
280 |
-
x = self.conv1(x)
|
281 |
-
x = self.bn1(x)
|
282 |
-
x = self.relu(x)
|
283 |
-
x = self.maxpool(x)
|
284 |
-
outs = []
|
285 |
-
for i, layer_name in enumerate(self.res_layers):
|
286 |
-
res_layer = getattr(self, layer_name)
|
287 |
-
x = res_layer(x)
|
288 |
-
if i in self.out_indices:
|
289 |
-
outs.append(x)
|
290 |
-
if len(outs) == 1:
|
291 |
-
return outs[0]
|
292 |
-
else:
|
293 |
-
return tuple(outs)
|
294 |
-
|
295 |
-
def train(self, mode=True):
|
296 |
-
super(ResNet, self).train(mode)
|
297 |
-
if self.bn_eval:
|
298 |
-
for m in self.modules():
|
299 |
-
if isinstance(m, nn.BatchNorm2d):
|
300 |
-
m.eval()
|
301 |
-
if self.bn_frozen:
|
302 |
-
for params in m.parameters():
|
303 |
-
params.requires_grad = False
|
304 |
-
if mode and self.frozen_stages >= 0:
|
305 |
-
for param in self.conv1.parameters():
|
306 |
-
param.requires_grad = False
|
307 |
-
for param in self.bn1.parameters():
|
308 |
-
param.requires_grad = False
|
309 |
-
self.bn1.eval()
|
310 |
-
self.bn1.weight.requires_grad = False
|
311 |
-
self.bn1.bias.requires_grad = False
|
312 |
-
for i in range(1, self.frozen_stages + 1):
|
313 |
-
mod = getattr(self, f'layer{i}')
|
314 |
-
mod.eval()
|
315 |
-
for param in mod.parameters():
|
316 |
-
param.requires_grad = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/ROOPOK/app.py
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
# -* coding:UTF-8 -*
|
2 |
-
# !/usr/bin/env python
|
3 |
-
import numpy as np
|
4 |
-
import gradio as gr
|
5 |
-
import roop.globals
|
6 |
-
from roop.core import (
|
7 |
-
start,
|
8 |
-
decode_execution_providers,
|
9 |
-
suggest_max_memory,
|
10 |
-
suggest_execution_threads,
|
11 |
-
)
|
12 |
-
from roop.processors.frame.core import get_frame_processors_modules
|
13 |
-
from roop.utilities import normalize_output_path
|
14 |
-
import os
|
15 |
-
from PIL import Image
|
16 |
-
|
17 |
-
|
18 |
-
def swap_face(source_file, target_file):
|
19 |
-
|
20 |
-
source_path = "input.jpg"
|
21 |
-
target_path = "target.jpg"
|
22 |
-
|
23 |
-
source_image = Image.fromarray(source_file)
|
24 |
-
source_image.save(source_path)
|
25 |
-
target_image = Image.fromarray(target_file)
|
26 |
-
target_image.save(target_path)
|
27 |
-
|
28 |
-
print("source_path: ", source_path)
|
29 |
-
print("target_path: ", target_path)
|
30 |
-
|
31 |
-
roop.globals.source_path = source_path
|
32 |
-
roop.globals.target_path = target_path
|
33 |
-
output_path = "output.jpg"
|
34 |
-
roop.globals.output_path = normalize_output_path(
|
35 |
-
roop.globals.source_path, roop.globals.target_path, output_path
|
36 |
-
)
|
37 |
-
roop.globals.frame_processors = ["face_swapper"]
|
38 |
-
roop.globals.headless = True
|
39 |
-
roop.globals.keep_fps = True
|
40 |
-
roop.globals.keep_audio = True
|
41 |
-
roop.globals.keep_frames = False
|
42 |
-
roop.globals.many_faces = False
|
43 |
-
roop.globals.video_encoder = "libx264"
|
44 |
-
roop.globals.video_quality = 18
|
45 |
-
roop.globals.max_memory = suggest_max_memory()
|
46 |
-
roop.globals.execution_providers = decode_execution_providers(["cpu"])
|
47 |
-
roop.globals.execution_threads = suggest_execution_threads()
|
48 |
-
|
49 |
-
print(
|
50 |
-
"start process",
|
51 |
-
roop.globals.source_path,
|
52 |
-
roop.globals.target_path,
|
53 |
-
roop.globals.output_path,
|
54 |
-
)
|
55 |
-
|
56 |
-
for frame_processor in get_frame_processors_modules(
|
57 |
-
roop.globals.frame_processors
|
58 |
-
):
|
59 |
-
if not frame_processor.pre_check():
|
60 |
-
return
|
61 |
-
|
62 |
-
start()
|
63 |
-
return output_path
|
64 |
-
|
65 |
-
|
66 |
-
app = gr.Interface(
|
67 |
-
fn=swap_face, inputs=[gr.Image(), gr.Image()], outputs="image"
|
68 |
-
)
|
69 |
-
app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/Tm/roop/processors/frame/__init__.py
DELETED
File without changes
|
spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/__init__.py
DELETED
File without changes
|
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/openai.py
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
""" OpenAI pretrained model functions
|
2 |
-
|
3 |
-
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
4 |
-
"""
|
5 |
-
|
6 |
-
import os
|
7 |
-
import warnings
|
8 |
-
from typing import Union, List
|
9 |
-
|
10 |
-
import torch
|
11 |
-
|
12 |
-
from .model import build_model_from_openai_state_dict
|
13 |
-
from .pretrained import (
|
14 |
-
get_pretrained_url,
|
15 |
-
list_pretrained_tag_models,
|
16 |
-
download_pretrained,
|
17 |
-
)
|
18 |
-
|
19 |
-
__all__ = ["list_openai_models", "load_openai_model"]
|
20 |
-
|
21 |
-
|
22 |
-
def list_openai_models() -> List[str]:
|
23 |
-
"""Returns the names of available CLIP models"""
|
24 |
-
return list_pretrained_tag_models("openai")
|
25 |
-
|
26 |
-
|
27 |
-
def load_openai_model(
|
28 |
-
name: str,
|
29 |
-
model_cfg,
|
30 |
-
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
|
31 |
-
jit=True,
|
32 |
-
cache_dir=os.path.expanduser("~/.cache/clip"),
|
33 |
-
enable_fusion: bool = False,
|
34 |
-
fusion_type: str = "None",
|
35 |
-
):
|
36 |
-
"""Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
|
37 |
-
|
38 |
-
Parameters
|
39 |
-
----------
|
40 |
-
name : str
|
41 |
-
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
|
42 |
-
device : Union[str, torch.device]
|
43 |
-
The device to put the loaded model
|
44 |
-
jit : bool
|
45 |
-
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
|
46 |
-
|
47 |
-
Returns
|
48 |
-
-------
|
49 |
-
model : torch.nn.Module
|
50 |
-
The CLAP model
|
51 |
-
preprocess : Callable[[PIL.Image], torch.Tensor]
|
52 |
-
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
|
53 |
-
"""
|
54 |
-
if get_pretrained_url(name, "openai"):
|
55 |
-
model_path = download_pretrained(
|
56 |
-
get_pretrained_url(name, "openai"), root=cache_dir
|
57 |
-
)
|
58 |
-
elif os.path.isfile(name):
|
59 |
-
model_path = name
|
60 |
-
else:
|
61 |
-
raise RuntimeError(
|
62 |
-
f"Model {name} not found; available models = {list_openai_models()}"
|
63 |
-
)
|
64 |
-
|
65 |
-
try:
|
66 |
-
# loading JIT archive
|
67 |
-
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
|
68 |
-
state_dict = None
|
69 |
-
except RuntimeError:
|
70 |
-
# loading saved state dict
|
71 |
-
if jit:
|
72 |
-
warnings.warn(
|
73 |
-
f"File {model_path} is not a JIT archive. Loading as a state dict instead"
|
74 |
-
)
|
75 |
-
jit = False
|
76 |
-
state_dict = torch.load(model_path, map_location="cpu")
|
77 |
-
|
78 |
-
if not jit:
|
79 |
-
try:
|
80 |
-
model = build_model_from_openai_state_dict(
|
81 |
-
state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type
|
82 |
-
).to(device)
|
83 |
-
except KeyError:
|
84 |
-
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
|
85 |
-
model = build_model_from_openai_state_dict(
|
86 |
-
sd, model_cfg, enable_fusion, fusion_type
|
87 |
-
).to(device)
|
88 |
-
|
89 |
-
if str(device) == "cpu":
|
90 |
-
model.float()
|
91 |
-
return model
|
92 |
-
|
93 |
-
# patch the device names
|
94 |
-
device_holder = torch.jit.trace(
|
95 |
-
lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
|
96 |
-
)
|
97 |
-
device_node = [
|
98 |
-
n
|
99 |
-
for n in device_holder.graph.findAllNodes("prim::Constant")
|
100 |
-
if "Device" in repr(n)
|
101 |
-
][-1]
|
102 |
-
|
103 |
-
def patch_device(module):
|
104 |
-
try:
|
105 |
-
graphs = [module.graph] if hasattr(module, "graph") else []
|
106 |
-
except RuntimeError:
|
107 |
-
graphs = []
|
108 |
-
|
109 |
-
if hasattr(module, "forward1"):
|
110 |
-
graphs.append(module.forward1.graph)
|
111 |
-
|
112 |
-
for graph in graphs:
|
113 |
-
for node in graph.findAllNodes("prim::Constant"):
|
114 |
-
if "value" in node.attributeNames() and str(node["value"]).startswith(
|
115 |
-
"cuda"
|
116 |
-
):
|
117 |
-
node.copyAttributes(device_node)
|
118 |
-
|
119 |
-
model.apply(patch_device)
|
120 |
-
patch_device(model.encode_audio)
|
121 |
-
patch_device(model.encode_text)
|
122 |
-
|
123 |
-
# patch dtype to float32 on CPU
|
124 |
-
if str(device) == "cpu":
|
125 |
-
float_holder = torch.jit.trace(
|
126 |
-
lambda: torch.ones([]).float(), example_inputs=[]
|
127 |
-
)
|
128 |
-
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
|
129 |
-
float_node = float_input.node()
|
130 |
-
|
131 |
-
def patch_float(module):
|
132 |
-
try:
|
133 |
-
graphs = [module.graph] if hasattr(module, "graph") else []
|
134 |
-
except RuntimeError:
|
135 |
-
graphs = []
|
136 |
-
|
137 |
-
if hasattr(module, "forward1"):
|
138 |
-
graphs.append(module.forward1.graph)
|
139 |
-
|
140 |
-
for graph in graphs:
|
141 |
-
for node in graph.findAllNodes("aten::to"):
|
142 |
-
inputs = list(node.inputs())
|
143 |
-
for i in [
|
144 |
-
1,
|
145 |
-
2,
|
146 |
-
]: # dtype can be the second or third argument to aten::to()
|
147 |
-
if inputs[i].node()["value"] == 5:
|
148 |
-
inputs[i].node().copyAttributes(float_node)
|
149 |
-
|
150 |
-
model.apply(patch_float)
|
151 |
-
patch_float(model.encode_audio)
|
152 |
-
patch_float(model.encode_text)
|
153 |
-
model.float()
|
154 |
-
|
155 |
-
model.audio_branch.audio_length = model.audio_cfg.audio_length
|
156 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/pann_model.py
DELETED
@@ -1,704 +0,0 @@
|
|
1 |
-
# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
|
2 |
-
# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
|
3 |
-
# Some layers are re-designed for CLAP
|
4 |
-
import os
|
5 |
-
|
6 |
-
os.environ["NUMBA_CACHE_DIR"] = "/tmp/"
|
7 |
-
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
import torch.nn.functional as F
|
11 |
-
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
|
12 |
-
from torchlibrosa.augmentation import SpecAugmentation
|
13 |
-
|
14 |
-
from .utils import do_mixup, interpolate, pad_framewise_output
|
15 |
-
from .feature_fusion import iAFF, AFF, DAF
|
16 |
-
|
17 |
-
|
18 |
-
def init_layer(layer):
|
19 |
-
"""Initialize a Linear or Convolutional layer."""
|
20 |
-
nn.init.xavier_uniform_(layer.weight)
|
21 |
-
|
22 |
-
if hasattr(layer, "bias"):
|
23 |
-
if layer.bias is not None:
|
24 |
-
layer.bias.data.fill_(0.0)
|
25 |
-
|
26 |
-
|
27 |
-
def init_bn(bn):
|
28 |
-
"""Initialize a Batchnorm layer."""
|
29 |
-
bn.bias.data.fill_(0.0)
|
30 |
-
bn.weight.data.fill_(1.0)
|
31 |
-
|
32 |
-
|
33 |
-
class ConvBlock(nn.Module):
|
34 |
-
def __init__(self, in_channels, out_channels):
|
35 |
-
|
36 |
-
super(ConvBlock, self).__init__()
|
37 |
-
|
38 |
-
self.conv1 = nn.Conv2d(
|
39 |
-
in_channels=in_channels,
|
40 |
-
out_channels=out_channels,
|
41 |
-
kernel_size=(3, 3),
|
42 |
-
stride=(1, 1),
|
43 |
-
padding=(1, 1),
|
44 |
-
bias=False,
|
45 |
-
)
|
46 |
-
|
47 |
-
self.conv2 = nn.Conv2d(
|
48 |
-
in_channels=out_channels,
|
49 |
-
out_channels=out_channels,
|
50 |
-
kernel_size=(3, 3),
|
51 |
-
stride=(1, 1),
|
52 |
-
padding=(1, 1),
|
53 |
-
bias=False,
|
54 |
-
)
|
55 |
-
|
56 |
-
self.bn1 = nn.BatchNorm2d(out_channels)
|
57 |
-
self.bn2 = nn.BatchNorm2d(out_channels)
|
58 |
-
|
59 |
-
self.init_weight()
|
60 |
-
|
61 |
-
def init_weight(self):
|
62 |
-
init_layer(self.conv1)
|
63 |
-
init_layer(self.conv2)
|
64 |
-
init_bn(self.bn1)
|
65 |
-
init_bn(self.bn2)
|
66 |
-
|
67 |
-
def forward(self, input, pool_size=(2, 2), pool_type="avg"):
|
68 |
-
|
69 |
-
x = input
|
70 |
-
x = F.relu_(self.bn1(self.conv1(x)))
|
71 |
-
x = F.relu_(self.bn2(self.conv2(x)))
|
72 |
-
if pool_type == "max":
|
73 |
-
x = F.max_pool2d(x, kernel_size=pool_size)
|
74 |
-
elif pool_type == "avg":
|
75 |
-
x = F.avg_pool2d(x, kernel_size=pool_size)
|
76 |
-
elif pool_type == "avg+max":
|
77 |
-
x1 = F.avg_pool2d(x, kernel_size=pool_size)
|
78 |
-
x2 = F.max_pool2d(x, kernel_size=pool_size)
|
79 |
-
x = x1 + x2
|
80 |
-
else:
|
81 |
-
raise Exception("Incorrect argument!")
|
82 |
-
|
83 |
-
return x
|
84 |
-
|
85 |
-
|
86 |
-
class ConvBlock5x5(nn.Module):
|
87 |
-
def __init__(self, in_channels, out_channels):
|
88 |
-
|
89 |
-
super(ConvBlock5x5, self).__init__()
|
90 |
-
|
91 |
-
self.conv1 = nn.Conv2d(
|
92 |
-
in_channels=in_channels,
|
93 |
-
out_channels=out_channels,
|
94 |
-
kernel_size=(5, 5),
|
95 |
-
stride=(1, 1),
|
96 |
-
padding=(2, 2),
|
97 |
-
bias=False,
|
98 |
-
)
|
99 |
-
|
100 |
-
self.bn1 = nn.BatchNorm2d(out_channels)
|
101 |
-
|
102 |
-
self.init_weight()
|
103 |
-
|
104 |
-
def init_weight(self):
|
105 |
-
init_layer(self.conv1)
|
106 |
-
init_bn(self.bn1)
|
107 |
-
|
108 |
-
def forward(self, input, pool_size=(2, 2), pool_type="avg"):
|
109 |
-
|
110 |
-
x = input
|
111 |
-
x = F.relu_(self.bn1(self.conv1(x)))
|
112 |
-
if pool_type == "max":
|
113 |
-
x = F.max_pool2d(x, kernel_size=pool_size)
|
114 |
-
elif pool_type == "avg":
|
115 |
-
x = F.avg_pool2d(x, kernel_size=pool_size)
|
116 |
-
elif pool_type == "avg+max":
|
117 |
-
x1 = F.avg_pool2d(x, kernel_size=pool_size)
|
118 |
-
x2 = F.max_pool2d(x, kernel_size=pool_size)
|
119 |
-
x = x1 + x2
|
120 |
-
else:
|
121 |
-
raise Exception("Incorrect argument!")
|
122 |
-
|
123 |
-
return x
|
124 |
-
|
125 |
-
|
126 |
-
class AttBlock(nn.Module):
|
127 |
-
def __init__(self, n_in, n_out, activation="linear", temperature=1.0):
|
128 |
-
super(AttBlock, self).__init__()
|
129 |
-
|
130 |
-
self.activation = activation
|
131 |
-
self.temperature = temperature
|
132 |
-
self.att = nn.Conv1d(
|
133 |
-
in_channels=n_in,
|
134 |
-
out_channels=n_out,
|
135 |
-
kernel_size=1,
|
136 |
-
stride=1,
|
137 |
-
padding=0,
|
138 |
-
bias=True,
|
139 |
-
)
|
140 |
-
self.cla = nn.Conv1d(
|
141 |
-
in_channels=n_in,
|
142 |
-
out_channels=n_out,
|
143 |
-
kernel_size=1,
|
144 |
-
stride=1,
|
145 |
-
padding=0,
|
146 |
-
bias=True,
|
147 |
-
)
|
148 |
-
|
149 |
-
self.bn_att = nn.BatchNorm1d(n_out)
|
150 |
-
self.init_weights()
|
151 |
-
|
152 |
-
def init_weights(self):
|
153 |
-
init_layer(self.att)
|
154 |
-
init_layer(self.cla)
|
155 |
-
init_bn(self.bn_att)
|
156 |
-
|
157 |
-
def forward(self, x):
|
158 |
-
# x: (n_samples, n_in, n_time)
|
159 |
-
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
|
160 |
-
cla = self.nonlinear_transform(self.cla(x))
|
161 |
-
x = torch.sum(norm_att * cla, dim=2)
|
162 |
-
return x, norm_att, cla
|
163 |
-
|
164 |
-
def nonlinear_transform(self, x):
|
165 |
-
if self.activation == "linear":
|
166 |
-
return x
|
167 |
-
elif self.activation == "sigmoid":
|
168 |
-
return torch.sigmoid(x)
|
169 |
-
|
170 |
-
|
171 |
-
class Cnn14(nn.Module):
|
172 |
-
def __init__(
|
173 |
-
self,
|
174 |
-
sample_rate,
|
175 |
-
window_size,
|
176 |
-
hop_size,
|
177 |
-
mel_bins,
|
178 |
-
fmin,
|
179 |
-
fmax,
|
180 |
-
classes_num,
|
181 |
-
enable_fusion=False,
|
182 |
-
fusion_type="None",
|
183 |
-
):
|
184 |
-
|
185 |
-
super(Cnn14, self).__init__()
|
186 |
-
|
187 |
-
window = "hann"
|
188 |
-
center = True
|
189 |
-
pad_mode = "reflect"
|
190 |
-
ref = 1.0
|
191 |
-
amin = 1e-10
|
192 |
-
top_db = None
|
193 |
-
|
194 |
-
self.enable_fusion = enable_fusion
|
195 |
-
self.fusion_type = fusion_type
|
196 |
-
|
197 |
-
# Spectrogram extractor
|
198 |
-
self.spectrogram_extractor = Spectrogram(
|
199 |
-
n_fft=window_size,
|
200 |
-
hop_length=hop_size,
|
201 |
-
win_length=window_size,
|
202 |
-
window=window,
|
203 |
-
center=center,
|
204 |
-
pad_mode=pad_mode,
|
205 |
-
freeze_parameters=True,
|
206 |
-
)
|
207 |
-
|
208 |
-
# Logmel feature extractor
|
209 |
-
self.logmel_extractor = LogmelFilterBank(
|
210 |
-
sr=sample_rate,
|
211 |
-
n_fft=window_size,
|
212 |
-
n_mels=mel_bins,
|
213 |
-
fmin=fmin,
|
214 |
-
fmax=fmax,
|
215 |
-
ref=ref,
|
216 |
-
amin=amin,
|
217 |
-
top_db=top_db,
|
218 |
-
freeze_parameters=True,
|
219 |
-
)
|
220 |
-
|
221 |
-
# Spec augmenter
|
222 |
-
self.spec_augmenter = SpecAugmentation(
|
223 |
-
time_drop_width=64,
|
224 |
-
time_stripes_num=2,
|
225 |
-
freq_drop_width=8,
|
226 |
-
freq_stripes_num=2,
|
227 |
-
)
|
228 |
-
|
229 |
-
self.bn0 = nn.BatchNorm2d(64)
|
230 |
-
|
231 |
-
if (self.enable_fusion) and (self.fusion_type == "channel_map"):
|
232 |
-
self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
|
233 |
-
else:
|
234 |
-
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
|
235 |
-
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
|
236 |
-
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
|
237 |
-
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
|
238 |
-
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
|
239 |
-
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
|
240 |
-
|
241 |
-
self.fc1 = nn.Linear(2048, 2048, bias=True)
|
242 |
-
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
|
243 |
-
|
244 |
-
if (self.enable_fusion) and (
|
245 |
-
self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
|
246 |
-
):
|
247 |
-
self.mel_conv1d = nn.Sequential(
|
248 |
-
nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
|
249 |
-
nn.BatchNorm1d(64), # No Relu
|
250 |
-
)
|
251 |
-
if self.fusion_type == "daf_1d":
|
252 |
-
self.fusion_model = DAF()
|
253 |
-
elif self.fusion_type == "aff_1d":
|
254 |
-
self.fusion_model = AFF(channels=64, type="1D")
|
255 |
-
elif self.fusion_type == "iaff_1d":
|
256 |
-
self.fusion_model = iAFF(channels=64, type="1D")
|
257 |
-
|
258 |
-
if (self.enable_fusion) and (
|
259 |
-
self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
|
260 |
-
):
|
261 |
-
self.mel_conv2d = nn.Sequential(
|
262 |
-
nn.Conv2d(1, 64, kernel_size=(5, 5), stride=(6, 2), padding=(2, 2)),
|
263 |
-
nn.BatchNorm2d(64),
|
264 |
-
nn.ReLU(inplace=True),
|
265 |
-
)
|
266 |
-
|
267 |
-
if self.fusion_type == "daf_2d":
|
268 |
-
self.fusion_model = DAF()
|
269 |
-
elif self.fusion_type == "aff_2d":
|
270 |
-
self.fusion_model = AFF(channels=64, type="2D")
|
271 |
-
elif self.fusion_type == "iaff_2d":
|
272 |
-
self.fusion_model = iAFF(channels=64, type="2D")
|
273 |
-
self.init_weight()
|
274 |
-
|
275 |
-
def init_weight(self):
|
276 |
-
init_bn(self.bn0)
|
277 |
-
init_layer(self.fc1)
|
278 |
-
init_layer(self.fc_audioset)
|
279 |
-
|
280 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
281 |
-
"""
|
282 |
-
Input: (batch_size, data_length)"""
|
283 |
-
|
284 |
-
if self.enable_fusion and input["longer"].sum() == 0:
|
285 |
-
# if no audio is longer than 10s, then randomly select one audio to be longer
|
286 |
-
input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True
|
287 |
-
|
288 |
-
if not self.enable_fusion:
|
289 |
-
x = self.spectrogram_extractor(
|
290 |
-
input["waveform"].to(device=device, non_blocking=True)
|
291 |
-
) # (batch_size, 1, time_steps, freq_bins)
|
292 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
293 |
-
|
294 |
-
x = x.transpose(1, 3)
|
295 |
-
x = self.bn0(x)
|
296 |
-
x = x.transpose(1, 3)
|
297 |
-
else:
|
298 |
-
longer_list = input["longer"].to(device=device, non_blocking=True)
|
299 |
-
x = input["mel_fusion"].to(device=device, non_blocking=True)
|
300 |
-
longer_list_idx = torch.where(longer_list)[0]
|
301 |
-
x = x.transpose(1, 3)
|
302 |
-
x = self.bn0(x)
|
303 |
-
x = x.transpose(1, 3)
|
304 |
-
if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
|
305 |
-
new_x = x[:, 0:1, :, :].clone().contiguous()
|
306 |
-
# local processing
|
307 |
-
if len(longer_list_idx) > 0:
|
308 |
-
fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
|
309 |
-
FB, FC, FT, FF = fusion_x_local.size()
|
310 |
-
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
|
311 |
-
fusion_x_local = torch.permute(
|
312 |
-
fusion_x_local, (0, 2, 1)
|
313 |
-
).contiguous()
|
314 |
-
fusion_x_local = self.mel_conv1d(fusion_x_local)
|
315 |
-
fusion_x_local = fusion_x_local.view(
|
316 |
-
FB, FC, FF, fusion_x_local.size(-1)
|
317 |
-
)
|
318 |
-
fusion_x_local = (
|
319 |
-
torch.permute(fusion_x_local, (0, 2, 1, 3))
|
320 |
-
.contiguous()
|
321 |
-
.flatten(2)
|
322 |
-
)
|
323 |
-
if fusion_x_local.size(-1) < FT:
|
324 |
-
fusion_x_local = torch.cat(
|
325 |
-
[
|
326 |
-
fusion_x_local,
|
327 |
-
torch.zeros(
|
328 |
-
(FB, FF, FT - fusion_x_local.size(-1)),
|
329 |
-
device=device,
|
330 |
-
),
|
331 |
-
],
|
332 |
-
dim=-1,
|
333 |
-
)
|
334 |
-
else:
|
335 |
-
fusion_x_local = fusion_x_local[:, :, :FT]
|
336 |
-
# 1D fusion
|
337 |
-
new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
|
338 |
-
new_x[longer_list_idx] = self.fusion_model(
|
339 |
-
new_x[longer_list_idx], fusion_x_local
|
340 |
-
)
|
341 |
-
x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
|
342 |
-
else:
|
343 |
-
x = new_x
|
344 |
-
elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
|
345 |
-
x = x # no change
|
346 |
-
|
347 |
-
if self.training:
|
348 |
-
x = self.spec_augmenter(x)
|
349 |
-
# Mixup on spectrogram
|
350 |
-
if self.training and mixup_lambda is not None:
|
351 |
-
x = do_mixup(x, mixup_lambda)
|
352 |
-
if (self.enable_fusion) and (
|
353 |
-
self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
|
354 |
-
):
|
355 |
-
global_x = x[:, 0:1, :, :]
|
356 |
-
|
357 |
-
# global processing
|
358 |
-
B, C, H, W = global_x.shape
|
359 |
-
global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type="avg")
|
360 |
-
if len(longer_list_idx) > 0:
|
361 |
-
local_x = x[longer_list_idx, 1:, :, :].contiguous()
|
362 |
-
TH = global_x.size(-2)
|
363 |
-
# local processing
|
364 |
-
B, C, H, W = local_x.shape
|
365 |
-
local_x = local_x.view(B * C, 1, H, W)
|
366 |
-
local_x = self.mel_conv2d(local_x)
|
367 |
-
local_x = local_x.view(
|
368 |
-
B, C, local_x.size(1), local_x.size(2), local_x.size(3)
|
369 |
-
)
|
370 |
-
local_x = local_x.permute((0, 2, 1, 3, 4)).contiguous().flatten(2, 3)
|
371 |
-
TB, TC, _, TW = local_x.size()
|
372 |
-
if local_x.size(-2) < TH:
|
373 |
-
local_x = torch.cat(
|
374 |
-
[
|
375 |
-
local_x,
|
376 |
-
torch.zeros(
|
377 |
-
(TB, TC, TH - local_x.size(-2), TW),
|
378 |
-
device=global_x.device,
|
379 |
-
),
|
380 |
-
],
|
381 |
-
dim=-2,
|
382 |
-
)
|
383 |
-
else:
|
384 |
-
local_x = local_x[:, :, :TH, :]
|
385 |
-
|
386 |
-
global_x[longer_list_idx] = self.fusion_model(
|
387 |
-
global_x[longer_list_idx], local_x
|
388 |
-
)
|
389 |
-
x = global_x
|
390 |
-
else:
|
391 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
|
392 |
-
|
393 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
394 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
|
395 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
396 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
|
397 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
398 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
|
399 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
400 |
-
x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
|
401 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
402 |
-
x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg")
|
403 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
404 |
-
x = torch.mean(x, dim=3)
|
405 |
-
|
406 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
407 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
408 |
-
latent_x = latent_x1 + latent_x2
|
409 |
-
latent_x = latent_x.transpose(1, 2)
|
410 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
411 |
-
latent_output = interpolate(latent_x, 32)
|
412 |
-
|
413 |
-
(x1, _) = torch.max(x, dim=2)
|
414 |
-
x2 = torch.mean(x, dim=2)
|
415 |
-
x = x1 + x2
|
416 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
417 |
-
x = F.relu_(self.fc1(x))
|
418 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
419 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
420 |
-
|
421 |
-
output_dict = {
|
422 |
-
"clipwise_output": clipwise_output,
|
423 |
-
"embedding": embedding,
|
424 |
-
"fine_grained_embedding": latent_output,
|
425 |
-
}
|
426 |
-
return output_dict
|
427 |
-
|
428 |
-
|
429 |
-
class Cnn6(nn.Module):
|
430 |
-
def __init__(
|
431 |
-
self,
|
432 |
-
sample_rate,
|
433 |
-
window_size,
|
434 |
-
hop_size,
|
435 |
-
mel_bins,
|
436 |
-
fmin,
|
437 |
-
fmax,
|
438 |
-
classes_num,
|
439 |
-
enable_fusion=False,
|
440 |
-
fusion_type="None",
|
441 |
-
):
|
442 |
-
|
443 |
-
super(Cnn6, self).__init__()
|
444 |
-
|
445 |
-
window = "hann"
|
446 |
-
center = True
|
447 |
-
pad_mode = "reflect"
|
448 |
-
ref = 1.0
|
449 |
-
amin = 1e-10
|
450 |
-
top_db = None
|
451 |
-
|
452 |
-
self.enable_fusion = enable_fusion
|
453 |
-
self.fusion_type = fusion_type
|
454 |
-
|
455 |
-
# Spectrogram extractor
|
456 |
-
self.spectrogram_extractor = Spectrogram(
|
457 |
-
n_fft=window_size,
|
458 |
-
hop_length=hop_size,
|
459 |
-
win_length=window_size,
|
460 |
-
window=window,
|
461 |
-
center=center,
|
462 |
-
pad_mode=pad_mode,
|
463 |
-
freeze_parameters=True,
|
464 |
-
)
|
465 |
-
|
466 |
-
# Logmel feature extractor
|
467 |
-
self.logmel_extractor = LogmelFilterBank(
|
468 |
-
sr=sample_rate,
|
469 |
-
n_fft=window_size,
|
470 |
-
n_mels=mel_bins,
|
471 |
-
fmin=fmin,
|
472 |
-
fmax=fmax,
|
473 |
-
ref=ref,
|
474 |
-
amin=amin,
|
475 |
-
top_db=top_db,
|
476 |
-
freeze_parameters=True,
|
477 |
-
)
|
478 |
-
|
479 |
-
# Spec augmenter
|
480 |
-
self.spec_augmenter = SpecAugmentation(
|
481 |
-
time_drop_width=64,
|
482 |
-
time_stripes_num=2,
|
483 |
-
freq_drop_width=8,
|
484 |
-
freq_stripes_num=2,
|
485 |
-
)
|
486 |
-
|
487 |
-
self.bn0 = nn.BatchNorm2d(64)
|
488 |
-
|
489 |
-
self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
|
490 |
-
self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
|
491 |
-
self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
|
492 |
-
self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)
|
493 |
-
|
494 |
-
self.fc1 = nn.Linear(512, 512, bias=True)
|
495 |
-
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
|
496 |
-
|
497 |
-
self.init_weight()
|
498 |
-
|
499 |
-
def init_weight(self):
|
500 |
-
init_bn(self.bn0)
|
501 |
-
init_layer(self.fc1)
|
502 |
-
init_layer(self.fc_audioset)
|
503 |
-
|
504 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
505 |
-
"""
|
506 |
-
Input: (batch_size, data_length)"""
|
507 |
-
|
508 |
-
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
|
509 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
510 |
-
|
511 |
-
x = x.transpose(1, 3)
|
512 |
-
x = self.bn0(x)
|
513 |
-
x = x.transpose(1, 3)
|
514 |
-
|
515 |
-
if self.training:
|
516 |
-
x = self.spec_augmenter(x)
|
517 |
-
|
518 |
-
# Mixup on spectrogram
|
519 |
-
if self.training and mixup_lambda is not None:
|
520 |
-
x = do_mixup(x, mixup_lambda)
|
521 |
-
|
522 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
|
523 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
524 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
|
525 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
526 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
|
527 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
528 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
|
529 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
530 |
-
x = torch.mean(x, dim=3)
|
531 |
-
|
532 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
533 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
534 |
-
latent_x = latent_x1 + latent_x2
|
535 |
-
latent_x = latent_x.transpose(1, 2)
|
536 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
537 |
-
latent_output = interpolate(latent_x, 16)
|
538 |
-
|
539 |
-
(x1, _) = torch.max(x, dim=2)
|
540 |
-
x2 = torch.mean(x, dim=2)
|
541 |
-
x = x1 + x2
|
542 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
543 |
-
x = F.relu_(self.fc1(x))
|
544 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
545 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
546 |
-
|
547 |
-
output_dict = {
|
548 |
-
"clipwise_output": clipwise_output,
|
549 |
-
"embedding": embedding,
|
550 |
-
"fine_grained_embedding": latent_output,
|
551 |
-
}
|
552 |
-
|
553 |
-
return output_dict
|
554 |
-
|
555 |
-
|
556 |
-
class Cnn10(nn.Module):
|
557 |
-
def __init__(
|
558 |
-
self,
|
559 |
-
sample_rate,
|
560 |
-
window_size,
|
561 |
-
hop_size,
|
562 |
-
mel_bins,
|
563 |
-
fmin,
|
564 |
-
fmax,
|
565 |
-
classes_num,
|
566 |
-
enable_fusion=False,
|
567 |
-
fusion_type="None",
|
568 |
-
):
|
569 |
-
|
570 |
-
super(Cnn10, self).__init__()
|
571 |
-
|
572 |
-
window = "hann"
|
573 |
-
center = True
|
574 |
-
pad_mode = "reflect"
|
575 |
-
ref = 1.0
|
576 |
-
amin = 1e-10
|
577 |
-
top_db = None
|
578 |
-
|
579 |
-
self.enable_fusion = enable_fusion
|
580 |
-
self.fusion_type = fusion_type
|
581 |
-
|
582 |
-
# Spectrogram extractor
|
583 |
-
self.spectrogram_extractor = Spectrogram(
|
584 |
-
n_fft=window_size,
|
585 |
-
hop_length=hop_size,
|
586 |
-
win_length=window_size,
|
587 |
-
window=window,
|
588 |
-
center=center,
|
589 |
-
pad_mode=pad_mode,
|
590 |
-
freeze_parameters=True,
|
591 |
-
)
|
592 |
-
|
593 |
-
# Logmel feature extractor
|
594 |
-
self.logmel_extractor = LogmelFilterBank(
|
595 |
-
sr=sample_rate,
|
596 |
-
n_fft=window_size,
|
597 |
-
n_mels=mel_bins,
|
598 |
-
fmin=fmin,
|
599 |
-
fmax=fmax,
|
600 |
-
ref=ref,
|
601 |
-
amin=amin,
|
602 |
-
top_db=top_db,
|
603 |
-
freeze_parameters=True,
|
604 |
-
)
|
605 |
-
|
606 |
-
# Spec augmenter
|
607 |
-
self.spec_augmenter = SpecAugmentation(
|
608 |
-
time_drop_width=64,
|
609 |
-
time_stripes_num=2,
|
610 |
-
freq_drop_width=8,
|
611 |
-
freq_stripes_num=2,
|
612 |
-
)
|
613 |
-
|
614 |
-
self.bn0 = nn.BatchNorm2d(64)
|
615 |
-
|
616 |
-
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
|
617 |
-
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
|
618 |
-
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
|
619 |
-
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
|
620 |
-
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
|
621 |
-
|
622 |
-
self.fc1 = nn.Linear(1024, 1024, bias=True)
|
623 |
-
self.fc_audioset = nn.Linear(1024, classes_num, bias=True)
|
624 |
-
|
625 |
-
self.init_weight()
|
626 |
-
|
627 |
-
def init_weight(self):
|
628 |
-
init_bn(self.bn0)
|
629 |
-
init_layer(self.fc1)
|
630 |
-
init_layer(self.fc_audioset)
|
631 |
-
|
632 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
633 |
-
"""
|
634 |
-
Input: (batch_size, data_length)"""
|
635 |
-
|
636 |
-
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
|
637 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
638 |
-
|
639 |
-
x = x.transpose(1, 3)
|
640 |
-
x = self.bn0(x)
|
641 |
-
x = x.transpose(1, 3)
|
642 |
-
|
643 |
-
if self.training:
|
644 |
-
x = self.spec_augmenter(x)
|
645 |
-
|
646 |
-
# Mixup on spectrogram
|
647 |
-
if self.training and mixup_lambda is not None:
|
648 |
-
x = do_mixup(x, mixup_lambda)
|
649 |
-
|
650 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
|
651 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
652 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
|
653 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
654 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
|
655 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
656 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
|
657 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
658 |
-
x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
|
659 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
660 |
-
x = torch.mean(x, dim=3)
|
661 |
-
|
662 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
663 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
664 |
-
latent_x = latent_x1 + latent_x2
|
665 |
-
latent_x = latent_x.transpose(1, 2)
|
666 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
667 |
-
latent_output = interpolate(latent_x, 32)
|
668 |
-
|
669 |
-
(x1, _) = torch.max(x, dim=2)
|
670 |
-
x2 = torch.mean(x, dim=2)
|
671 |
-
x = x1 + x2
|
672 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
673 |
-
x = F.relu_(self.fc1(x))
|
674 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
675 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
676 |
-
|
677 |
-
output_dict = {
|
678 |
-
"clipwise_output": clipwise_output,
|
679 |
-
"embedding": embedding,
|
680 |
-
"fine_grained_embedding": latent_output,
|
681 |
-
}
|
682 |
-
|
683 |
-
return output_dict
|
684 |
-
|
685 |
-
|
686 |
-
def create_pann_model(audio_cfg, enable_fusion=False, fusion_type="None"):
|
687 |
-
try:
|
688 |
-
ModelProto = eval(audio_cfg.model_name)
|
689 |
-
model = ModelProto(
|
690 |
-
sample_rate=audio_cfg.sample_rate,
|
691 |
-
window_size=audio_cfg.window_size,
|
692 |
-
hop_size=audio_cfg.hop_size,
|
693 |
-
mel_bins=audio_cfg.mel_bins,
|
694 |
-
fmin=audio_cfg.fmin,
|
695 |
-
fmax=audio_cfg.fmax,
|
696 |
-
classes_num=audio_cfg.class_num,
|
697 |
-
enable_fusion=enable_fusion,
|
698 |
-
fusion_type=fusion_type,
|
699 |
-
)
|
700 |
-
return model
|
701 |
-
except:
|
702 |
-
raise RuntimeError(
|
703 |
-
f"Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough."
|
704 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/roi_align.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
from torch import nn
|
3 |
-
from torchvision.ops import roi_align
|
4 |
-
|
5 |
-
|
6 |
-
# NOTE: torchvision's RoIAlign has a different default aligned=False
|
7 |
-
class ROIAlign(nn.Module):
|
8 |
-
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
|
9 |
-
"""
|
10 |
-
Args:
|
11 |
-
output_size (tuple): h, w
|
12 |
-
spatial_scale (float): scale the input boxes by this number
|
13 |
-
sampling_ratio (int): number of inputs samples to take for each output
|
14 |
-
sample. 0 to take samples densely.
|
15 |
-
aligned (bool): if False, use the legacy implementation in
|
16 |
-
Detectron. If True, align the results more perfectly.
|
17 |
-
|
18 |
-
Note:
|
19 |
-
The meaning of aligned=True:
|
20 |
-
|
21 |
-
Given a continuous coordinate c, its two neighboring pixel indices (in our
|
22 |
-
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
|
23 |
-
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
|
24 |
-
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
|
25 |
-
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
|
26 |
-
pixel indices and therefore it uses pixels with a slightly incorrect alignment
|
27 |
-
(relative to our pixel model) when performing bilinear interpolation.
|
28 |
-
|
29 |
-
With `aligned=True`,
|
30 |
-
we first appropriately scale the ROI and then shift it by -0.5
|
31 |
-
prior to calling roi_align. This produces the correct neighbors; see
|
32 |
-
detectron2/tests/test_roi_align.py for verification.
|
33 |
-
|
34 |
-
The difference does not make a difference to the model's performance if
|
35 |
-
ROIAlign is used together with conv layers.
|
36 |
-
"""
|
37 |
-
super().__init__()
|
38 |
-
self.output_size = output_size
|
39 |
-
self.spatial_scale = spatial_scale
|
40 |
-
self.sampling_ratio = sampling_ratio
|
41 |
-
self.aligned = aligned
|
42 |
-
|
43 |
-
from torchvision import __version__
|
44 |
-
|
45 |
-
version = tuple(int(x) for x in __version__.split(".")[:2])
|
46 |
-
# https://github.com/pytorch/vision/pull/2438
|
47 |
-
assert version >= (0, 7), "Require torchvision >= 0.7"
|
48 |
-
|
49 |
-
def forward(self, input, rois):
|
50 |
-
"""
|
51 |
-
Args:
|
52 |
-
input: NCHW images
|
53 |
-
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
|
54 |
-
"""
|
55 |
-
assert rois.dim() == 2 and rois.size(1) == 5
|
56 |
-
if input.is_quantized:
|
57 |
-
input = input.dequantize()
|
58 |
-
return roi_align(
|
59 |
-
input,
|
60 |
-
rois.to(dtype=input.dtype),
|
61 |
-
self.output_size,
|
62 |
-
self.spatial_scale,
|
63 |
-
self.sampling_ratio,
|
64 |
-
self.aligned,
|
65 |
-
)
|
66 |
-
|
67 |
-
def __repr__(self):
|
68 |
-
tmpstr = self.__class__.__name__ + "("
|
69 |
-
tmpstr += "output_size=" + str(self.output_size)
|
70 |
-
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
|
71 |
-
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
|
72 |
-
tmpstr += ", aligned=" + str(self.aligned)
|
73 |
-
tmpstr += ")"
|
74 |
-
return tmpstr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py
DELETED
@@ -1,270 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from detectron2.config import configurable
|
7 |
-
from detectron2.layers import ShapeSpec, batched_nms_rotated
|
8 |
-
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
|
9 |
-
from detectron2.utils.events import get_event_storage
|
10 |
-
|
11 |
-
from ..box_regression import Box2BoxTransformRotated
|
12 |
-
from ..poolers import ROIPooler
|
13 |
-
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
|
14 |
-
from .box_head import build_box_head
|
15 |
-
from .fast_rcnn import FastRCNNOutputLayers
|
16 |
-
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
|
17 |
-
|
18 |
-
logger = logging.getLogger(__name__)
|
19 |
-
|
20 |
-
"""
|
21 |
-
Shape shorthand in this module:
|
22 |
-
|
23 |
-
N: number of images in the minibatch
|
24 |
-
R: number of ROIs, combined over all images, in the minibatch
|
25 |
-
Ri: number of ROIs in image i
|
26 |
-
K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
|
27 |
-
|
28 |
-
Naming convention:
|
29 |
-
|
30 |
-
deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
|
31 |
-
transform (see :class:`box_regression.Box2BoxTransformRotated`).
|
32 |
-
|
33 |
-
pred_class_logits: predicted class scores in [-inf, +inf]; use
|
34 |
-
softmax(pred_class_logits) to estimate P(class).
|
35 |
-
|
36 |
-
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
|
37 |
-
foreground object classes and K represents the background class.
|
38 |
-
|
39 |
-
pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
|
40 |
-
to detection box predictions.
|
41 |
-
|
42 |
-
gt_proposal_deltas: ground-truth rotated box2box transform deltas
|
43 |
-
"""
|
44 |
-
|
45 |
-
|
46 |
-
def fast_rcnn_inference_rotated(
|
47 |
-
boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
|
48 |
-
):
|
49 |
-
"""
|
50 |
-
Call `fast_rcnn_inference_single_image_rotated` for all images.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
|
54 |
-
boxes for each image. Element i has shape (Ri, K * 5) if doing
|
55 |
-
class-specific regression, or (Ri, 5) if doing class-agnostic
|
56 |
-
regression, where Ri is the number of predicted objects for image i.
|
57 |
-
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
|
58 |
-
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
|
59 |
-
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
|
60 |
-
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
|
61 |
-
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
|
62 |
-
score_thresh (float): Only return detections with a confidence score exceeding this
|
63 |
-
threshold.
|
64 |
-
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
|
65 |
-
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
|
66 |
-
all detections.
|
67 |
-
|
68 |
-
Returns:
|
69 |
-
instances: (list[Instances]): A list of N instances, one for each image in the batch,
|
70 |
-
that stores the topk most confidence detections.
|
71 |
-
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
|
72 |
-
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
|
73 |
-
"""
|
74 |
-
result_per_image = [
|
75 |
-
fast_rcnn_inference_single_image_rotated(
|
76 |
-
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
|
77 |
-
)
|
78 |
-
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
|
79 |
-
]
|
80 |
-
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
|
81 |
-
|
82 |
-
|
83 |
-
def fast_rcnn_inference_single_image_rotated(
|
84 |
-
boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
|
85 |
-
):
|
86 |
-
"""
|
87 |
-
Single-image inference. Return rotated bounding-box detection results by thresholding
|
88 |
-
on scores and applying rotated non-maximum suppression (Rotated NMS).
|
89 |
-
|
90 |
-
Args:
|
91 |
-
Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
|
92 |
-
per image.
|
93 |
-
|
94 |
-
Returns:
|
95 |
-
Same as `fast_rcnn_inference_rotated`, but for only one image.
|
96 |
-
"""
|
97 |
-
valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
|
98 |
-
if not valid_mask.all():
|
99 |
-
boxes = boxes[valid_mask]
|
100 |
-
scores = scores[valid_mask]
|
101 |
-
|
102 |
-
B = 5 # box dimension
|
103 |
-
scores = scores[:, :-1]
|
104 |
-
num_bbox_reg_classes = boxes.shape[1] // B
|
105 |
-
# Convert to Boxes to use the `clip` function ...
|
106 |
-
boxes = RotatedBoxes(boxes.reshape(-1, B))
|
107 |
-
boxes.clip(image_shape)
|
108 |
-
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B
|
109 |
-
# Filter results based on detection scores
|
110 |
-
filter_mask = scores > score_thresh # R x K
|
111 |
-
# R' x 2. First column contains indices of the R predictions;
|
112 |
-
# Second column contains indices of classes.
|
113 |
-
filter_inds = filter_mask.nonzero()
|
114 |
-
if num_bbox_reg_classes == 1:
|
115 |
-
boxes = boxes[filter_inds[:, 0], 0]
|
116 |
-
else:
|
117 |
-
boxes = boxes[filter_mask]
|
118 |
-
scores = scores[filter_mask]
|
119 |
-
|
120 |
-
# Apply per-class Rotated NMS
|
121 |
-
keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
|
122 |
-
if topk_per_image >= 0:
|
123 |
-
keep = keep[:topk_per_image]
|
124 |
-
boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
|
125 |
-
|
126 |
-
result = Instances(image_shape)
|
127 |
-
result.pred_boxes = RotatedBoxes(boxes)
|
128 |
-
result.scores = scores
|
129 |
-
result.pred_classes = filter_inds[:, 1]
|
130 |
-
|
131 |
-
return result, filter_inds[:, 0]
|
132 |
-
|
133 |
-
|
134 |
-
class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
|
135 |
-
"""
|
136 |
-
Two linear layers for predicting Rotated Fast R-CNN outputs.
|
137 |
-
"""
|
138 |
-
|
139 |
-
@classmethod
|
140 |
-
def from_config(cls, cfg, input_shape):
|
141 |
-
args = super().from_config(cfg, input_shape)
|
142 |
-
args["box2box_transform"] = Box2BoxTransformRotated(
|
143 |
-
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
|
144 |
-
)
|
145 |
-
return args
|
146 |
-
|
147 |
-
def inference(self, predictions, proposals):
|
148 |
-
"""
|
149 |
-
Returns:
|
150 |
-
list[Instances]: same as `fast_rcnn_inference_rotated`.
|
151 |
-
list[Tensor]: same as `fast_rcnn_inference_rotated`.
|
152 |
-
"""
|
153 |
-
boxes = self.predict_boxes(predictions, proposals)
|
154 |
-
scores = self.predict_probs(predictions, proposals)
|
155 |
-
image_shapes = [x.image_size for x in proposals]
|
156 |
-
|
157 |
-
return fast_rcnn_inference_rotated(
|
158 |
-
boxes,
|
159 |
-
scores,
|
160 |
-
image_shapes,
|
161 |
-
self.test_score_thresh,
|
162 |
-
self.test_nms_thresh,
|
163 |
-
self.test_topk_per_image,
|
164 |
-
)
|
165 |
-
|
166 |
-
|
167 |
-
@ROI_HEADS_REGISTRY.register()
|
168 |
-
class RROIHeads(StandardROIHeads):
|
169 |
-
"""
|
170 |
-
This class is used by Rotated Fast R-CNN to detect rotated boxes.
|
171 |
-
For now, it only supports box predictions but not mask or keypoints.
|
172 |
-
"""
|
173 |
-
|
174 |
-
@configurable
|
175 |
-
def __init__(self, **kwargs):
|
176 |
-
"""
|
177 |
-
NOTE: this interface is experimental.
|
178 |
-
"""
|
179 |
-
super().__init__(**kwargs)
|
180 |
-
assert (
|
181 |
-
not self.mask_on and not self.keypoint_on
|
182 |
-
), "Mask/Keypoints not supported in Rotated ROIHeads."
|
183 |
-
assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"
|
184 |
-
|
185 |
-
@classmethod
|
186 |
-
def _init_box_head(cls, cfg, input_shape):
|
187 |
-
# fmt: off
|
188 |
-
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
|
189 |
-
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
|
190 |
-
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
|
191 |
-
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
|
192 |
-
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
|
193 |
-
# fmt: on
|
194 |
-
assert pooler_type in ["ROIAlignRotated"], pooler_type
|
195 |
-
# assume all channel counts are equal
|
196 |
-
in_channels = [input_shape[f].channels for f in in_features][0]
|
197 |
-
|
198 |
-
box_pooler = ROIPooler(
|
199 |
-
output_size=pooler_resolution,
|
200 |
-
scales=pooler_scales,
|
201 |
-
sampling_ratio=sampling_ratio,
|
202 |
-
pooler_type=pooler_type,
|
203 |
-
)
|
204 |
-
box_head = build_box_head(
|
205 |
-
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
|
206 |
-
)
|
207 |
-
# This line is the only difference v.s. StandardROIHeads
|
208 |
-
box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
|
209 |
-
return {
|
210 |
-
"box_in_features": in_features,
|
211 |
-
"box_pooler": box_pooler,
|
212 |
-
"box_head": box_head,
|
213 |
-
"box_predictor": box_predictor,
|
214 |
-
}
|
215 |
-
|
216 |
-
@torch.no_grad()
|
217 |
-
def label_and_sample_proposals(self, proposals, targets):
|
218 |
-
"""
|
219 |
-
Prepare some proposals to be used to train the RROI heads.
|
220 |
-
It performs box matching between `proposals` and `targets`, and assigns
|
221 |
-
training labels to the proposals.
|
222 |
-
It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
|
223 |
-
with a fraction of positives that is no larger than `self.positive_sample_fraction.
|
224 |
-
|
225 |
-
Args:
|
226 |
-
See :meth:`StandardROIHeads.forward`
|
227 |
-
|
228 |
-
Returns:
|
229 |
-
list[Instances]: length `N` list of `Instances`s containing the proposals
|
230 |
-
sampled for training. Each `Instances` has the following fields:
|
231 |
-
- proposal_boxes: the rotated proposal boxes
|
232 |
-
- gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
|
233 |
-
(this is only meaningful if the proposal has a label > 0; if label = 0
|
234 |
-
then the ground-truth box is random)
|
235 |
-
- gt_classes: the ground-truth classification lable for each proposal
|
236 |
-
"""
|
237 |
-
if self.proposal_append_gt:
|
238 |
-
proposals = add_ground_truth_to_proposals(targets, proposals)
|
239 |
-
|
240 |
-
proposals_with_gt = []
|
241 |
-
|
242 |
-
num_fg_samples = []
|
243 |
-
num_bg_samples = []
|
244 |
-
for proposals_per_image, targets_per_image in zip(proposals, targets):
|
245 |
-
has_gt = len(targets_per_image) > 0
|
246 |
-
match_quality_matrix = pairwise_iou_rotated(
|
247 |
-
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
|
248 |
-
)
|
249 |
-
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
|
250 |
-
sampled_idxs, gt_classes = self._sample_proposals(
|
251 |
-
matched_idxs, matched_labels, targets_per_image.gt_classes
|
252 |
-
)
|
253 |
-
|
254 |
-
proposals_per_image = proposals_per_image[sampled_idxs]
|
255 |
-
proposals_per_image.gt_classes = gt_classes
|
256 |
-
|
257 |
-
if has_gt:
|
258 |
-
sampled_targets = matched_idxs[sampled_idxs]
|
259 |
-
proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
|
260 |
-
|
261 |
-
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
|
262 |
-
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
|
263 |
-
proposals_with_gt.append(proposals_per_image)
|
264 |
-
|
265 |
-
# Log the number of fg/bg samples that are selected for training ROI heads
|
266 |
-
storage = get_event_storage()
|
267 |
-
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
|
268 |
-
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
|
269 |
-
|
270 |
-
return proposals_with_gt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Banbri/zcvzcv/src/components/icons/hugging-clap.tsx
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
export function HuggingClap() {
|
2 |
-
return (
|
3 |
-
<svg xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
|
4 |
-
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 
11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
|
5 |
-
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
|
6 |
-
</svg>
|
7 |
-
)
|
8 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Apk Descargar Templo Ejecutar 3.md
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar APK Temple Run 3: Cómo jugar la última versión del popular juego Endless Runner</h1>
|
3 |
-
<p>Temple Run es uno de los juegos más exitosos y adictivos en dispositivos móviles. Ha sido descargado más de 500 millones de veces y ha generado varios spin-offs y secuelas. La última entrega, Temple Run 3, está aquí y promete ofrecer más diversión, maravilla, peligro y aventura que nunca. En este artículo, le mostraremos cómo descargar e instalar Temple Run 3 APK en su dispositivo Android, cómo jugar el juego y disfrutar de sus nuevas características, y cómo obtener altas puntuaciones y toneladas de monedas en sus carreras. </p>
|
4 |
-
<h2>apk descargar templo ejecutar 3</h2><br /><p><b><b>Download</b> ★★★ <a href="https://bltlly.com/2v6KZ7">https://bltlly.com/2v6KZ7</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Temple Run 3?</h2>
|
6 |
-
<p>Temple Run 3 es la secuela de los juegos de éxito Temple Run y Temple Run 2, desarrollado por Imangi Studios. Es un juego de corredor sin fin en 3D donde controlas a un explorador que ha robado un ídolo maldito de un antiguo templo y debe correr por su vida de los malvados monos demoníacos que lo persiguen. Tienes que probar tus reflejos mientras corres por las antiguas paredes del templo y a lo largo de escarpados acantilados, deslizando para girar, saltar y deslizarte para evitar obstáculos, recoger monedas y comprar power-ups, desbloquear nuevos personajes y ver hasta dónde puedes correr. </p>
|
7 |
-
<h3>Las características y mejoras de Temple Run 3</h3>
|
8 |
-
<p>Temple Run 3 se basa en los sólidos cimientos de sus predecesores, pero también añade algunas nuevas características y mejoras que lo hacen más emocionante y desafiante. Algunos de ellos son:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>Gráficos mejorados:</b> El juego tiene colores vibrantes y entornos más detallados que se suman al atractivo de la ciudad perdida en el cielo. El juego también tiene efectos más realistas como cascadas, fuego, sombras y reflexiones. </li>
|
11 |
-
<li><b>Nuevos obstáculos:</b> El juego introduce nuevos obstáculos que requieren más habilidad y tiempo para superarse, como estatuas que respiran fuego, ejes oscilantes, picos giratorios, rocas que caen, brechas y más. </li>
|
12 |
-
|
13 |
-
<li><b>Nuevos potenciadores:</b> El juego también tiene algunos nuevos potenciadores que pueden ayudarte en tu carrera, como la invisibilidad que te hace inmune a los obstáculos por un corto tiempo, impulso que te da una ráfaga de velocidad e invencibilidad por una corta distancia, escudo que te protege de un solo golpe, bono de gema que duplica el valor de las gemas que recoger, bono de moneda que duplica el valor de las monedas que recoger, bono de puntuación que aumenta su multiplicador de puntuación, y el imán que atrae a todas las monedas y gemas en su vecindad. </li>
|
14 |
-
<li><b>Nuevos personajes:</b> El juego también tiene algunos nuevos personajes que puedes desbloquear y jugar como, cada uno con sus propias habilidades y fondos. Estos incluyen Scarlett Fox, un ladrón astuto y ágil que puede utilizar su gancho de agarre para balancearse a través de los huecos, Barry Bones, un policía duro y valiente que puede usar su bastón para romper obstáculos, Karma Lee, un artista marcial rápido y elegante que puede usar sus nunchucks para desviar obstáculos, Montana Smith, un explorador robusto y aventurero que puede usar su látigo para agarrar monedas y gemas, Zack Wonder, un doble de riesgo audaz y carismático que puede usar su casco para protegerse de un solo golpe, y Francisco Montoya, un conquistador encantador y extravagante que puede usar su estoque para cortar obstáculos. </li>
|
15 |
-
</ul>
|
16 |
-
<h2> ¿Cómo descargar e instalar Temple Run 3 APK en su dispositivo Android? </h2>
|
17 |
-
<p>Si quieres jugar Temple Run 3 en tu dispositivo Android, tendrás que descargar e instalar el archivo APK del juego. APK significa Android Package Kit, y es un formato de archivo que contiene todos los elementos de una aplicación Android. Al instalar un archivo APK, puedes disfrutar de aplicaciones que no están disponibles en Google Play Store o que no son compatibles con tu dispositivo. Sin embargo, también tendrá que tomar algunas precauciones y cumplir con algunos requisitos antes de instalar un archivo APK. </p>
|
18 |
-
<h3>Los pasos para descargar e instalar el archivo APK</h3>
|
19 |
-
<p>Aquí están los pasos para descargar e instalar el archivo APK Temple Run 3 en su dispositivo Android:</p>
|
20 |
-
<ol>
|
21 |
-
|
22 |
-
<li><b>Descargar el archivo APK:</b> Una vez que haya encontrado una fuente confiable, haga clic en el botón de descarga o enlace y guarde el archivo APK en su dispositivo. Es posible que necesite permitir descargas de fuentes desconocidas en la configuración de su navegador. </li>
|
23 |
-
<li><b>Habilitar fuentes desconocidas en la configuración del dispositivo:</b> Antes de instalar el archivo APK, tendrá que habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Puede ver un mensaje de advertencia de que instalar aplicaciones de fuentes desconocidas puede dañar su dispositivo. Pulse Aceptar para continuar. </li>
|
24 |
-
<li><b>Instalar el archivo APK:</b> Después de haber habilitado fuentes desconocidas, busque el archivo APK en el administrador de archivos o la carpeta de descargas de su dispositivo. Pulse sobre él y siga las instrucciones en la pantalla para instalarlo. Puede ver un mensaje que le pide que conceda permisos a la aplicación. Pulse Instalar para continuar. </li>
|
25 |
-
<li><b>Iniciar el juego y disfrutar:</b> Una vez que la instalación se ha completado, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Puede ver un mensaje que le pide que verifique su edad y acepte los términos del servicio. Toque OK para comenzar a jugar Temple Run 3.</li>
|
26 |
-
</ol>
|
27 |
-
<h3>Las precauciones y requisitos para instalar el archivo APK</h3>
|
28 |
-
<p>Si bien instalar un archivo APK puede ser una forma conveniente de disfrutar de aplicaciones que no están disponibles o compatibles con su dispositivo, también viene con algunos riesgos y limitaciones. Estas son algunas de las precauciones y requisitos que debe tener en cuenta antes de instalar un archivo APK:</p>
|
29 |
-
<p></p>
|
30 |
-
<ul>
|
31 |
-
|
32 |
-
<li><b>Escanear el archivo APK en busca de virus y malware:</b> No todos los archivos APK son seguros. Algunos pueden contener virus, malware, spyware u otro código dañino que puede dañar su dispositivo o comprometer su privacidad. Antes de instalar un archivo APK, asegúrese de escanearlo con una aplicación antivirus o anti-malware de buena reputación. También puede utilizar herramientas en línea como VirusTotal o MetaDefender para comprobar el archivo APK para cualquier amenaza. </li>
|
33 |
-
<li><b>Copia de seguridad de los datos y el dispositivo:</b> La instalación de un archivo APK puede causar algunos problemas o errores en el dispositivo, como bloqueos, bloqueos, fallos o pérdida de datos. Para evitar perder datos o configuraciones importantes, asegúrese de hacer una copia de seguridad de su dispositivo antes de instalar un archivo APK. Puede usar aplicaciones como Google Drive, Dropbox , o Titanium Backup para realizar copias de seguridad de sus datos y dispositivos en la nube o su tarjeta SD. </li>
|
34 |
-
<li><b>Actualizar la aplicación regularmente:</b> Instalar un archivo APK puede no darle acceso a las últimas actualizaciones y características de la aplicación. Algunos archivos APK pueden estar desactualizados o tener errores, y pueden no funcionar correctamente en su dispositivo. Para asegurarte de que tienes la mejor experiencia con la aplicación, asegúrate de actualizarla regularmente. Puede hacer esto comprobando el sitio web donde descargó el archivo APK para cualquier versión nueva, o utilizando aplicaciones como APKUpdater o APKPure que pueden notificarle de cualquier actualización disponible para sus aplicaciones instaladas. </li>
|
35 |
-
</ul>
|
36 |
-
<h2>Cómo jugar Temple Run 3 y disfrutar de sus nuevos elementos de juego? </h2>
|
37 |
-
<p>Ahora que has descargado e instalado Temple Run 3 en tu dispositivo Android, estás listo para jugar el juego y disfrutar de sus nuevos elementos de juego. Temple Run 3 es fácil de aprender, pero difícil de dominar, y te mantendrá enganchado durante horas con su juego adictivo y emocionante. Estos son algunos de los conceptos básicos y consejos sobre cómo jugar Temple Run 3 y divertirse con sus nuevas características. </p>
|
38 |
-
<h3>Los controles y objetivos básicos de Temple Run 3</h3>
|
39 |
-
|
40 |
-
<h3>Las nuevas secciones de zipwire y carro de la mina de Temple Run 3</h3>
|
41 |
-
<p>Una de las nuevas características de Temple Run 3 es la adición de zipwire y secciones de carro de la mina que añaden más variedad y desafío al juego. Estas secciones aparecen aleatoriamente en tu carrera, y requieren diferentes habilidades y estrategias para completarse. </p>
|
42 |
-
<ul>
|
43 |
-
<li><b>Secciones de Zipwire:</b> En estas secciones, tienes que deslizarte por una tirolina de cuerda mientras evitas obstáculos como ramas, rocas, bolas de fuego y monos demonios. Puedes deslizar hacia la izquierda o hacia la derecha para moverte hacia la izquierda o hacia la derecha en la tirolina, deslizar hacia arriba para saltar sobre los obstáculos, deslizar hacia abajo para agacharte bajo los obstáculos e inclinar tu dispositivo para recoger monedas y gemas. También puede tocar la pantalla para usar potenciadores si tiene alguno. </li>
|
44 |
-
<li><b>Secciones del carro de la mina:</b> En estas secciones, usted tiene que saltar a un carro de minas y navegar por una red de pistas de minería en las profundidades de las cuevas. Puede deslizar hacia la izquierda o hacia la derecha para cambiar las pistas, deslizar hacia arriba para saltar sobre los huecos u obstáculos, deslizar hacia abajo para agacharse en el carro de minas, e inclinar el dispositivo para recoger monedas y gemas. También puede tocar la pantalla para usar potenciadores si tiene alguno. </li>
|
45 |
-
</ul>
|
46 |
-
<h3>Los potenciadores y personajes de Temple Run 3</h3>
|
47 |
-
<p>Otra nueva característica de Temple Run 3 es la introducción de algunos nuevos power-ups y personajes que pueden mejorar su experiencia de juego. Los power-ups son artículos especiales que pueden darte una ventaja en tu carrera, como aumento de velocidad, invencibilidad, imán de monedas, etc. Puedes comprar power-ups en la tienda del juego usando monedas o gemas, o encontrarlos aleatoriamente en tu carrera. Puede activarlos tocando la pantalla cuando aparecen en la esquina superior izquierda. Los personajes son diferentes avatares que puedes jugar como en el juego, cada uno con sus propias habilidades y antecedentes. Puedes desbloquear personajes en la tienda del juego usando monedas o gemas, o completando ciertos logros. Puede cambiar caracteres en el menú principal antes de iniciar una ejecución. </p>
|
48 |
-
<tabla>
|
49 |
-
<tr><th>Encendido</th><th>Descripción</th></tr>
|
50 |
-
|
51 |
-
<tr><td>Boost</td><td>Te da una ráfaga de velocidad e invencibilidad para una distancia corta</td></tr>
|
52 |
-
<tr><td>Shield</td><td>Te protege de un golpe</td></tr>
|
53 |
-
<tr><td>Gem Bonus</td><td>Duplica el valor de las gemas que recopilas</td></tr>
|
54 |
-
<tr><td>Coin Bonus</td><td>Duplica el valor de las monedas que recoges</td></tr>
|
55 |
-
<tr><td>Bonus de puntuación</td><td>Aumenta el multiplicador de puntuación</td></tr>
|
56 |
-
<tr><td>Magnet</td><td>Atrae a todas las monedas y gemas en su vecindad</td></tr>
|
57 |
-
</tabla>
|
58 |
-
<tabla <tabla>
|
59 |
-
<tr><th>Carácter</th><th>Descripción</th></tr>
|
60 |
-
<tr><td>Scarlett Fox</td><td>Una ladrona astuta y ágil que puede usar su gancho para balancearse a través de los huecos</td></tr>
|
61 |
-
<tr><td>Barry Bones</td><td>Un policía duro y valiente que puede usar su bastón para romper obstáculos</td></tr>
|
62 |
-
<tr><td>Karma Lee</td><td>Una artista marcial rápida y elegante que puede usar sus nunchucks para desviar los obstáculos</td></tr>
|
63 |
-
<tr><td>Montana Smith</td><td>Un explorador robusto y aventurero que puede usar su látigo para agarrar monedas y gemas</td></tr>
|
64 |
-
<tr><td>Zack Wonder</td><td>Un especialista audaz y carismático que puede usar su casco para protegerse de un golpe</td></tr>
|
65 |
-
<tr><td>Francisco Montoya</td><td>Un conquistador encantador y extravagante que puede usar su estoque para cortar obstáculos</td></tr>
|
66 |
-
</tabla>
|
67 |
-
<h2>¿Cómo obtener puntuaciones altas y toneladas de monedas en Temple Run 3?</h2>
|
68 |
-
<p>Temple Run 3 no es solo un juego de supervivencia, sino también un juego de competición. Usted querrá obtener altas puntuaciones y toneladas de monedas en sus carreras, para que pueda mostrar sus habilidades, desbloquear más artículos, y desafiar a sus amigos. Aquí hay algunos consejos y trucos sobre cómo lograrlo. </p>
|
69 |
-
<h3>Los consejos y trucos para evitar obstáculos y recoger monedas</h3>
|
70 |
-
<p>La forma más básica de conseguir altas puntuaciones y toneladas de monedas en Temple Run 3 es evitar obstáculos y recoger monedas tanto como sea posible. Aquí hay algunos consejos y trucos sobre cómo hacerlo:</p>
|
71 |
-
<ul>
|
72 |
-
|
73 |
-
<li><b>Usa los power-ups correctos en el momento adecuado:</b> Los power-ups pueden darte una ventaja en tu carrera, pero tienes que usarlos sabiamente. No los desperdicie en secciones fáciles o cuando no los necesite. Guárdelos para secciones difíciles o peligrosas o cuando esté en problemas. Por ejemplo, utilice la invisibilidad cuando se encuentra con un montón de obstáculos, utilice impulso cuando se está quedando atrás o necesita un escape rápido, utilice el escudo cuando está a punto de golpear algo, utilice la gema de bonificación o bono de moneda cuando vea un montón de gemas o monedas, utilice la prima de la cuenta cuando usted tiene un multiplicador alto, y utilice el imán cuando usted desea recoger todo en su trayectoria. </li>
|
74 |
-
<li><b>Sigue los rastros de monedas:</b> El juego a menudo te guiará con rastros de monedas que te llevarán al mejor camino o la ruta más segura. Síguelos tanto como sea posible, ya que te ayudarán a evitar obstáculos, recoger monedas, y encontrar power-ups. Sin embargo, no los sigas ciegamente si te llevan a un callejón sin salida o a una trampa. Usa tu juicio e intuición para decidir cuándo seguirlos o desviarte de ellos. </li>
|
75 |
-
<li><b>Evita los bordes:</b> El juego también intentará engañarte con caminos que te lleven a los bordes de los acantilados o las paredes. Evítalos tanto como sea posible, ya que te harán más vulnerable a caerte o golpear algo. Manténgase en el centro o en el lado interno del camino tanto como sea posible, ya que le darán más espacio para maniobrar y recoger monedas. </li>
|
76 |
-
<li><b>No seas codicioso:</b> El juego también te tentará con monedas y gemas que se colocan en lugares arriesgados o difíciles de alcanzar. No se vuelva codicioso y vaya por ellos si son demasiado arriesgados o demasiado lejos. Puede terminar perdiendo más de lo que gana. Enfócate en mantenerte vivo y recoger lo que es fácil y seguro. </li>
|
77 |
-
</ul>
|
78 |
-
<h3>Las mejoras y utilidades que pueden ayudarte en tu ejecución</h3>
|
79 |
-
|
80 |
-
<tabla <tabla>
|
81 |
-
<tr><th>Actualización</th><th>Descripción</th></tr>
|
82 |
-
<tr><td>Duración de invisibilidad</td><td>Aumenta la duración del encendido de invisibilidad</td></tr>
|
83 |
-
<tr><td>Aumentar la distancia</td><td>Aumenta la distancia del aumento de potencia</td></tr>
|
84 |
-
<tr><td>Duración del escudo</td><td>Aumenta la duración del encendido del escudo</td></tr>
|
85 |
-
<tr><td>Gem Value</td><td>Aumenta el valor de las gemas que recopilas</td></tr>
|
86 |
-
<tr><td>Valor de la moneda</td><td>Aumenta el valor de las monedas que recoge</td></tr>
|
87 |
-
<tr><td>Multiplicador de puntuación</td><td>Aumenta el multiplicador de puntuación</td></tr>
|
88 |
-
<tr><td>Duración del imán</td><td>Aumenta la duración del encendido del imán</td></tr>
|
89 |
-
</tabla>
|
90 |
-
<tabla>
|
91 |
-
<tr><th>Utilidad</th><th>Descripción</th></tr>
|
92 |
-
<tr><td>Vida extra</td><td>Te da una vida extra al inicio de tu carrera</td></tr>
|
93 |
-
<tr><td>Head Start</td><td>Le da ventaja al inicio de su carrera</td></tr>
|
94 |
-
<tr><td>Mega Head Start</td><td>Le da una ventaja mega al inicio de su carrera</td></tr>
|
95 |
-
<tr><td>Save Me</td><td>Te salva de la muerte una vez en tu carrera</td></tr>
|
96 |
-
<tr><td>Mega Save Me</td><td>Te salva de la muerte dos veces en tu carrera</td></tr>
|
97 |
-
<tr><td>Gem Pack</td><td>Le da un paquete de gemas para usar en su ejecución</td></tr>
|
98 |
-
<tr><td>Coin Pack</td><td>Te da un paquete de monedas para usar en tu carrera</td></tr>
|
99 |
-
</tabla>
|
100 |
-
<h3>Los logros y tablas de clasificación que pueden motivar a jugar más</h3>
|
101 |
-
|
102 |
-
<h2>Conclusión</h2>
|
103 |
-
<p>Temple Run 3 es un juego divertido y adictivo que te mantendrá entretenido durante horas. Ha mejorado los gráficos, nuevos obstáculos, nuevas secciones, nuevos potenciadores y nuevos personajes que lo hacen más emocionante y desafiante que nunca. Puede descargar e instalar Temple Run 3 APK en su dispositivo Android siguiendo los pasos y precauciones que hemos proporcionado en este artículo. También puedes jugar a Temple Run 3 y disfrutar de sus nuevas características siguiendo los consejos y trucos que hemos compartido en este artículo. También puede obtener altas puntuaciones y toneladas de monedas en Temple Run 3 mediante el uso de las mejoras y utilidades que hemos sugerido en este artículo. También puede utilizar los logros y tablas de clasificación para motivarse a jugar más y mejorar sus habilidades. Esperamos que este artículo te haya ayudado a aprender más sobre Temple Run 3 y cómo jugarlo. Ahora, ¿qué estás esperando? Descargar Temple Run 3 APK hoy y empezar a correr por su vida! </p>
|
104 |
-
<h2>Preguntas frecuentes</h2>
|
105 |
-
<h4>¿Temple Run 3 es gratis? </h4>
|
106 |
-
<p>Sí, Temple Run 3 es gratis para jugar. Sin embargo, también contiene algunas compras opcionales en la aplicación que pueden mejorar tu experiencia de juego, como monedas, gemas, potenciadores, mejoras, utilidades, personajes y fondos de pantalla. Puedes comprar estos artículos con dinero real o ganarlos jugando el juego. </p>
|
107 |
-
<h4>¿Temple Run 3 es compatible con mi dispositivo? </h4>
|
108 |
-
<p>Temple Run 3 es compatible con la mayoría de dispositivos Android que tienen Android 4.1 o superior. Sin embargo, es posible que algunos dispositivos no admitan algunas características o que experimenten algunos problemas de rendimiento debido a limitaciones de hardware. Puede comprobar la compatibilidad de su dispositivo leyendo la descripción de la aplicación o mirando los detalles de la aplicación en el sitio web donde descargó el archivo APK. </p>
|
109 |
-
<h4>¿Es seguro descargar e instalar Temple Run 3? </h4>
|
110 |
-
|
111 |
-
<h4>¿Cómo puedo desbloquear más personajes y fondos de pantalla en Temple Run 3?</h4>
|
112 |
-
<p>Puedes desbloquear más personajes y fondos de pantalla en Temple Run 3 comprándolos en la tienda del juego usando monedas o gemas, o completando ciertos logros. Cada personaje y fondo de pantalla tiene un precio y un requisito diferentes. Puedes verlos en la tienda del juego o en el menú de logros. </p>
|
113 |
-
<h4>¿Cómo puedo contactar a los desarrolladores de Temple Run 3?</h4>
|
114 |
-
<p>Si tiene alguna pregunta, comentario, sugerencia o problema con respecto a Temple Run 3, puede ponerse en contacto con los desarrolladores del juego visitando su sitio web, siguiendo sus cuentas de redes sociales o enviándoles un correo electrónico. Estas son algunas de las formas de contactarlos:</p>
|
115 |
-
<ul>
|
116 |
-
<li><b>Sitio web:</b> https://www.imangistudios.com/</li>
|
117 |
-
<li><b>Facebook:</b> https://www.facebook.com/TempleRun</li>
|
118 |
-
<li><b>Twitter:</b> https://twitter.com/TempleRun</li>
|
119 |
-
<li><b>Correo electrónico:</b> [email protected]</li>
|
120 |
-
</ul></p> 64aa2da5cf<br />
|
121 |
-
<br />
|
122 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Baldi 39s Conceptos Bsicos En Lnea.md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Fundamentos de Baldi en línea: Un juego de aprendizaje divertido y aterrador</h1>
|
3 |
-
<p>Si estás buscando un juego que combine educación, horror y humor, entonces deberías probar <strong>Baldi’s Basics Online</strong>. Este juego es una parodia del software de edutainment barato de los 90 que te hará reír y gritar al mismo tiempo. En este artículo, te diremos todo lo que necesitas saber sobre Baldi’s Basics Online, incluyendo qué es, por qué es popular y cómo jugarlo. </p>
|
4 |
-
<h2>baldi 39;s conceptos básicos en línea</h2><br /><p><b><b>Download</b> ✯✯✯ <a href="https://bltlly.com/2v6K3D">https://bltlly.com/2v6K3D</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es lo básico de Baldi en línea? </h2>
|
6 |
-
<p>Baldi’s Basics Online es un juego de terror de supervivencia creado por Basically Games en 2018. Fue inspirado por juegos como Sonic’s Schoolhouse, I.M. Meen, y 3D Dinosaur Adventure. El juego tiene un estilo gráfico simple y retro que contrasta con su atmósfera espeluznante y tensa. </p>
|
7 |
-
<h3>La historia y el juego de Baldi’s Basics Online</h3>
|
8 |
-
<p>El juego comienza con tu amigo pidiéndote que recojas sus siete cuadernos que dejó en la escuela. Sin embargo, tan pronto como entras en la escuela, te das cuenta de que algo está mal. El profesor, Baldi, no es amable en absoluto. Quiere jugar al escondite contigo, pero si te atrapa, te castigará con su gobernante. Para escapar de él, tienes que resolver problemas matemáticos que son fáciles o imposibles. Cuantos más errores cometes, más enojado y rápido se vuelve Baldi. También tienes que lidiar con otros obstáculos, como puertas cerradas, eventos aleatorios y otros personajes que pueden ayudarte o dificultarte. </p>
|
9 |
-
<h3>Los diferentes modos de juego de Baldi’s Basics Online</h3>
|
10 |
-
<p>Baldi’s Basics Online ofrece dos modos de juego: el modo historia y el modo sin fin. En el modo Historia, tu objetivo es encontrar los siete cuadernos y luego huir de la escuela. En el modo Endless, tu objetivo es encontrar tantos cuadernos como sea posible sin ser atrapado por Baldi. También puedes personalizar el nivel de dificultad cambiando la velocidad y la inteligencia de Baldi y sus amigos. </p>
|
11 |
-
<p></p>
|
12 |
-
<h3>Los personajes y los elementos de Baldi’s Basics Online</h3>
|
13 |
-
|
14 |
-
<ul>
|
15 |
-
<li><strong>Baldi</strong>: El antagonista principal del juego. Es un profesor calvo que ama las matemáticas y odia los errores. Te perseguirá con su regla si tienes algún problema. </li>
|
16 |
-
<li><strong>Director de la Cosa</strong>: El director de la escuela que hace cumplir las reglas. Él te enviará a detención si te ve rompiendo alguna regla, como correr, comer o entrar en las salas de la facultad. </li>
|
17 |
-
<li><strong>Playtime</strong>: Una chica que quiere jugar contigo. Te obligará a saltar la cuerda con ella cinco veces si te atrapa. Puedes usar tijeras para cortar su cuerda y escapar. </li>
|
18 |
-
<li><strong>Gotta Sweep</strong>: Una escoba gigante que barre todo a su manera. Puede alejarte a ti o a Baldi de tu destino. </li>
|
19 |
-
<li><strong>Artes y Artesanos</strong>: Un títere de calcetín al que le gustan las cosas brillantes. Te teletransportará a Baldi si te ve recogiendo demasiados cuadernos. </li>
|
20 |
-
<li><strong>1st Prize</strong>: Un robot que ama abrazar a la gente. Te empujará en la dirección que está enfrentando <p>Algunos de los elementos son:</p>
|
21 |
-
<ul>
|
22 |
-
<li><strong>BSODA</strong>: Un refresco que puede ser usado para rociar Baldi u otros caracteres lejos de ti. </li>
|
23 |
-
<li><strong>Quarter</strong>: Una moneda que se puede utilizar para comprar artículos de la máquina expendedora o pagar al matón. </li>
|
24 |
-
<li><strong>Despertador</strong>: Un reloj que puede ser usado para distraer a Baldi u otros personajes por unos segundos. </li>
|
25 |
-
<li><strong>Zesty Bar</strong>: Una barra de caramelo que puede restaurar tu resistencia y hacerte correr más rápido. </li>
|
26 |
-
<li><strong>Cinta Anti Audición y Desorientación Baldi</strong>: Una cinta que puede ser usada para hacer que Baldi no pueda escucharte por un tiempo. </li>
|
27 |
-
</ul>
|
28 |
-
<h2>¿Por qué es tan popular Baldi’s Basics Online? </h2>
|
29 |
-
<p>Baldi’s Basics Online es un juego que ha ganado mucha popularidad y atención desde su lanzamiento. Hay varias razones por las que este juego es tan atractivo para muchos jugadores, como:</p>
|
30 |
-
<h3>La parodia y el humor de Baldi’s Basics Online</h3>
|
31 |
-
|
32 |
-
<h3>El horror y el desafío de los Fundamentos de Baldi Online</h3>
|
33 |
-
<p>Baldi’s Basics Online es también un juego que ofrece una genuina sensación de miedo y dificultad. El juego utiliza sustos de salto, música de suspenso y eventos impredecibles para crear una atmósfera tensa e inmersiva. El juego también prueba las habilidades y los nervios de los jugadores, ya que tienen que resolver problemas de matemáticas, evitar obstáculos y escapar de Baldi. El juego no es fácil y requiere estrategia, paciencia y suerte para vencerlo. </p>
|
34 |
-
<h3>La comunidad de fans y los mods de Baldi’s Basics Online</h3>
|
35 |
-
<p>Baldi’s Basics Online es también un juego que tiene una base de fans leal y creativa. El juego ha inspirado muchas creaciones de fans, tales como fan art, fan fiction, juegos de fans y videos de fans. El juego también tiene muchos mods que añaden nuevas características, personajes, niveles y modos al juego. Algunos de los mods más populares son Baldi’s Basics Plus, Baldi’s Basics in Education and Learning Remastered, Baldi’s Basics Random Maps Edition y Baldi’s Basics Birthday Bash.</p>
|
36 |
-
<h2>¿Cómo se juega Baldi’s Basics en línea? </h2>
|
37 |
-
<p>Si quieres jugar Baldi’s Basics Online, tendrás que seguir estos pasos:</p>
|
38 |
-
<h3>Los requisitos del sistema y las plataformas de Baldi’s Basics Online</h3>
|
39 |
-
<p>Baldi’s Basics Online es un juego que puede ejecutarse en la mayoría de dispositivos y plataformas. El juego no requiere especificaciones de alta gama o conexión a Internet. El juego está disponible para Windows, Mac, Linux, Android, iOS y navegadores web. Puede descargar el juego de forma gratuita desde su sitio web oficial o desde otras fuentes como itch.io o Game Jolt. También puede jugar el juego en línea en varios sitios web como Crazy Games o Poki. </p>
|
40 |
-
<h3>Los controles y los consejos de Baldi’s Basics Online</h3>
|
41 |
-
|
42 |
-
<p>Algunos de los consejos que pueden ayudarte a jugar mejor son:</p>
|
43 |
-
<ul>
|
44 |
-
<li>Presta atención a los sonidos y las señales en el juego. Pueden darte pistas sobre dónde están Baldi u otros personajes. </li>
|
45 |
-
<li>Usa los objetos sabiamente. Pueden salvarte la vida o meterte en problemas dependiendo de cómo los uses. </li>
|
46 |
-
<li>No seas demasiado codicioso. Coleccionar demasiados cuadernos o artículos puede hacer que Baldi sea más rápido o desencadenar otros eventos. </li>
|
47 |
-
<li>No te rindas. El juego es difícil pero no imposible. Puedes aprender de tus errores e intentarlo de nuevo. </li>
|
48 |
-
</ul>
|
49 |
-
<h3>Los mejores sitios web para jugar Baldi’s Basics Online</h3>
|
50 |
-
<p>Si quieres jugar Baldi’s Basics Online en línea sin descargar nada, puedes visitar algunos de estos sitios web:</p>
|
51 |
-
| Sitio web | URL | | Sitio web | URL | --- - | Lagged | [https://lagged.com/en/g/baldis-basics]( 1 ) | Snokido | [https:/www.snokido.com/game/baldis-basics]( 2 ) | Play-Games | [https:>>/w.games.-games.-baldis-games.html]) | | Juegos | Conclusión
|
52 |
-
<p>Baldi’s Basics Online es un juego que te hará reír y gritar al mismo tiempo. Es una parodia de juegos educativos que se convierte en un juego de terror de supervivencia. Tienes que encontrar cuadernos, resolver problemas de matemáticas, y escapar de Baldi y sus amigos. El juego tiene un estilo de gráficos simple y retro, pero una atmósfera espeluznante y tensa. El juego es popular debido a su humor, horror y desafío. También tiene una comunidad de fans leal y creativa que hace mods y creaciones hechas por fans. Puede jugar el juego en línea en varios sitios web, o descargarlo de forma gratuita desde su sitio web oficial u otras fuentes. </p>
|
53 |
-
<h4>Resumen de los puntos principales</h4>
|
54 |
-
<ul>
|
55 |
-
<li>Baldi’s Basics Online es un juego de terror de supervivencia que parodia el software de entretenimiento educativo barato de los 90. </li>
|
56 |
-
<li>El juego tiene un estilo gráfico simple y retro, pero una atmósfera espeluznante y tensa. </li>
|
57 |
-
|
58 |
-
<li>El juego es popular debido a su humor, horror y desafío. También tiene una comunidad de fans leal y creativa que hace mods y creaciones hechas por fans. </li>
|
59 |
-
<li> Puede jugar el juego en línea en varios sitios web, o descargarlo de forma gratuita desde su sitio web oficial u otras fuentes. </li>
|
60 |
-
</ul>
|
61 |
-
<h4>Llamada a la acción</h4>
|
62 |
-
<p>Si usted está listo para tener un poco de diversión y algunos sustos, entonces usted debe probar Baldi’s Basics Online hoy. Usted no se arrepentirá. Solo ten cuidado de no hacer enojar a Baldi, o te perseguirá con su gobernante. ¡Diviértete y buena suerte! </p>
|
63 |
-
<h2>Preguntas frecuentes</h2>
|
64 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Baldi’s Basics Online:</p>
|
65 |
-
<ol>
|
66 |
-
<li><strong>¿Quién hizo los conceptos básicos de Baldi en línea? </strong></li>
|
67 |
-
<p>Baldi’s Basics Online fue creado por Basically Games, un desarrollador de juegos independiente con sede en los Estados Unidos. El juego fue hecho originalmente para el Meta Game Jam en 2018, donde ganó el primer lugar. </p>
|
68 |
-
<li><strong>Lo básico de Baldi es aterrador en línea? </strong></li>
|
69 |
-
<p>Baldi’s Basics Online es un juego que mezcla horror y humor. El juego utiliza sustos de salto, música de suspenso y eventos impredecibles para crear una atmósfera tensa e inmersiva. El juego también prueba las habilidades y los nervios de los jugadores, ya que tienen que resolver problemas de matemáticas, evitar obstáculos y escapar de Baldi. El juego no es para los débiles de corazón, pero también es muy divertido y agradable. </p>
|
70 |
-
<li><strong>¿Es Baldi’s Basics en línea educativo? </strong></li>
|
71 |
-
<p>Baldi’s Basics Online no es un juego educativo, sino una parodia de juegos educativos. El juego no enseña nada útil o preciso, sino que se burla de los tópicos y tropos del software de entretenimiento educativo de los años 90. El juego utiliza la lógica absurda, problemas matemáticos imposibles, y la actuación de voz cursi para crear un contraste hilarante con sus elementos de terror. El juego no se toma en serio e invita a los jugadores a divertirse con su ridiculez. </p>
|
72 |
-
<li><strong>Es Baldi’s Basics multijugador en línea? </strong></li>
|
73 |
-
|
74 |
-
<li><strong>¿Es Baldi’s Basics Online gratis? </strong></li>
|
75 |
-
<p>Baldi’s Basics Online es un juego gratuito que se puede jugar en línea o descargar desde su sitio web oficial u otras fuentes. El juego no requiere ningún pago o registro para jugar. Sin embargo, el juego acepta donaciones de jugadores que quieren apoyar al desarrollador y al proyecto. </p>
|
76 |
-
</ol></p> 64aa2da5cf<br />
|
77 |
-
<br />
|
78 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Caramelo Crush Saga Mod Apk Barras De Oro Ilimitadas.md
DELETED
@@ -1,171 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Caramelo Crush Saga Mod Apk ilimitadas barras de oro: Todo lo que necesita saber</h1>
|
3 |
-
<p>Si usted es un fan de los juegos de puzzle casual, es probable que haya oído hablar o jugado Candy Crush Saga. Este adictivo juego ha existido durante casi una década y tiene millones de jugadores en todo el mundo. Pero ¿sabías que hay una manera de hacer este juego aún más divertido y gratificante? Sí, estamos hablando de Candy Crush Saga Mod Apk ilimitadas barras de oro.</p>
|
4 |
-
<h2>caramelo crush saga mod apk barras de oro ilimitadas</h2><br /><p><b><b>Download</b> ✔ <a href="https://bltlly.com/2v6LfC">https://bltlly.com/2v6LfC</a></b></p><br /><br />
|
5 |
-
<p>En este artículo, le diremos todo lo que necesita saber acerca de este increíble mod apk que le permite disfrutar de barras de oro ilimitadas, boosters, vidas, movimientos, y más en Candy Crush Saga. También le mostraremos cómo descargar e instalar en su dispositivo, cómo jugar el juego con consejos y trucos, y cuáles son algunas alternativas a este apk mod. Así que, vamos a empezar! </p>
|
6 |
-
<h2>¿Qué es Candy Crush Saga y por qué es popular? </h2>
|
7 |
-
<p>Candy Crush Saga es un videojuego gratuito desarrollado por King. Fue lanzado en 2012 para Facebook, y más tarde para iOS, Android, Windows Phone, Windows 10 y otras plataformas. Es una variación de su juego de navegador Candy Crush.</p>
|
8 |
-
<p>La premisa del juego es simple. Tienes un nivel lleno de dulces. Combina tres o más dulces del mismo color para eliminarlos del tablero. Tienes un número limitado de movimientos o tiempo para completar el objetivo de cada nivel, como anotar un cierto número de puntos, limpiar todos los bloques de gelatina, recoger todos los ingredientes, etc.</p>
|
9 |
-
<p>El juego se divide en muchos episodios, cada uno con un tema y una historia diferentes. Hay miles de niveles en el juego, con los nuevos añadidos cada dos semanas. El juego también cuenta con dulces especiales que tienen diferentes efectos cuando se combina o se activa, tales como caramelos a rayas, caramelos envueltos, bombas de color, etc.</p>
|
10 |
-
<p></p>
|
11 |
-
|
12 |
-
<p>Algunas de las razones por las que Candy Crush Saga es tan popular son:</p>
|
13 |
-
<ul>
|
14 |
-
<li> Tiene un juego simple pero adictivo que atrae a una amplia gama de jugadores. </li>
|
15 |
-
<li> Tiene gráficos coloridos y efectos de sonido que crean una atmósfera dulce y divertida. </li>
|
16 |
-
<h2>¿Qué es Candy Crush Saga Mod Apk y cuáles son sus beneficios? </h2>
|
17 |
-
<p>Un apk mod es una versión modificada de una aplicación original que ha sido alterada para proporcionar algunas características adicionales o beneficios que no están disponibles en la aplicación oficial. Un apk mod puede ser creado por cualquiera que tenga las habilidades y herramientas para modificar el código y los archivos de la aplicación. </p>
|
18 |
-
<p>Candy Crush Saga Mod Apk ilimitadas barras de oro es uno de los más populares y buscados apks mod para Candy Crush Saga. Es un apk mod que le da acceso ilimitado a barras de oro, boosters, vidas, movimientos, y otros recursos en el juego. Con este mod apk, se puede disfrutar de jugar Candy Crush Saga sin limitaciones o restricciones. </p>
|
19 |
-
<p>Algunos de los beneficios de usar Candy Crush Saga Mod Apk ilimitadas barras de oro son:</p>
|
20 |
-
<ul>
|
21 |
-
<li>Puedes obtener barras de oro ilimitadas, que son la moneda premium en el juego. Puedes usar barras de oro para comprar potenciadores, movimientos adicionales, vidas adicionales y otros elementos en el juego. </li>
|
22 |
-
<li>Usted puede obtener impulsores ilimitados, que son elementos especiales que pueden ayudarle a borrar los niveles más rápido y más fácil. Algunos de los potenciadores son martillo de piruleta, caramelos a rayas y envueltos, bomba de color, interruptor libre, etc.</li>
|
23 |
-
<li>Puedes obtener vidas ilimitadas, que son el número de veces que puedes jugar un nivel antes de tener que esperar a que se llenen. Con vidas ilimitadas, puedes jugar todo lo que quieras sin interrupciones. </li>
|
24 |
-
<li>Puedes obtener movimientos ilimitados, que son el número de veces que puedes intercambiar dulces en un nivel. Con movimientos ilimitados, puedes hacer tantas coincidencias como quieras sin preocuparte por quedarte sin ellas. </li>
|
25 |
-
|
26 |
-
<li> Puede eliminar todos los anuncios y ventanas emergentes en el juego, lo que significa que puede jugar sin distracciones o molestias. </li>
|
27 |
-
</ul>
|
28 |
-
<p>Como se puede ver, Candy Crush Saga Mod Apk ilimitadas barras de oro es una gran manera de mejorar su experiencia de juego y divertirse más con Candy Crush Saga. Sin embargo, antes de descargarlo e instalarlo en su dispositivo, necesita saber algunas cosas importantes sobre él. </p> <h2>Cómo descargar e instalar Candy Crush Saga Mod Apk ilimitadas barras de oro? </h2>
|
29 |
-
<p>Ahora que sabes lo que es Candy Crush Saga Mod Apk ilimitadas barras de oro y cuáles son sus beneficios, es posible que se pregunte cómo descargar e instalar en su dispositivo. Bueno, no es muy difícil, pero necesitas seguir algunos pasos cuidadosamente para evitar errores o problemas. </p>
|
30 |
-
<p>Estos son los pasos que debe tomar para descargar e instalar Candy Crush Saga Mod Apk ilimitadas barras de oro:</p>
|
31 |
-
<h3>Paso 1: Encontrar el archivo Apk Mod y verificar su seguridad</h3>
|
32 |
-
<p>Lo primero que tienes que hacer es encontrar el archivo apk mod que desea descargar. Hay muchos sitios web que ofrecen archivos apk mod para varias aplicaciones y juegos, pero no todos ellos son seguros y fiables. Algunos de ellos pueden contener malware, virus o archivos falsos que pueden dañar tu dispositivo o robar tus datos. </p>
|
33 |
-
<p>Por lo tanto, es necesario tener cuidado y elegir una fuente de confianza para descargar el archivo apk mod. Uno de los mejores sitios para las descargas seguras de Android APK es APKMirror. Es propiedad y está operado por el mismo equipo que creó el sitio de noticias de Android ampliamente leído, Android Police, que debería asegurarle que está en manos seguras. </p>
|
34 |
-
|
35 |
-
<p>Antes de instalar el archivo apk mod, necesita verificar su seguridad. Hay dos maneras de hacer eso:</p>
|
36 |
-
<h4>El primer método: Comprobación del hash del archivo</h4>
|
37 |
-
<p>Un hash es un código único que identifica un archivo y su contenido. Si dos archivos tienen el mismo hash, significa que son idénticos. Si tienen diferentes hashes, significa que son diferentes o modificados. </p>
|
38 |
-
<p>Puede utilizar una herramienta como Hashdroid para comprobar y comparar los hashes de la aplicación. Puede hacer coincidir el resultado con otro servicio como Apktovi checker Tool para ver si son consistentes. Si lo son, significa que el archivo apk mod es seguro y original. Si no lo son, significa que el archivo apk mod está manipulado o falso. </p>
|
39 |
-
<h4>El segundo método: Usar un conjunto de herramientas de seguridad</h4>
|
40 |
-
<p>También puede utilizar un kit de herramientas de seguridad como VirusTotal para escanear el archivo apk mod en busca de virus y malware. VirusTotal analiza sus archivos APK y ayuda a detectar todo tipo de virus y malware que puedan estar presentes. El servicio es fácil de usar: solo sube tus archivos y espera los resultados. </p>
|
41 |
-
<p>Si VirusTotal muestra que el archivo apk mod está limpio y seguro, puede proceder a instalarlo. Si muestra que el archivo apk mod está infectado o sospechoso, debe eliminarlo y buscar otra fuente. </p> <h3>Paso 2: Habilitar fuentes desconocidas e instalar el archivo apk Mod</h3>
|
42 |
-
<p>Una vez que haya descargado y verificado el archivo apk mod, es necesario habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de Google Play Store. De forma predeterminada, esta configuración está desactivada, por lo que debe habilitarla manualmente. </p>
|
43 |
-
<p>Para habilitar fuentes desconocidas en su dispositivo, debe seguir estos pasos:</p>
|
44 |
-
<ul>
|
45 |
-
<li>Vaya a la configuración de su dispositivo y toque en Seguridad o Privacidad.</li>
|
46 |
-
<li>Encontrar la opción que dice Fuentes desconocidas o Instalar aplicaciones desconocidas y alternar en. </li>
|
47 |
-
|
48 |
-
</ul>
|
49 |
-
<p>Ahora, ya está listo para instalar el archivo apk mod. Para hacer eso, debe seguir estos pasos:</p>
|
50 |
-
<ul>
|
51 |
-
<li>Vaya a la carpeta Administrador de archivos o descargas de su dispositivo y encuentre el archivo apk mod que descargó. </li>
|
52 |
-
<li>Toque en el archivo apk mod y aparecerá un aviso, preguntándole si desea instalar la aplicación. Toque en Instalar y espere a que termine el proceso de instalación. </li>
|
53 |
-
<li>Una vez que la instalación se realiza, verá un mensaje que dice App Installed. Toque en Abrir para iniciar el juego o Hecho para salir. </li>
|
54 |
-
</ul>
|
55 |
-
<h3>Paso 3: Iniciar y jugar el juego con el Mod Apk Características</h3>
|
56 |
-
<p>Felicidades! Usted ha instalado con éxito Candy Crush Saga Mod Apk barras de oro ilimitadas en su dispositivo. Ahora, se puede iniciar y jugar el juego con todas las características apk mod habilitado. </p>
|
57 |
-
<p>Para iniciar y jugar el juego, debe seguir estos pasos:</p>
|
58 |
-
<ul>
|
59 |
-
<li>Ve a la pantalla de inicio de tu dispositivo o al cajón de aplicaciones y encuentra el icono de la saga Candy Crush. Toca en él para abrir el juego. </li>
|
60 |
-
<li>El juego se cargará y le pedirá que inicie sesión con su cuenta de Facebook o jugar como invitado. Puedes elegir cualquiera de las dos opciones, pero te recomendamos iniciar sesión con tu cuenta de Facebook para guardar tu progreso y sincronizar tus datos entre dispositivos. </li>
|
61 |
-
<li> El juego se iniciará y verá el menú principal. Puede elegir jugar el juego desde el principio o reanudar desde donde lo dejó. </li>
|
62 |
-
<li>Te darás cuenta de que tienes barras de oro ilimitadas, potenciadores, vidas, movimientos y otros recursos en el juego. Puedes usarlos tanto como quieras sin limitaciones o restricciones. </li>
|
63 |
-
<li>También notarás que todos los niveles y episodios están desbloqueados en el juego. Puedes jugar a cualquier nivel sin tener que completar los anteriores. </li>
|
64 |
-
<li>También notarás que no hay anuncios ni ventanas emergentes en el juego. Puedes jugar sin distracciones ni molestias. </li>
|
65 |
-
</ul>
|
66 |
-
|
67 |
-
<p>Candy Crush Saga es un juego simple pero desafiante que requiere estrategia, habilidad y suerte. Puede ser frustrante a veces, especialmente cuando se queda atascado en un nivel difícil o se queda sin recursos. Pero no te preocupes, tenemos algunos consejos y trucos que pueden ayudarte a jugar mejor y superar cualquier obstáculo. </p>
|
68 |
-
<p>Aquí están algunos de los mejores consejos y trucos para jugar Candy Crush Saga:</p>
|
69 |
-
<h3>Consejo 1: Conozca los diferentes tipos de niveles y objetivos</h3>
|
70 |
-
<p>Candy Crush Saga tiene diferentes tipos de niveles y objetivos que necesitas completar para progresar en el juego. Algunos de los tipos más comunes son:</p>
|
71 |
-
<ul>
|
72 |
-
<li>Niveles de puntuación: Es necesario anotar un cierto número de puntos dentro de un número determinado de movimientos o tiempo. </li>
|
73 |
-
<li>Niveles de jalea: Es necesario limpiar todos los bloques de jalea de la junta haciendo coincidir los caramelos en la parte superior de ellos. </li>
|
74 |
-
<li>Niveles de ingredientes: Es necesario bajar todos los ingredientes (cerezas o avellanas) a la parte inferior de la placa por la limpieza de los dulces por debajo de ellos. </li>
|
75 |
-
<li>Niveles de pedido: Es necesario recoger un número específico y el tipo de dulces o dulces especiales haciendo coincidir. </li>
|
76 |
-
<li>Niveles mixtos: Necesitas completar dos o más objetivos diferentes en un nivel. </li>
|
77 |
-
</ul>
|
78 |
-
<p>Puedes saber qué tipo de nivel y objetivo estás jugando mirando los iconos en la esquina superior izquierda de la pantalla. También puedes ver cuántos movimientos o tiempo te queda, cuántas estrellas has ganado y cuánto progreso has hecho hacia el objetivo. </p>
|
79 |
-
|
80 |
-
<h3>Consejo 2: Conozca los dulces especiales y cómo crearlos y usarlos</h3>
|
81 |
-
<p>Los dulces especiales son dulces que tienen diferentes efectos cuando se combinan o se activan. Pueden ayudarte a limpiar más dulces, crear más combos y lograr tus objetivos más rápido. Hay cuatro tipos principales de dulces especiales en Candy Crush Saga:</p>
|
82 |
-
<ul>
|
83 |
-
<li>Caramelos a rayas: Estos son caramelos que tienen rayas horizontales o verticales en ellos. Se crean haciendo coincidir cuatro caramelos del mismo color en una fila o columna. Cuando coincidan, limpiarán toda una fila o columna de dulces, dependiendo de la dirección de sus rayas. </li>
|
84 |
-
<li>Caramelos envueltos: Estos son caramelos que tienen una envoltura de colores alrededor de ellos. Se crean haciendo coincidir cinco caramelos del mismo color en forma de L o T. Cuando coinciden, explotarán dos veces, despejando un área de 3x3 de dulces a su alrededor cada vez. </li>
|
85 |
-
<li>Bombas de color: Estos son caramelos que tienen chispas de chocolate en ellos. Se crean haciendo coincidir cinco caramelos del mismo color en una fila o columna. Cuando se combina con otro caramelo, borrarán todos los caramelos de ese color del tablero. </li>
|
86 |
-
<li>Caramelos de pescado: Estos son caramelos que parecen peces. Se crean haciendo coincidir cuatro caramelos del mismo color en forma cuadrada. Cuando coincidan, nadarán a un caramelo al azar en el tablero y lo limpiarán. También pueden apuntar a bloques de gelatina, ingredientes o bloqueadores dependiendo del objetivo del nivel. </li>
|
87 |
-
</ul>
|
88 |
-
<p>También puedes combinar dos dulces especiales para crear efectos aún más potentes. Por ejemplo, si combinas un caramelo a rayas y un caramelo envuelto, crearás un caramelo gigante que limpiará tres filas y tres columnas de dulces. Si combinas una bomba de color y un caramelo de pescado, crearás un banco de peces que limpiará todos los caramelos de un color del tablero. </p>
|
89 |
-
|
90 |
-
<p>Los combos son cuando combinas más de tres caramelos del mismo color en un solo movimiento. Pueden ayudarte a limpiar más dulces, crear dulces más especiales y obtener más puntos. Sin embargo, no todos los combos son igualmente efectivos. Algunos combos son mejores que otros, dependiendo del objetivo y diseño del nivel. </p>
|
91 |
-
<p>Aquí están algunos de los mejores y peores combos para usar en el juego:</p>
|
92 |
-
<h4>Los mejores combos:</h4>
|
93 |
-
<ul>
|
94 |
-
<li>Rayas + rayas: Este combo limpiará dos filas y dos columnas de caramelos, creando una explosión en forma de cruz. Puede ayudarte a limpiar una gran área del tablero, especialmente si las rayas son perpendiculares entre sí. </li>
|
95 |
-
<li>Envuelto + Envuelto: Este combo creará una gran explosión que despejará un área de 5x5 de dulces a su alrededor. Puede ayudarlo a eliminar bloqueadores, bloques de jalea o ingredientes que son difíciles de alcanzar. </li>
|
96 |
-
<li>Color Bomb + Color Bomb: Este combo borrará todos los caramelos del tablero, dejando solo dulces especiales y bloqueadores. Puede ayudarte a completar cualquier objetivo que requiera limpiar muchos dulces o crear muchos dulces especiales. </li>
|
97 |
-
</ul>
|
98 |
-
<h4>Los peores combos:</h4>
|
99 |
-
<ul>
|
100 |
-
<li>Pescado + Pescado: Este combo creará dos peces que nadarán a dos caramelos al azar en el tablero y limpiarlos. Puede ser muy impredecible e ineficaz, especialmente si los peces se dirigen a dulces irrelevantes o aislados. </li>
|
101 |
-
<li>Rayas + Pescado: Este combo creará un pez rayado que nadará a un caramelo al azar en el tablero y despejar una fila o columna de dulces, dependiendo de la dirección de sus rayas. También puede ser muy impredecible e ineficaz, especialmente si el pez rayado se dirige a un caramelo que está lejos del objetivo o bloqueado por otros dulces. </li>
|
102 |
-
|
103 |
-
</ul>
|
104 |
-
<p>Usted debe tratar de utilizar los mejores combos tanto como sea posible porque pueden ayudar a borrar los niveles más rápido y más fácil. Sin embargo, también debes evitar usar los peores combos tanto como sea posible porque pueden desperdiciar tus movimientos y recursos. </p>
|
105 |
-
<h3>Consejo 4: Cómo obtener barras de oro gratis y refuerzos en el juego</h3>
|
106 |
-
<p>Las barras de oro y los boosters son recursos muy útiles en Candy Crush Saga. Pueden ayudarte a comprar movimientos adicionales, vidas u objetos en el juego. Sin embargo, también son muy limitadas y caras. Solo puedes obtener unas cuantas barras de oro y refuerzos de forma gratuita al completar ciertas tareas o logros en el juego. También puedes comprarlos con dinero real, pero eso puede ser muy costoso y no todos pueden permitírselo. </p>
|
107 |
-
<p>Afortunadamente, hay otra manera de obtener barras de oro gratis y refuerzos en el juego sin gastar dinero o usar cualquier apk mod. Esto es mediante el uso de algunos trucos y hacks que están disponibles en línea. Aquí están algunos de ellos:</p>
|
108 |
-
<h4>El primer truco: Usar lapso de tiempo</h4>
|
109 |
-
<p>Este truco consiste en cambiar la configuración de fecha y hora del dispositivo para engañar al juego a pensar que ha pasado más tiempo del que realmente tiene. De esta manera, puede rellenar sus vidas más rápido, obtener más recompensas diarias, y restablecer sus vueltas de la rueda. </p>
|
110 |
-
<p>Para usar este truco, debes seguir estos pasos:</p>
|
111 |
-
<ul>
|
112 |
-
<li>Cierra el juego completamente y asegúrate de que no se ejecuta en segundo plano. </li>
|
113 |
-
<li>Vaya a la configuración de su dispositivo y apague su conexión a Internet (Wi-Fi o datos móviles). </li>
|
114 |
-
<li>Vaya a la configuración de fecha y hora de su dispositivo y cámbiela a unas pocas horas o días antes de su hora actual. </li>
|
115 |
-
<li>Vuelve al juego y ábrelo. Verás que tus vidas están llenas, tus recompensas diarias están listas, y tus giros de rueda se restablecen. </li>
|
116 |
-
<li>Recoge tus recompensas y juega el juego como de costumbre. </li>
|
117 |
-
<li>Cuando haya terminado de jugar, cierre el juego completamente de nuevo y encienda su conexión a Internet. </li>
|
118 |
-
|
119 |
-
</ul>
|
120 |
-
<p>Nota: Este truco puede no funcionar para algunos dispositivos o versiones del juego. También puede causar algunos fallos o errores en el juego. Úsalo bajo tu propio riesgo. </p>
|
121 |
-
<h4>El segundo truco: Usar generadores en línea</h4>
|
122 |
-
<p>Este truco implica el uso de generadores en línea que pretenden darle barras de oro gratis y refuerzos en Candy Crush Saga. Estos generadores son sitios web que le piden que introduzca su nombre de usuario o dirección de correo electrónico, seleccione el tipo de dispositivo, elija cuántas barras de oro y refuerzos que desea, y haga clic en un botón que dice Generar o Hack. También pueden pedirle que complete algunas encuestas, ofertas o pasos de verificación humana antes de darle los recursos. </p>
|
123 |
-
<p>Para usar este truco, debes seguir estos pasos:</p>
|
124 |
-
<ul>
|
125 |
-
<li>Ir a su navegador y buscar Candy Crush Saga generadores en línea o hacks. Verá una lista de resultados con diferentes sitios web que ofrecen este servicio. </li>
|
126 |
-
<li>Elija uno de los sitios web que se ve confiable y legítimo. Evite los que se ven sombrío o estafa. </li>
|
127 |
-
<li>Introduzca su nombre de usuario o dirección de correo electrónico que utiliza para jugar Candy Crush Saga. Seleccione el tipo de dispositivo (Android o iOS) y elija cuántas barras de oro y boosters desea. </li>
|
128 |
-
<li>Haga clic en el botón Generar o Hack y espere a que el proceso termine. Se le puede pedir que complete algunas encuestas, ofertas o pasos de verificación humana antes de obtener los recursos. Siga las instrucciones y complételas según sea necesario. </li>
|
129 |
-
<li>Una vez que recibas el mensaje de confirmación que dice que tus recursos están listos, vuelve al juego y ábrelo. Verá que sus barras de oro y refuerzos se añaden a su cuenta. </li>
|
130 |
-
</ul>
|
131 |
-
<p>Nota: Este truco puede no funcionar para algunos dispositivos o versiones del juego. También puede ser arriesgado e ilegal de usar. Usted puede ser expulsado del juego o perder su cuenta si la usa. Utilícela bajo su propio riesgo. </p>
|
132 |
-
<h2>Alternativas a Candy Crush Saga Mod Apk barras de oro ilimitadas</h2>
|
133 |
-
|
134 |
-
<p>Aquí están algunas de las alternativas a Candy Crush Saga Mod Apk ilimitadas barras de oro:</p>
|
135 |
-
<h3>Alternativa 1: Jugar otros juegos similares</h3>
|
136 |
-
<p>Si te gusta Candy Crush Saga, es posible que también te gusten otros juegos similares que tienen el mismo juego, gráficos y características. Hay muchos otros juegos de puzzle casual que puedes jugar en tu dispositivo, como:</p>
|
137 |
-
<ul>
|
138 |
-
<li>Candy Crush Soda Saga: Este es un spin-off de Candy Crush Saga que introduce nuevos elementos como botellas de soda, glaseado, miel, etc. Tiene más de 6000 niveles y nuevos añadidos cada semana. </li>
|
139 |
-
<li>Candy Crush Jelly Saga: Este es otro spin-off de Candy Crush Saga que introduce nuevos elementos como jelly queens, pufflers, etc. Tiene más de 3000 niveles y nuevos añadidos cada semana. </li>
|
140 |
-
<li>Candy Crush Friends Saga: Este es el último spin-off de Candy Crush Saga que introduce nuevos elementos como amigos, disfraces, etc. Tiene más de 1000 niveles y nuevos añadidos cada semana. </li>
|
141 |
-
<li>Toon Blast: Este es un juego que tiene un juego similar a Candy Crush Saga, pero con personajes de dibujos animados y temas. Tiene más de 6000 niveles y nuevos añadidos cada semana. </li>
|
142 |
-
<li>Homescapes: Este es un juego que combina rompecabezas de match-3 con el diseño y la renovación del hogar. Tiene más de 6000 niveles y nuevos añadidos cada semana. </li>
|
143 |
-
</ul>
|
144 |
-
<p>Puedes descargar y jugar estos juegos gratis desde la Google Play Store o la App Store. Son juegos divertidos, desafiantes y adictivos que pueden mantenerte entretenido durante horas. </p>
|
145 |
-
<h3>Alternativa 2: Utilice otras formas de obtener barras de oro ilimitadas y potenciadores</h3>
|
146 |
-
<p>Si usted no quiere utilizar un apk mod o un generador en línea para obtener barras de oro ilimitadas y refuerzos en Candy Crush Saga, hay algunas otras maneras que usted puede probar. Estas formas son legales y seguras de usar, pero pueden requerir más tiempo y esfuerzo. Estas son algunas de ellas:</p>
|
147 |
-
<ul>
|
148 |
-
|
149 |
-
<li>Conectar con Facebook: El juego le permite conectarse con su cuenta de Facebook y sincronizar su progreso a través de dispositivos. Al hacerlo, también puede obtener algunos lingotes de oro gratis y refuerzos de sus amigos o invitándolos a jugar el juego. </li>
|
150 |
-
<li>Ver anuncios: El juego a veces le ofrece la opción de ver anuncios a cambio de algunas barras de oro gratis y refuerzos. Puede aprovechar esta opción siempre que esté disponible. </li>
|
151 |
-
<li>Participar en eventos y concursos: El juego a menudo alberga eventos y concursos que le dan la oportunidad de ganar barras de oro y refuerzos jugando ciertos niveles o completar ciertos objetivos. </li>
|
152 |
-
</ul>
|
153 |
-
<p>Puedes usar estas formas para obtener más barras de oro y potenciadores en Candy Crush Saga sin usar ningún mod apk o hack. Sin embargo , pueden no darle tantos lingotes de oro y boosters como un apk mod o un hack haría. </p>
|
154 |
-
<h2>Conclusión</h2>
|
155 |
-
<p>Candy Crush Saga es un juego divertido y adictivo que puede mantenerte entretenido durante horas. Sin embargo, también puede ser frustrante y desafiante a veces, especialmente cuando te quedas sin recursos o te quedas atascado en un nivel difícil. Es por eso que algunas personas utilizan Candy Crush Saga Mod Apk ilimitadas Barras de Oro para disfrutar del juego con más características y beneficios. </p>
|
156 |
-
<p>En este artículo, te hemos dicho todo lo que necesitas saber sobre Candy Crush Saga Mod Apk ilimitadas Barras de Oro. Hemos explicado qué es, cuáles son sus beneficios, cómo descargarlo e instalarlo, cómo jugar el juego con consejos y trucos, y cuáles son algunas alternativas. Esperamos que haya encontrado este artículo útil e informativo. </p>
|
157 |
-
<p>Ahora, es su turno para decidir si desea utilizar Candy Crush Saga Mod Apk ilimitadas barras de oro o no. Si lo hace, asegúrese de seguir los pasos y precauciones que hemos mencionado en este artículo. Si no lo haces, todavía puedes disfrutar del juego con otras formas que hemos sugerido en este artículo. </p>
|
158 |
-
|
159 |
-
<h2>Preguntas frecuentes</h2>
|
160 |
-
<p>Aquí están algunas de las preguntas más frecuentes que la gente tiene acerca de Candy Crush Saga Mod Apk ilimitadas barras de oro:</p>
|
161 |
-
<h3>Q: ¿Es Candy Crush Saga Mod Apk barras de oro ilimitadas seguro de usar? </h3>
|
162 |
-
<p>A: Depende de dónde lo descargues y cómo lo verifiques. Si lo descarga desde una fuente confiable y comprueba su seguridad con una herramienta como Hashdroid o VirusTotal, debería ser seguro de usar. Sin embargo, si lo descarga desde una fuente oscura o desconocida y no lo verifica, puede ser inseguro de usar. Puede contener malware, virus o archivos falsos que pueden dañar tu dispositivo o robar tus datos. </p>
|
163 |
-
<h3>Q: ¿Es Candy Crush Saga Mod Apk barras de oro ilimitadas legal de usar? </h3>
|
164 |
-
<p>A: No, no es legal usarlo. Es una violación de los términos del juego de servicio y los derechos de propiedad intelectual. También es injusto para otros jugadores que juegan el juego sin usar cualquier apk mod o hack. Si utiliza Candy Crush Saga Mod Apk ilimitadas Barras de Oro, usted puede conseguir prohibido en el juego o perder su cuenta. </p>
|
165 |
-
<h3>Q: ¿Candy Crush Saga Mod Apk barras de oro ilimitadas funcionan en mi dispositivo? </h3>
|
166 |
-
<p>A: Depende de su tipo de dispositivo y la versión. Candy Crush Saga Mod Apk ilimitado Barras de Oro está diseñado para dispositivos Android solamente. Puede que no funcione en dispositivos iOS u otras plataformas. También puede no funcionar en algunos dispositivos Android o versiones que son incompatibles con el archivo apk mod. </p>
|
167 |
-
<h3>Q: ¿Cómo puedo actualizar Candy Crush Saga Mod Apk ilimitadas barras de oro? </h3>
|
168 |
-
<p>A: Usted puede actualizar Candy Crush Saga Mod Apk ilimitadas barras de oro mediante la descarga e instalación de la última versión del archivo apk mod de la misma fuente que lo descargó de antes. Sin embargo, puede perder su progreso y los datos si se actualiza el archivo apk mod sin hacer una copia de seguridad de ellos primero. </p>
|
169 |
-
<h3>Q: ¿Puedo jugar Candy Crush Saga Mod Apk ilimitadas barras de oro en línea o fuera de línea? </h3> 64aa2da5cf<br />
|
170 |
-
<br />
|
171 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Betacuckgpt/togethercomputer-GPT-JT-Moderation-6B/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/togethercomputer/GPT-JT-Moderation-6B").launch()
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/req_command.py
DELETED
@@ -1,505 +0,0 @@
|
|
1 |
-
"""Contains the Command base classes that depend on PipSession.
|
2 |
-
|
3 |
-
The classes in this module are in a separate module so the commands not
|
4 |
-
needing download / PackageFinder capability don't unnecessarily import the
|
5 |
-
PackageFinder machinery and all its vendored dependencies, etc.
|
6 |
-
"""
|
7 |
-
|
8 |
-
import logging
|
9 |
-
import os
|
10 |
-
import sys
|
11 |
-
from functools import partial
|
12 |
-
from optparse import Values
|
13 |
-
from typing import TYPE_CHECKING, Any, List, Optional, Tuple
|
14 |
-
|
15 |
-
from pip._internal.cache import WheelCache
|
16 |
-
from pip._internal.cli import cmdoptions
|
17 |
-
from pip._internal.cli.base_command import Command
|
18 |
-
from pip._internal.cli.command_context import CommandContextMixIn
|
19 |
-
from pip._internal.exceptions import CommandError, PreviousBuildDirError
|
20 |
-
from pip._internal.index.collector import LinkCollector
|
21 |
-
from pip._internal.index.package_finder import PackageFinder
|
22 |
-
from pip._internal.models.selection_prefs import SelectionPreferences
|
23 |
-
from pip._internal.models.target_python import TargetPython
|
24 |
-
from pip._internal.network.session import PipSession
|
25 |
-
from pip._internal.operations.build.build_tracker import BuildTracker
|
26 |
-
from pip._internal.operations.prepare import RequirementPreparer
|
27 |
-
from pip._internal.req.constructors import (
|
28 |
-
install_req_from_editable,
|
29 |
-
install_req_from_line,
|
30 |
-
install_req_from_parsed_requirement,
|
31 |
-
install_req_from_req_string,
|
32 |
-
)
|
33 |
-
from pip._internal.req.req_file import parse_requirements
|
34 |
-
from pip._internal.req.req_install import InstallRequirement
|
35 |
-
from pip._internal.resolution.base import BaseResolver
|
36 |
-
from pip._internal.self_outdated_check import pip_self_version_check
|
37 |
-
from pip._internal.utils.temp_dir import (
|
38 |
-
TempDirectory,
|
39 |
-
TempDirectoryTypeRegistry,
|
40 |
-
tempdir_kinds,
|
41 |
-
)
|
42 |
-
from pip._internal.utils.virtualenv import running_under_virtualenv
|
43 |
-
|
44 |
-
if TYPE_CHECKING:
|
45 |
-
from ssl import SSLContext
|
46 |
-
|
47 |
-
logger = logging.getLogger(__name__)
|
48 |
-
|
49 |
-
|
50 |
-
def _create_truststore_ssl_context() -> Optional["SSLContext"]:
|
51 |
-
if sys.version_info < (3, 10):
|
52 |
-
raise CommandError("The truststore feature is only available for Python 3.10+")
|
53 |
-
|
54 |
-
try:
|
55 |
-
import ssl
|
56 |
-
except ImportError:
|
57 |
-
logger.warning("Disabling truststore since ssl support is missing")
|
58 |
-
return None
|
59 |
-
|
60 |
-
try:
|
61 |
-
import truststore
|
62 |
-
except ImportError:
|
63 |
-
raise CommandError(
|
64 |
-
"To use the truststore feature, 'truststore' must be installed into "
|
65 |
-
"pip's current environment."
|
66 |
-
)
|
67 |
-
|
68 |
-
return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
|
69 |
-
|
70 |
-
|
71 |
-
class SessionCommandMixin(CommandContextMixIn):
|
72 |
-
|
73 |
-
"""
|
74 |
-
A class mixin for command classes needing _build_session().
|
75 |
-
"""
|
76 |
-
|
77 |
-
def __init__(self) -> None:
|
78 |
-
super().__init__()
|
79 |
-
self._session: Optional[PipSession] = None
|
80 |
-
|
81 |
-
@classmethod
|
82 |
-
def _get_index_urls(cls, options: Values) -> Optional[List[str]]:
|
83 |
-
"""Return a list of index urls from user-provided options."""
|
84 |
-
index_urls = []
|
85 |
-
if not getattr(options, "no_index", False):
|
86 |
-
url = getattr(options, "index_url", None)
|
87 |
-
if url:
|
88 |
-
index_urls.append(url)
|
89 |
-
urls = getattr(options, "extra_index_urls", None)
|
90 |
-
if urls:
|
91 |
-
index_urls.extend(urls)
|
92 |
-
# Return None rather than an empty list
|
93 |
-
return index_urls or None
|
94 |
-
|
95 |
-
def get_default_session(self, options: Values) -> PipSession:
|
96 |
-
"""Get a default-managed session."""
|
97 |
-
if self._session is None:
|
98 |
-
self._session = self.enter_context(self._build_session(options))
|
99 |
-
# there's no type annotation on requests.Session, so it's
|
100 |
-
# automatically ContextManager[Any] and self._session becomes Any,
|
101 |
-
# then https://github.com/python/mypy/issues/7696 kicks in
|
102 |
-
assert self._session is not None
|
103 |
-
return self._session
|
104 |
-
|
105 |
-
def _build_session(
|
106 |
-
self,
|
107 |
-
options: Values,
|
108 |
-
retries: Optional[int] = None,
|
109 |
-
timeout: Optional[int] = None,
|
110 |
-
fallback_to_certifi: bool = False,
|
111 |
-
) -> PipSession:
|
112 |
-
cache_dir = options.cache_dir
|
113 |
-
assert not cache_dir or os.path.isabs(cache_dir)
|
114 |
-
|
115 |
-
if "truststore" in options.features_enabled:
|
116 |
-
try:
|
117 |
-
ssl_context = _create_truststore_ssl_context()
|
118 |
-
except Exception:
|
119 |
-
if not fallback_to_certifi:
|
120 |
-
raise
|
121 |
-
ssl_context = None
|
122 |
-
else:
|
123 |
-
ssl_context = None
|
124 |
-
|
125 |
-
session = PipSession(
|
126 |
-
cache=os.path.join(cache_dir, "http") if cache_dir else None,
|
127 |
-
retries=retries if retries is not None else options.retries,
|
128 |
-
trusted_hosts=options.trusted_hosts,
|
129 |
-
index_urls=self._get_index_urls(options),
|
130 |
-
ssl_context=ssl_context,
|
131 |
-
)
|
132 |
-
|
133 |
-
# Handle custom ca-bundles from the user
|
134 |
-
if options.cert:
|
135 |
-
session.verify = options.cert
|
136 |
-
|
137 |
-
# Handle SSL client certificate
|
138 |
-
if options.client_cert:
|
139 |
-
session.cert = options.client_cert
|
140 |
-
|
141 |
-
# Handle timeouts
|
142 |
-
if options.timeout or timeout:
|
143 |
-
session.timeout = timeout if timeout is not None else options.timeout
|
144 |
-
|
145 |
-
# Handle configured proxies
|
146 |
-
if options.proxy:
|
147 |
-
session.proxies = {
|
148 |
-
"http": options.proxy,
|
149 |
-
"https": options.proxy,
|
150 |
-
}
|
151 |
-
|
152 |
-
# Determine if we can prompt the user for authentication or not
|
153 |
-
session.auth.prompting = not options.no_input
|
154 |
-
session.auth.keyring_provider = options.keyring_provider
|
155 |
-
|
156 |
-
return session
|
157 |
-
|
158 |
-
|
159 |
-
class IndexGroupCommand(Command, SessionCommandMixin):
|
160 |
-
|
161 |
-
"""
|
162 |
-
Abstract base class for commands with the index_group options.
|
163 |
-
|
164 |
-
This also corresponds to the commands that permit the pip version check.
|
165 |
-
"""
|
166 |
-
|
167 |
-
def handle_pip_version_check(self, options: Values) -> None:
|
168 |
-
"""
|
169 |
-
Do the pip version check if not disabled.
|
170 |
-
|
171 |
-
This overrides the default behavior of not doing the check.
|
172 |
-
"""
|
173 |
-
# Make sure the index_group options are present.
|
174 |
-
assert hasattr(options, "no_index")
|
175 |
-
|
176 |
-
if options.disable_pip_version_check or options.no_index:
|
177 |
-
return
|
178 |
-
|
179 |
-
# Otherwise, check if we're using the latest version of pip available.
|
180 |
-
session = self._build_session(
|
181 |
-
options,
|
182 |
-
retries=0,
|
183 |
-
timeout=min(5, options.timeout),
|
184 |
-
# This is set to ensure the function does not fail when truststore is
|
185 |
-
# specified in use-feature but cannot be loaded. This usually raises a
|
186 |
-
# CommandError and shows a nice user-facing error, but this function is not
|
187 |
-
# called in that try-except block.
|
188 |
-
fallback_to_certifi=True,
|
189 |
-
)
|
190 |
-
with session:
|
191 |
-
pip_self_version_check(session, options)
|
192 |
-
|
193 |
-
|
194 |
-
KEEPABLE_TEMPDIR_TYPES = [
|
195 |
-
tempdir_kinds.BUILD_ENV,
|
196 |
-
tempdir_kinds.EPHEM_WHEEL_CACHE,
|
197 |
-
tempdir_kinds.REQ_BUILD,
|
198 |
-
]
|
199 |
-
|
200 |
-
|
201 |
-
def warn_if_run_as_root() -> None:
|
202 |
-
"""Output a warning for sudo users on Unix.
|
203 |
-
|
204 |
-
In a virtual environment, sudo pip still writes to virtualenv.
|
205 |
-
On Windows, users may run pip as Administrator without issues.
|
206 |
-
This warning only applies to Unix root users outside of virtualenv.
|
207 |
-
"""
|
208 |
-
if running_under_virtualenv():
|
209 |
-
return
|
210 |
-
if not hasattr(os, "getuid"):
|
211 |
-
return
|
212 |
-
# On Windows, there are no "system managed" Python packages. Installing as
|
213 |
-
# Administrator via pip is the correct way of updating system environments.
|
214 |
-
#
|
215 |
-
# We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform
|
216 |
-
# checks: https://mypy.readthedocs.io/en/stable/common_issues.html
|
217 |
-
if sys.platform == "win32" or sys.platform == "cygwin":
|
218 |
-
return
|
219 |
-
|
220 |
-
if os.getuid() != 0:
|
221 |
-
return
|
222 |
-
|
223 |
-
logger.warning(
|
224 |
-
"Running pip as the 'root' user can result in broken permissions and "
|
225 |
-
"conflicting behaviour with the system package manager. "
|
226 |
-
"It is recommended to use a virtual environment instead: "
|
227 |
-
"https://pip.pypa.io/warnings/venv"
|
228 |
-
)
|
229 |
-
|
230 |
-
|
231 |
-
def with_cleanup(func: Any) -> Any:
|
232 |
-
"""Decorator for common logic related to managing temporary
|
233 |
-
directories.
|
234 |
-
"""
|
235 |
-
|
236 |
-
def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None:
|
237 |
-
for t in KEEPABLE_TEMPDIR_TYPES:
|
238 |
-
registry.set_delete(t, False)
|
239 |
-
|
240 |
-
def wrapper(
|
241 |
-
self: RequirementCommand, options: Values, args: List[Any]
|
242 |
-
) -> Optional[int]:
|
243 |
-
assert self.tempdir_registry is not None
|
244 |
-
if options.no_clean:
|
245 |
-
configure_tempdir_registry(self.tempdir_registry)
|
246 |
-
|
247 |
-
try:
|
248 |
-
return func(self, options, args)
|
249 |
-
except PreviousBuildDirError:
|
250 |
-
# This kind of conflict can occur when the user passes an explicit
|
251 |
-
# build directory with a pre-existing folder. In that case we do
|
252 |
-
# not want to accidentally remove it.
|
253 |
-
configure_tempdir_registry(self.tempdir_registry)
|
254 |
-
raise
|
255 |
-
|
256 |
-
return wrapper
|
257 |
-
|
258 |
-
|
259 |
-
class RequirementCommand(IndexGroupCommand):
|
260 |
-
def __init__(self, *args: Any, **kw: Any) -> None:
|
261 |
-
super().__init__(*args, **kw)
|
262 |
-
|
263 |
-
self.cmd_opts.add_option(cmdoptions.no_clean())
|
264 |
-
|
265 |
-
@staticmethod
|
266 |
-
def determine_resolver_variant(options: Values) -> str:
|
267 |
-
"""Determines which resolver should be used, based on the given options."""
|
268 |
-
if "legacy-resolver" in options.deprecated_features_enabled:
|
269 |
-
return "legacy"
|
270 |
-
|
271 |
-
return "2020-resolver"
|
272 |
-
|
273 |
-
@classmethod
|
274 |
-
def make_requirement_preparer(
|
275 |
-
cls,
|
276 |
-
temp_build_dir: TempDirectory,
|
277 |
-
options: Values,
|
278 |
-
build_tracker: BuildTracker,
|
279 |
-
session: PipSession,
|
280 |
-
finder: PackageFinder,
|
281 |
-
use_user_site: bool,
|
282 |
-
download_dir: Optional[str] = None,
|
283 |
-
verbosity: int = 0,
|
284 |
-
) -> RequirementPreparer:
|
285 |
-
"""
|
286 |
-
Create a RequirementPreparer instance for the given parameters.
|
287 |
-
"""
|
288 |
-
temp_build_dir_path = temp_build_dir.path
|
289 |
-
assert temp_build_dir_path is not None
|
290 |
-
|
291 |
-
resolver_variant = cls.determine_resolver_variant(options)
|
292 |
-
if resolver_variant == "2020-resolver":
|
293 |
-
lazy_wheel = "fast-deps" in options.features_enabled
|
294 |
-
if lazy_wheel:
|
295 |
-
logger.warning(
|
296 |
-
"pip is using lazily downloaded wheels using HTTP "
|
297 |
-
"range requests to obtain dependency information. "
|
298 |
-
"This experimental feature is enabled through "
|
299 |
-
"--use-feature=fast-deps and it is not ready for "
|
300 |
-
"production."
|
301 |
-
)
|
302 |
-
else:
|
303 |
-
lazy_wheel = False
|
304 |
-
if "fast-deps" in options.features_enabled:
|
305 |
-
logger.warning(
|
306 |
-
"fast-deps has no effect when used with the legacy resolver."
|
307 |
-
)
|
308 |
-
|
309 |
-
return RequirementPreparer(
|
310 |
-
build_dir=temp_build_dir_path,
|
311 |
-
src_dir=options.src_dir,
|
312 |
-
download_dir=download_dir,
|
313 |
-
build_isolation=options.build_isolation,
|
314 |
-
check_build_deps=options.check_build_deps,
|
315 |
-
build_tracker=build_tracker,
|
316 |
-
session=session,
|
317 |
-
progress_bar=options.progress_bar,
|
318 |
-
finder=finder,
|
319 |
-
require_hashes=options.require_hashes,
|
320 |
-
use_user_site=use_user_site,
|
321 |
-
lazy_wheel=lazy_wheel,
|
322 |
-
verbosity=verbosity,
|
323 |
-
)
|
324 |
-
|
325 |
-
@classmethod
|
326 |
-
def make_resolver(
|
327 |
-
cls,
|
328 |
-
preparer: RequirementPreparer,
|
329 |
-
finder: PackageFinder,
|
330 |
-
options: Values,
|
331 |
-
wheel_cache: Optional[WheelCache] = None,
|
332 |
-
use_user_site: bool = False,
|
333 |
-
ignore_installed: bool = True,
|
334 |
-
ignore_requires_python: bool = False,
|
335 |
-
force_reinstall: bool = False,
|
336 |
-
upgrade_strategy: str = "to-satisfy-only",
|
337 |
-
use_pep517: Optional[bool] = None,
|
338 |
-
py_version_info: Optional[Tuple[int, ...]] = None,
|
339 |
-
) -> BaseResolver:
|
340 |
-
"""
|
341 |
-
Create a Resolver instance for the given parameters.
|
342 |
-
"""
|
343 |
-
make_install_req = partial(
|
344 |
-
install_req_from_req_string,
|
345 |
-
isolated=options.isolated_mode,
|
346 |
-
use_pep517=use_pep517,
|
347 |
-
)
|
348 |
-
resolver_variant = cls.determine_resolver_variant(options)
|
349 |
-
# The long import name and duplicated invocation is needed to convince
|
350 |
-
# Mypy into correctly typechecking. Otherwise it would complain the
|
351 |
-
# "Resolver" class being redefined.
|
352 |
-
if resolver_variant == "2020-resolver":
|
353 |
-
import pip._internal.resolution.resolvelib.resolver
|
354 |
-
|
355 |
-
return pip._internal.resolution.resolvelib.resolver.Resolver(
|
356 |
-
preparer=preparer,
|
357 |
-
finder=finder,
|
358 |
-
wheel_cache=wheel_cache,
|
359 |
-
make_install_req=make_install_req,
|
360 |
-
use_user_site=use_user_site,
|
361 |
-
ignore_dependencies=options.ignore_dependencies,
|
362 |
-
ignore_installed=ignore_installed,
|
363 |
-
ignore_requires_python=ignore_requires_python,
|
364 |
-
force_reinstall=force_reinstall,
|
365 |
-
upgrade_strategy=upgrade_strategy,
|
366 |
-
py_version_info=py_version_info,
|
367 |
-
)
|
368 |
-
import pip._internal.resolution.legacy.resolver
|
369 |
-
|
370 |
-
return pip._internal.resolution.legacy.resolver.Resolver(
|
371 |
-
preparer=preparer,
|
372 |
-
finder=finder,
|
373 |
-
wheel_cache=wheel_cache,
|
374 |
-
make_install_req=make_install_req,
|
375 |
-
use_user_site=use_user_site,
|
376 |
-
ignore_dependencies=options.ignore_dependencies,
|
377 |
-
ignore_installed=ignore_installed,
|
378 |
-
ignore_requires_python=ignore_requires_python,
|
379 |
-
force_reinstall=force_reinstall,
|
380 |
-
upgrade_strategy=upgrade_strategy,
|
381 |
-
py_version_info=py_version_info,
|
382 |
-
)
|
383 |
-
|
384 |
-
def get_requirements(
|
385 |
-
self,
|
386 |
-
args: List[str],
|
387 |
-
options: Values,
|
388 |
-
finder: PackageFinder,
|
389 |
-
session: PipSession,
|
390 |
-
) -> List[InstallRequirement]:
|
391 |
-
"""
|
392 |
-
Parse command-line arguments into the corresponding requirements.
|
393 |
-
"""
|
394 |
-
requirements: List[InstallRequirement] = []
|
395 |
-
for filename in options.constraints:
|
396 |
-
for parsed_req in parse_requirements(
|
397 |
-
filename,
|
398 |
-
constraint=True,
|
399 |
-
finder=finder,
|
400 |
-
options=options,
|
401 |
-
session=session,
|
402 |
-
):
|
403 |
-
req_to_add = install_req_from_parsed_requirement(
|
404 |
-
parsed_req,
|
405 |
-
isolated=options.isolated_mode,
|
406 |
-
user_supplied=False,
|
407 |
-
)
|
408 |
-
requirements.append(req_to_add)
|
409 |
-
|
410 |
-
for req in args:
|
411 |
-
req_to_add = install_req_from_line(
|
412 |
-
req,
|
413 |
-
comes_from=None,
|
414 |
-
isolated=options.isolated_mode,
|
415 |
-
use_pep517=options.use_pep517,
|
416 |
-
user_supplied=True,
|
417 |
-
config_settings=getattr(options, "config_settings", None),
|
418 |
-
)
|
419 |
-
requirements.append(req_to_add)
|
420 |
-
|
421 |
-
for req in options.editables:
|
422 |
-
req_to_add = install_req_from_editable(
|
423 |
-
req,
|
424 |
-
user_supplied=True,
|
425 |
-
isolated=options.isolated_mode,
|
426 |
-
use_pep517=options.use_pep517,
|
427 |
-
config_settings=getattr(options, "config_settings", None),
|
428 |
-
)
|
429 |
-
requirements.append(req_to_add)
|
430 |
-
|
431 |
-
# NOTE: options.require_hashes may be set if --require-hashes is True
|
432 |
-
for filename in options.requirements:
|
433 |
-
for parsed_req in parse_requirements(
|
434 |
-
filename, finder=finder, options=options, session=session
|
435 |
-
):
|
436 |
-
req_to_add = install_req_from_parsed_requirement(
|
437 |
-
parsed_req,
|
438 |
-
isolated=options.isolated_mode,
|
439 |
-
use_pep517=options.use_pep517,
|
440 |
-
user_supplied=True,
|
441 |
-
config_settings=parsed_req.options.get("config_settings")
|
442 |
-
if parsed_req.options
|
443 |
-
else None,
|
444 |
-
)
|
445 |
-
requirements.append(req_to_add)
|
446 |
-
|
447 |
-
# If any requirement has hash options, enable hash checking.
|
448 |
-
if any(req.has_hash_options for req in requirements):
|
449 |
-
options.require_hashes = True
|
450 |
-
|
451 |
-
if not (args or options.editables or options.requirements):
|
452 |
-
opts = {"name": self.name}
|
453 |
-
if options.find_links:
|
454 |
-
raise CommandError(
|
455 |
-
"You must give at least one requirement to {name} "
|
456 |
-
'(maybe you meant "pip {name} {links}"?)'.format(
|
457 |
-
**dict(opts, links=" ".join(options.find_links))
|
458 |
-
)
|
459 |
-
)
|
460 |
-
else:
|
461 |
-
raise CommandError(
|
462 |
-
"You must give at least one requirement to {name} "
|
463 |
-
'(see "pip help {name}")'.format(**opts)
|
464 |
-
)
|
465 |
-
|
466 |
-
return requirements
|
467 |
-
|
468 |
-
@staticmethod
|
469 |
-
def trace_basic_info(finder: PackageFinder) -> None:
|
470 |
-
"""
|
471 |
-
Trace basic information about the provided objects.
|
472 |
-
"""
|
473 |
-
# Display where finder is looking for packages
|
474 |
-
search_scope = finder.search_scope
|
475 |
-
locations = search_scope.get_formatted_locations()
|
476 |
-
if locations:
|
477 |
-
logger.info(locations)
|
478 |
-
|
479 |
-
def _build_package_finder(
|
480 |
-
self,
|
481 |
-
options: Values,
|
482 |
-
session: PipSession,
|
483 |
-
target_python: Optional[TargetPython] = None,
|
484 |
-
ignore_requires_python: Optional[bool] = None,
|
485 |
-
) -> PackageFinder:
|
486 |
-
"""
|
487 |
-
Create a package finder appropriate to this requirement command.
|
488 |
-
|
489 |
-
:param ignore_requires_python: Whether to ignore incompatible
|
490 |
-
"Requires-Python" values in links. Defaults to False.
|
491 |
-
"""
|
492 |
-
link_collector = LinkCollector.create(session, options=options)
|
493 |
-
selection_prefs = SelectionPreferences(
|
494 |
-
allow_yanked=True,
|
495 |
-
format_control=options.format_control,
|
496 |
-
allow_all_prereleases=options.pre,
|
497 |
-
prefer_binary=options.prefer_binary,
|
498 |
-
ignore_requires_python=ignore_requires_python,
|
499 |
-
)
|
500 |
-
|
501 |
-
return PackageFinder.create(
|
502 |
-
link_collector=link_collector,
|
503 |
-
selection_prefs=selection_prefs,
|
504 |
-
target_python=target_python,
|
505 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
# The Original Code is mozilla.org code.
|
3 |
-
#
|
4 |
-
# The Initial Developer of the Original Code is
|
5 |
-
# Netscape Communications Corporation.
|
6 |
-
# Portions created by the Initial Developer are Copyright (C) 1998
|
7 |
-
# the Initial Developer. All Rights Reserved.
|
8 |
-
#
|
9 |
-
# Contributor(s):
|
10 |
-
# Mark Pilgrim - port to Python
|
11 |
-
#
|
12 |
-
# This library is free software; you can redistribute it and/or
|
13 |
-
# modify it under the terms of the GNU Lesser General Public
|
14 |
-
# License as published by the Free Software Foundation; either
|
15 |
-
# version 2.1 of the License, or (at your option) any later version.
|
16 |
-
#
|
17 |
-
# This library is distributed in the hope that it will be useful,
|
18 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
19 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
20 |
-
# Lesser General Public License for more details.
|
21 |
-
#
|
22 |
-
# You should have received a copy of the GNU Lesser General Public
|
23 |
-
# License along with this library; if not, write to the Free Software
|
24 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
25 |
-
# 02110-1301 USA
|
26 |
-
######################### END LICENSE BLOCK #########################
|
27 |
-
|
28 |
-
from typing import Union
|
29 |
-
|
30 |
-
from .charsetprober import CharSetProber
|
31 |
-
from .codingstatemachine import CodingStateMachine
|
32 |
-
from .enums import MachineState, ProbingState
|
33 |
-
from .mbcssm import UTF8_SM_MODEL
|
34 |
-
|
35 |
-
|
36 |
-
class UTF8Prober(CharSetProber):
|
37 |
-
ONE_CHAR_PROB = 0.5
|
38 |
-
|
39 |
-
def __init__(self) -> None:
|
40 |
-
super().__init__()
|
41 |
-
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
|
42 |
-
self._num_mb_chars = 0
|
43 |
-
self.reset()
|
44 |
-
|
45 |
-
def reset(self) -> None:
|
46 |
-
super().reset()
|
47 |
-
self.coding_sm.reset()
|
48 |
-
self._num_mb_chars = 0
|
49 |
-
|
50 |
-
@property
|
51 |
-
def charset_name(self) -> str:
|
52 |
-
return "utf-8"
|
53 |
-
|
54 |
-
@property
|
55 |
-
def language(self) -> str:
|
56 |
-
return ""
|
57 |
-
|
58 |
-
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
|
59 |
-
for c in byte_str:
|
60 |
-
coding_state = self.coding_sm.next_state(c)
|
61 |
-
if coding_state == MachineState.ERROR:
|
62 |
-
self._state = ProbingState.NOT_ME
|
63 |
-
break
|
64 |
-
if coding_state == MachineState.ITS_ME:
|
65 |
-
self._state = ProbingState.FOUND_IT
|
66 |
-
break
|
67 |
-
if coding_state == MachineState.START:
|
68 |
-
if self.coding_sm.get_current_charlen() >= 2:
|
69 |
-
self._num_mb_chars += 1
|
70 |
-
|
71 |
-
if self.state == ProbingState.DETECTING:
|
72 |
-
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
|
73 |
-
self._state = ProbingState.FOUND_IT
|
74 |
-
|
75 |
-
return self.state
|
76 |
-
|
77 |
-
def get_confidence(self) -> float:
|
78 |
-
unlike = 0.99
|
79 |
-
if self._num_mb_chars < 6:
|
80 |
-
unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
|
81 |
-
return 1.0 - unlike
|
82 |
-
return unlike
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/abc.py
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
from typing import BinaryIO, Iterable, Text
|
3 |
-
|
4 |
-
from ._compat import runtime_checkable, Protocol
|
5 |
-
|
6 |
-
|
7 |
-
class ResourceReader(metaclass=abc.ABCMeta):
|
8 |
-
"""Abstract base class for loaders to provide resource reading support."""
|
9 |
-
|
10 |
-
@abc.abstractmethod
|
11 |
-
def open_resource(self, resource: Text) -> BinaryIO:
|
12 |
-
"""Return an opened, file-like object for binary reading.
|
13 |
-
|
14 |
-
The 'resource' argument is expected to represent only a file name.
|
15 |
-
If the resource cannot be found, FileNotFoundError is raised.
|
16 |
-
"""
|
17 |
-
# This deliberately raises FileNotFoundError instead of
|
18 |
-
# NotImplementedError so that if this method is accidentally called,
|
19 |
-
# it'll still do the right thing.
|
20 |
-
raise FileNotFoundError
|
21 |
-
|
22 |
-
@abc.abstractmethod
|
23 |
-
def resource_path(self, resource: Text) -> Text:
|
24 |
-
"""Return the file system path to the specified resource.
|
25 |
-
|
26 |
-
The 'resource' argument is expected to represent only a file name.
|
27 |
-
If the resource does not exist on the file system, raise
|
28 |
-
FileNotFoundError.
|
29 |
-
"""
|
30 |
-
# This deliberately raises FileNotFoundError instead of
|
31 |
-
# NotImplementedError so that if this method is accidentally called,
|
32 |
-
# it'll still do the right thing.
|
33 |
-
raise FileNotFoundError
|
34 |
-
|
35 |
-
@abc.abstractmethod
|
36 |
-
def is_resource(self, path: Text) -> bool:
|
37 |
-
"""Return True if the named 'path' is a resource.
|
38 |
-
|
39 |
-
Files are resources, directories are not.
|
40 |
-
"""
|
41 |
-
raise FileNotFoundError
|
42 |
-
|
43 |
-
@abc.abstractmethod
|
44 |
-
def contents(self) -> Iterable[str]:
|
45 |
-
"""Return an iterable of entries in `package`."""
|
46 |
-
raise FileNotFoundError
|
47 |
-
|
48 |
-
|
49 |
-
@runtime_checkable
|
50 |
-
class Traversable(Protocol):
|
51 |
-
"""
|
52 |
-
An object with a subset of pathlib.Path methods suitable for
|
53 |
-
traversing directories and opening files.
|
54 |
-
"""
|
55 |
-
|
56 |
-
@abc.abstractmethod
|
57 |
-
def iterdir(self):
|
58 |
-
"""
|
59 |
-
Yield Traversable objects in self
|
60 |
-
"""
|
61 |
-
|
62 |
-
def read_bytes(self):
|
63 |
-
"""
|
64 |
-
Read contents of self as bytes
|
65 |
-
"""
|
66 |
-
with self.open('rb') as strm:
|
67 |
-
return strm.read()
|
68 |
-
|
69 |
-
def read_text(self, encoding=None):
|
70 |
-
"""
|
71 |
-
Read contents of self as text
|
72 |
-
"""
|
73 |
-
with self.open(encoding=encoding) as strm:
|
74 |
-
return strm.read()
|
75 |
-
|
76 |
-
@abc.abstractmethod
|
77 |
-
def is_dir(self) -> bool:
|
78 |
-
"""
|
79 |
-
Return True if self is a directory
|
80 |
-
"""
|
81 |
-
|
82 |
-
@abc.abstractmethod
|
83 |
-
def is_file(self) -> bool:
|
84 |
-
"""
|
85 |
-
Return True if self is a file
|
86 |
-
"""
|
87 |
-
|
88 |
-
@abc.abstractmethod
|
89 |
-
def joinpath(self, child):
|
90 |
-
"""
|
91 |
-
Return Traversable child in self
|
92 |
-
"""
|
93 |
-
|
94 |
-
def __truediv__(self, child):
|
95 |
-
"""
|
96 |
-
Return Traversable child in self
|
97 |
-
"""
|
98 |
-
return self.joinpath(child)
|
99 |
-
|
100 |
-
@abc.abstractmethod
|
101 |
-
def open(self, mode='r', *args, **kwargs):
|
102 |
-
"""
|
103 |
-
mode may be 'r' or 'rb' to open as text or binary. Return a handle
|
104 |
-
suitable for reading (same as pathlib.Path.open).
|
105 |
-
|
106 |
-
When opening as text, accepts encoding parameters such as those
|
107 |
-
accepted by io.TextIOWrapper.
|
108 |
-
"""
|
109 |
-
|
110 |
-
@abc.abstractproperty
|
111 |
-
def name(self) -> str:
|
112 |
-
"""
|
113 |
-
The base name of this object without any parent references.
|
114 |
-
"""
|
115 |
-
|
116 |
-
|
117 |
-
class TraversableResources(ResourceReader):
|
118 |
-
"""
|
119 |
-
The required interface for providing traversable
|
120 |
-
resources.
|
121 |
-
"""
|
122 |
-
|
123 |
-
@abc.abstractmethod
|
124 |
-
def files(self):
|
125 |
-
"""Return a Traversable object for the loaded package."""
|
126 |
-
|
127 |
-
def open_resource(self, resource):
|
128 |
-
return self.files().joinpath(resource).open('rb')
|
129 |
-
|
130 |
-
def resource_path(self, resource):
|
131 |
-
raise FileNotFoundError(resource)
|
132 |
-
|
133 |
-
def is_resource(self, path):
|
134 |
-
return self.files().joinpath(path).is_file()
|
135 |
-
|
136 |
-
def contents(self):
|
137 |
-
return (item.name for item in self.files().iterdir())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/api_resources/experimental/__init__.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
from openai.api_resources.experimental.completion_config import ( # noqa: F401
|
2 |
-
CompletionConfig,
|
3 |
-
)
|
|
|
|
|
|
|
|
spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/caption_datasets.py
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Copyright (c) 2022, salesforce.com, inc.
|
3 |
-
All rights reserved.
|
4 |
-
SPDX-License-Identifier: BSD-3-Clause
|
5 |
-
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
6 |
-
"""
|
7 |
-
|
8 |
-
import os
|
9 |
-
from collections import OrderedDict
|
10 |
-
|
11 |
-
from minigpt4.datasets.datasets.base_dataset import BaseDataset
|
12 |
-
from PIL import Image
|
13 |
-
|
14 |
-
|
15 |
-
class __DisplMixin:
|
16 |
-
def displ_item(self, index):
|
17 |
-
sample, ann = self.__getitem__(index), self.annotation[index]
|
18 |
-
|
19 |
-
return OrderedDict(
|
20 |
-
{
|
21 |
-
"file": ann["image"],
|
22 |
-
"caption": ann["caption"],
|
23 |
-
"image": sample["image"],
|
24 |
-
}
|
25 |
-
)
|
26 |
-
|
27 |
-
|
28 |
-
class CaptionDataset(BaseDataset, __DisplMixin):
|
29 |
-
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
|
30 |
-
"""
|
31 |
-
vis_root (string): Root directory of images (e.g. coco/images/)
|
32 |
-
ann_root (string): directory to store the annotation file
|
33 |
-
"""
|
34 |
-
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
|
35 |
-
|
36 |
-
self.img_ids = {}
|
37 |
-
n = 0
|
38 |
-
for ann in self.annotation:
|
39 |
-
img_id = ann["image_id"]
|
40 |
-
if img_id not in self.img_ids.keys():
|
41 |
-
self.img_ids[img_id] = n
|
42 |
-
n += 1
|
43 |
-
|
44 |
-
def __getitem__(self, index):
|
45 |
-
|
46 |
-
# TODO this assumes image input, not general enough
|
47 |
-
ann = self.annotation[index]
|
48 |
-
|
49 |
-
img_file = '{:0>12}.jpg'.format(ann["image_id"])
|
50 |
-
image_path = os.path.join(self.vis_root, img_file)
|
51 |
-
image = Image.open(image_path).convert("RGB")
|
52 |
-
|
53 |
-
image = self.vis_processor(image)
|
54 |
-
caption = self.text_processor(ann["caption"])
|
55 |
-
|
56 |
-
return {
|
57 |
-
"image": image,
|
58 |
-
"text_input": caption,
|
59 |
-
"image_id": self.img_ids[ann["image_id"]],
|
60 |
-
}
|
61 |
-
|
62 |
-
|
63 |
-
class CaptionEvalDataset(BaseDataset, __DisplMixin):
|
64 |
-
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
|
65 |
-
"""
|
66 |
-
vis_root (string): Root directory of images (e.g. coco/images/)
|
67 |
-
ann_root (string): directory to store the annotation file
|
68 |
-
split (string): val or test
|
69 |
-
"""
|
70 |
-
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
|
71 |
-
|
72 |
-
def __getitem__(self, index):
|
73 |
-
|
74 |
-
ann = self.annotation[index]
|
75 |
-
|
76 |
-
image_path = os.path.join(self.vis_root, ann["image"])
|
77 |
-
image = Image.open(image_path).convert("RGB")
|
78 |
-
|
79 |
-
image = self.vis_processor(image)
|
80 |
-
|
81 |
-
return {
|
82 |
-
"image": image,
|
83 |
-
"image_id": ann["image_id"],
|
84 |
-
"instance_id": ann["instance_id"],
|
85 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/__init__.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator
|
3 |
-
from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/box_head.py
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import numpy as np
|
3 |
-
import fvcore.nn.weight_init as weight_init
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
from torch.nn import functional as F
|
7 |
-
|
8 |
-
from detectron2.layers import Conv2d, Linear, ShapeSpec, get_norm
|
9 |
-
from detectron2.utils.registry import Registry
|
10 |
-
|
11 |
-
ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
|
12 |
-
ROI_BOX_HEAD_REGISTRY.__doc__ = """
|
13 |
-
Registry for box heads, which make box predictions from per-region features.
|
14 |
-
|
15 |
-
The registered object will be called with `obj(cfg, input_shape)`.
|
16 |
-
"""
|
17 |
-
|
18 |
-
|
19 |
-
@ROI_BOX_HEAD_REGISTRY.register()
|
20 |
-
class FastRCNNConvFCHead(nn.Module):
|
21 |
-
"""
|
22 |
-
A head with several 3x3 conv layers (each followed by norm & relu) and
|
23 |
-
several fc layers (each followed by relu).
|
24 |
-
"""
|
25 |
-
|
26 |
-
def __init__(self, cfg, input_shape: ShapeSpec):
|
27 |
-
"""
|
28 |
-
The following attributes are parsed from config:
|
29 |
-
num_conv, num_fc: the number of conv/fc layers
|
30 |
-
conv_dim/fc_dim: the dimension of the conv/fc layers
|
31 |
-
norm: normalization for the conv layers
|
32 |
-
"""
|
33 |
-
super().__init__()
|
34 |
-
|
35 |
-
# fmt: off
|
36 |
-
num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
|
37 |
-
conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
|
38 |
-
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
|
39 |
-
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
|
40 |
-
norm = cfg.MODEL.ROI_BOX_HEAD.NORM
|
41 |
-
# fmt: on
|
42 |
-
assert num_conv + num_fc > 0
|
43 |
-
|
44 |
-
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
|
45 |
-
|
46 |
-
self.conv_norm_relus = []
|
47 |
-
for k in range(num_conv):
|
48 |
-
conv = Conv2d(
|
49 |
-
self._output_size[0],
|
50 |
-
conv_dim,
|
51 |
-
kernel_size=3,
|
52 |
-
padding=1,
|
53 |
-
bias=not norm,
|
54 |
-
norm=get_norm(norm, conv_dim),
|
55 |
-
activation=F.relu,
|
56 |
-
)
|
57 |
-
self.add_module("conv{}".format(k + 1), conv)
|
58 |
-
self.conv_norm_relus.append(conv)
|
59 |
-
self._output_size = (conv_dim, self._output_size[1], self._output_size[2])
|
60 |
-
|
61 |
-
self.fcs = []
|
62 |
-
for k in range(num_fc):
|
63 |
-
fc = Linear(np.prod(self._output_size), fc_dim)
|
64 |
-
self.add_module("fc{}".format(k + 1), fc)
|
65 |
-
self.fcs.append(fc)
|
66 |
-
self._output_size = fc_dim
|
67 |
-
|
68 |
-
for layer in self.conv_norm_relus:
|
69 |
-
weight_init.c2_msra_fill(layer)
|
70 |
-
for layer in self.fcs:
|
71 |
-
weight_init.c2_xavier_fill(layer)
|
72 |
-
|
73 |
-
def forward(self, x):
|
74 |
-
for layer in self.conv_norm_relus:
|
75 |
-
x = layer(x)
|
76 |
-
if len(self.fcs):
|
77 |
-
if x.dim() > 2:
|
78 |
-
x = torch.flatten(x, start_dim=1)
|
79 |
-
for layer in self.fcs:
|
80 |
-
x = F.relu(layer(x))
|
81 |
-
return x
|
82 |
-
|
83 |
-
@property
|
84 |
-
def output_shape(self):
|
85 |
-
"""
|
86 |
-
Returns:
|
87 |
-
ShapeSpec: the output feature shape
|
88 |
-
"""
|
89 |
-
o = self._output_size
|
90 |
-
if isinstance(o, int):
|
91 |
-
return ShapeSpec(channels=o)
|
92 |
-
else:
|
93 |
-
return ShapeSpec(channels=o[0], height=o[1], width=o[2])
|
94 |
-
|
95 |
-
|
96 |
-
def build_box_head(cfg, input_shape):
|
97 |
-
"""
|
98 |
-
Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.
|
99 |
-
"""
|
100 |
-
name = cfg.MODEL.ROI_BOX_HEAD.NAME
|
101 |
-
return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/butd/model_cfgs.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
# --------------------------------------------------------
|
2 |
-
# OpenVQA
|
3 |
-
# Written by Zhenwei Shao https://github.com/ParadoxZW
|
4 |
-
# --------------------------------------------------------
|
5 |
-
|
6 |
-
from openvqa.core.base_cfgs import BaseCfgs
|
7 |
-
|
8 |
-
|
9 |
-
class Cfgs(BaseCfgs):
|
10 |
-
def __init__(self):
|
11 |
-
super(Cfgs, self).__init__()
|
12 |
-
|
13 |
-
self.IMG_FEAT_SIZE = 2048
|
14 |
-
self.HIDDEN_SIZE = 512
|
15 |
-
self.DROPOUT_R = 0.2
|
16 |
-
self.CLASSIFER_DROPOUT_R = 0.5
|
17 |
-
self.FLAT_OUT_SIZE = 1024
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/temporary_buffer.h
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system has no special temporary buffer functions
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/cwalt/clustering_utils.py
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
"""
|
4 |
-
Created on Fri May 20 15:18:20 2022
|
5 |
-
|
6 |
-
@author: dinesh
|
7 |
-
"""
|
8 |
-
|
9 |
-
# 0 - Import related libraries
|
10 |
-
|
11 |
-
import urllib
|
12 |
-
import zipfile
|
13 |
-
import os
|
14 |
-
import scipy.io
|
15 |
-
import math
|
16 |
-
import numpy as np
|
17 |
-
import matplotlib.pyplot as plt
|
18 |
-
import seaborn as sns
|
19 |
-
|
20 |
-
from scipy.spatial.distance import directed_hausdorff
|
21 |
-
from sklearn.cluster import DBSCAN
|
22 |
-
from sklearn.metrics.pairwise import pairwise_distances
|
23 |
-
import scipy.spatial.distance
|
24 |
-
|
25 |
-
from .kmedoid import kMedoids # kMedoids code is adapted from https://github.com/letiantian/kmedoids
|
26 |
-
|
27 |
-
# Some visualization stuff, not so important
|
28 |
-
# sns.set()
|
29 |
-
plt.rcParams['figure.figsize'] = (12, 12)
|
30 |
-
|
31 |
-
# Utility Functions
|
32 |
-
|
33 |
-
color_lst = plt.rcParams['axes.prop_cycle'].by_key()['color']
|
34 |
-
color_lst.extend(['firebrick', 'olive', 'indigo', 'khaki', 'teal', 'saddlebrown',
|
35 |
-
'skyblue', 'coral', 'darkorange', 'lime', 'darkorchid', 'dimgray'])
|
36 |
-
|
37 |
-
|
38 |
-
def plot_cluster(image, traj_lst, cluster_lst):
|
39 |
-
'''
|
40 |
-
Plots given trajectories with a color that is specific for every trajectory's own cluster index.
|
41 |
-
Outlier trajectories which are specified with -1 in `cluster_lst` are plotted dashed with black color
|
42 |
-
'''
|
43 |
-
cluster_count = np.max(cluster_lst) + 1
|
44 |
-
|
45 |
-
for traj, cluster in zip(traj_lst, cluster_lst):
|
46 |
-
|
47 |
-
# if cluster == -1:
|
48 |
-
# # Means it it a noisy trajectory, paint it black
|
49 |
-
# plt.plot(traj[:, 0], traj[:, 1], c='k', linestyle='dashed')
|
50 |
-
#
|
51 |
-
# else:
|
52 |
-
plt.plot(traj[:, 0], traj[:, 1], c=color_lst[cluster % len(color_lst)])
|
53 |
-
|
54 |
-
plt.imshow(image)
|
55 |
-
# plt.show()
|
56 |
-
plt.axis('off')
|
57 |
-
plt.savefig('trajectory.png', bbox_inches='tight')
|
58 |
-
plt.show()
|
59 |
-
|
60 |
-
|
61 |
-
# 3 - Distance matrix
|
62 |
-
|
63 |
-
def hausdorff( u, v):
|
64 |
-
d = max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
|
65 |
-
return d
|
66 |
-
|
67 |
-
|
68 |
-
def build_distance_matrix(traj_lst):
|
69 |
-
# 2 - Trajectory segmentation
|
70 |
-
|
71 |
-
print('Running trajectory segmentation...')
|
72 |
-
degree_threshold = 5
|
73 |
-
|
74 |
-
for traj_index, traj in enumerate(traj_lst):
|
75 |
-
|
76 |
-
hold_index_lst = []
|
77 |
-
previous_azimuth = 1000
|
78 |
-
|
79 |
-
for point_index, point in enumerate(traj[:-1]):
|
80 |
-
next_point = traj[point_index + 1]
|
81 |
-
diff_vector = next_point - point
|
82 |
-
azimuth = (math.degrees(math.atan2(*diff_vector)) + 360) % 360
|
83 |
-
|
84 |
-
if abs(azimuth - previous_azimuth) > degree_threshold:
|
85 |
-
hold_index_lst.append(point_index)
|
86 |
-
previous_azimuth = azimuth
|
87 |
-
hold_index_lst.append(traj.shape[0] - 1) # Last point of trajectory is always added
|
88 |
-
|
89 |
-
traj_lst[traj_index] = traj[hold_index_lst, :]
|
90 |
-
|
91 |
-
print('Building distance matrix...')
|
92 |
-
traj_count = len(traj_lst)
|
93 |
-
D = np.zeros((traj_count, traj_count))
|
94 |
-
|
95 |
-
# This may take a while
|
96 |
-
for i in range(traj_count):
|
97 |
-
if i % 20 == 0:
|
98 |
-
print(i)
|
99 |
-
for j in range(i + 1, traj_count):
|
100 |
-
distance = hausdorff(traj_lst[i], traj_lst[j])
|
101 |
-
D[i, j] = distance
|
102 |
-
D[j, i] = distance
|
103 |
-
|
104 |
-
return D
|
105 |
-
|
106 |
-
|
107 |
-
def run_kmedoids(image, traj_lst, D):
|
108 |
-
# 4 - Different clustering methods
|
109 |
-
|
110 |
-
# 4.1 - kmedoids
|
111 |
-
|
112 |
-
traj_count = len(traj_lst)
|
113 |
-
|
114 |
-
k = 3 # The number of clusters
|
115 |
-
medoid_center_lst, cluster2index_lst = kMedoids(D, k)
|
116 |
-
|
117 |
-
cluster_lst = np.empty((traj_count,), dtype=int)
|
118 |
-
|
119 |
-
for cluster in cluster2index_lst:
|
120 |
-
cluster_lst[cluster2index_lst[cluster]] = cluster
|
121 |
-
|
122 |
-
plot_cluster(image, traj_lst, cluster_lst)
|
123 |
-
|
124 |
-
|
125 |
-
def run_dbscan(image, traj_lst, D):
|
126 |
-
mdl = DBSCAN(eps=400, min_samples=10)
|
127 |
-
cluster_lst = mdl.fit_predict(D)
|
128 |
-
|
129 |
-
plot_cluster(image, traj_lst, cluster_lst)
|
130 |
-
|
131 |
-
|
132 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/bbox/samplers/__init__.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
from .base_sampler import BaseSampler
|
2 |
-
from .combined_sampler import CombinedSampler
|
3 |
-
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
|
4 |
-
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
|
5 |
-
from .ohem_sampler import OHEMSampler
|
6 |
-
from .pseudo_sampler import PseudoSampler
|
7 |
-
from .random_sampler import RandomSampler
|
8 |
-
from .sampling_result import SamplingResult
|
9 |
-
from .score_hlr_sampler import ScoreHLRSampler
|
10 |
-
|
11 |
-
__all__ = [
|
12 |
-
'BaseSampler', 'PseudoSampler', 'RandomSampler',
|
13 |
-
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
|
14 |
-
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
|
15 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/lama-example/bin/evaluate_predicts.py
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
|
3 |
-
import os
|
4 |
-
|
5 |
-
import pandas as pd
|
6 |
-
|
7 |
-
from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset
|
8 |
-
from saicinpainting.evaluation.evaluator import InpaintingEvaluator, lpips_fid100_f1
|
9 |
-
from saicinpainting.evaluation.losses.base_loss import SegmentationAwareSSIM, \
|
10 |
-
SegmentationClassStats, SSIMScore, LPIPSScore, FIDScore, SegmentationAwareLPIPS, SegmentationAwareFID
|
11 |
-
from saicinpainting.evaluation.utils import load_yaml
|
12 |
-
|
13 |
-
|
14 |
-
def main(args):
|
15 |
-
config = load_yaml(args.config)
|
16 |
-
|
17 |
-
dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs)
|
18 |
-
|
19 |
-
metrics = {
|
20 |
-
'ssim': SSIMScore(),
|
21 |
-
'lpips': LPIPSScore(),
|
22 |
-
'fid': FIDScore()
|
23 |
-
}
|
24 |
-
enable_segm = config.get('segmentation', dict(enable=False)).get('enable', False)
|
25 |
-
if enable_segm:
|
26 |
-
weights_path = os.path.expandvars(config.segmentation.weights_path)
|
27 |
-
metrics.update(dict(
|
28 |
-
segm_stats=SegmentationClassStats(weights_path=weights_path),
|
29 |
-
segm_ssim=SegmentationAwareSSIM(weights_path=weights_path),
|
30 |
-
segm_lpips=SegmentationAwareLPIPS(weights_path=weights_path),
|
31 |
-
segm_fid=SegmentationAwareFID(weights_path=weights_path)
|
32 |
-
))
|
33 |
-
evaluator = InpaintingEvaluator(dataset, scores=metrics,
|
34 |
-
integral_title='lpips_fid100_f1', integral_func=lpips_fid100_f1,
|
35 |
-
**config.evaluator_kwargs)
|
36 |
-
|
37 |
-
os.makedirs(os.path.dirname(args.outpath), exist_ok=True)
|
38 |
-
|
39 |
-
results = evaluator.evaluate()
|
40 |
-
|
41 |
-
results = pd.DataFrame(results).stack(1).unstack(0)
|
42 |
-
results.dropna(axis=1, how='all', inplace=True)
|
43 |
-
results.to_csv(args.outpath, sep='\t', float_format='%.4f')
|
44 |
-
|
45 |
-
if enable_segm:
|
46 |
-
only_short_results = results[[c for c in results.columns if not c[0].startswith('segm_')]].dropna(axis=1, how='all')
|
47 |
-
only_short_results.to_csv(args.outpath + '_short', sep='\t', float_format='%.4f')
|
48 |
-
|
49 |
-
print(only_short_results)
|
50 |
-
|
51 |
-
segm_metrics_results = results[['segm_ssim', 'segm_lpips', 'segm_fid']].dropna(axis=1, how='all').transpose().unstack(0).reorder_levels([1, 0], axis=1)
|
52 |
-
segm_metrics_results.drop(['mean', 'std'], axis=0, inplace=True)
|
53 |
-
|
54 |
-
segm_stats_results = results['segm_stats'].dropna(axis=1, how='all').transpose()
|
55 |
-
segm_stats_results.index = pd.MultiIndex.from_tuples(n.split('/') for n in segm_stats_results.index)
|
56 |
-
segm_stats_results = segm_stats_results.unstack(0).reorder_levels([1, 0], axis=1)
|
57 |
-
segm_stats_results.sort_index(axis=1, inplace=True)
|
58 |
-
segm_stats_results.dropna(axis=0, how='all', inplace=True)
|
59 |
-
|
60 |
-
segm_results = pd.concat([segm_metrics_results, segm_stats_results], axis=1, sort=True)
|
61 |
-
segm_results.sort_values(('mask_freq', 'total'), ascending=False, inplace=True)
|
62 |
-
|
63 |
-
segm_results.to_csv(args.outpath + '_segm', sep='\t', float_format='%.4f')
|
64 |
-
else:
|
65 |
-
print(results)
|
66 |
-
|
67 |
-
|
68 |
-
if __name__ == '__main__':
|
69 |
-
import argparse
|
70 |
-
|
71 |
-
aparser = argparse.ArgumentParser()
|
72 |
-
aparser.add_argument('config', type=str, help='Path to evaluation config')
|
73 |
-
aparser.add_argument('datadir', type=str,
|
74 |
-
help='Path to folder with images and masks (output of gen_mask_dataset.py)')
|
75 |
-
aparser.add_argument('predictdir', type=str,
|
76 |
-
help='Path to folder with predicts (e.g. predict_hifill_baseline.py)')
|
77 |
-
aparser.add_argument('outpath', type=str, help='Where to put results')
|
78 |
-
|
79 |
-
main(aparser.parse_args())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/layers/roi_align.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
from torch import nn
|
3 |
-
from torchvision.ops import roi_align
|
4 |
-
|
5 |
-
|
6 |
-
# NOTE: torchvision's RoIAlign has a different default aligned=False
|
7 |
-
class ROIAlign(nn.Module):
|
8 |
-
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
|
9 |
-
"""
|
10 |
-
Args:
|
11 |
-
output_size (tuple): h, w
|
12 |
-
spatial_scale (float): scale the input boxes by this number
|
13 |
-
sampling_ratio (int): number of inputs samples to take for each output
|
14 |
-
sample. 0 to take samples densely.
|
15 |
-
aligned (bool): if False, use the legacy implementation in
|
16 |
-
Detectron. If True, align the results more perfectly.
|
17 |
-
|
18 |
-
Note:
|
19 |
-
The meaning of aligned=True:
|
20 |
-
|
21 |
-
Given a continuous coordinate c, its two neighboring pixel indices (in our
|
22 |
-
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
|
23 |
-
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
|
24 |
-
from the underlying signal at continuous coordinates 0.5 and 1.5). But the original
|
25 |
-
roi_align (aligned=False) does not subtract the 0.5 when computing neighboring
|
26 |
-
pixel indices and therefore it uses pixels with a slightly incorrect alignment
|
27 |
-
(relative to our pixel model) when performing bilinear interpolation.
|
28 |
-
|
29 |
-
With `aligned=True`,
|
30 |
-
we first appropriately scale the ROI and then shift it by -0.5
|
31 |
-
prior to calling roi_align. This produces the correct neighbors; see
|
32 |
-
detectron2/tests/test_roi_align.py for verification.
|
33 |
-
|
34 |
-
The difference does not make a difference to the model's performance if
|
35 |
-
ROIAlign is used together with conv layers.
|
36 |
-
"""
|
37 |
-
super().__init__()
|
38 |
-
self.output_size = output_size
|
39 |
-
self.spatial_scale = spatial_scale
|
40 |
-
self.sampling_ratio = sampling_ratio
|
41 |
-
self.aligned = aligned
|
42 |
-
|
43 |
-
from torchvision import __version__
|
44 |
-
|
45 |
-
version = tuple(int(x) for x in __version__.split(".")[:2])
|
46 |
-
# https://github.com/pytorch/vision/pull/2438
|
47 |
-
assert version >= (0, 7), "Require torchvision >= 0.7"
|
48 |
-
|
49 |
-
def forward(self, input, rois):
|
50 |
-
"""
|
51 |
-
Args:
|
52 |
-
input: NCHW images
|
53 |
-
rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
|
54 |
-
"""
|
55 |
-
assert rois.dim() == 2 and rois.size(1) == 5
|
56 |
-
if input.is_quantized:
|
57 |
-
input = input.dequantize()
|
58 |
-
return roi_align(
|
59 |
-
input,
|
60 |
-
rois.to(dtype=input.dtype),
|
61 |
-
self.output_size,
|
62 |
-
self.spatial_scale,
|
63 |
-
self.sampling_ratio,
|
64 |
-
self.aligned,
|
65 |
-
)
|
66 |
-
|
67 |
-
def __repr__(self):
|
68 |
-
tmpstr = self.__class__.__name__ + "("
|
69 |
-
tmpstr += "output_size=" + str(self.output_size)
|
70 |
-
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
|
71 |
-
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
|
72 |
-
tmpstr += ", aligned=" + str(self.aligned)
|
73 |
-
tmpstr += ")"
|
74 |
-
return tmpstr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cartinoe5930/LLMAgora/app.py
DELETED
@@ -1,364 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import json
|
3 |
-
import requests
|
4 |
-
import os
|
5 |
-
from model_inference import Inference
|
6 |
-
import time
|
7 |
-
HF_TOKEN = os.environ.get("HF_TOKEN")
|
8 |
-
|
9 |
-
question_selector_map = {}
|
10 |
-
|
11 |
-
every_model = ["llama2", "llama2-chat", "vicuna", "falcon", "falcon-instruct", "orca", "wizardlm"]
|
12 |
-
|
13 |
-
with open("src/inference_endpoint.json", "r") as f:
|
14 |
-
inference_endpoint = json.load(f)
|
15 |
-
|
16 |
-
for i in range(len(every_model)):
|
17 |
-
inference_endpoint[every_model[i]]["headers"]["Authorization"] += HF_TOKEN
|
18 |
-
|
19 |
-
def build_question_selector_map(questions):
|
20 |
-
question_selector_map = {}
|
21 |
-
|
22 |
-
# Build question selector map
|
23 |
-
for q in questions:
|
24 |
-
preview = f"{q['question_id']+1}: " + q["question"][:128] + "..."
|
25 |
-
question_selector_map[preview] = q
|
26 |
-
|
27 |
-
return question_selector_map
|
28 |
-
|
29 |
-
def math_display_question_answer(question, cot, request: gr.Request):
|
30 |
-
if cot:
|
31 |
-
q = math_cot_question_selector_map[question]
|
32 |
-
else:
|
33 |
-
q = math_question_selector_map[question]
|
34 |
-
|
35 |
-
return q["agent_response"]["llama"][0], q["agent_response"]["wizardlm"][0], q["agent_response"]["orca"][0], q["summarization"][0], q["agent_response"]["llama"][1], q["agent_response"]["wizardlm"][1], q["agent_response"]["orca"][1], q["summarization"][1], q["agent_response"]["llama"][2], q["agent_response"]["wizardlm"][2], q["agent_response"]["orca"][2]
|
36 |
-
|
37 |
-
def gsm_display_question_answer(question, cot, request: gr.Request):
|
38 |
-
if cot:
|
39 |
-
q = gsm_cot_question_selector_map[question]
|
40 |
-
else:
|
41 |
-
q = gsm_question_selector_map[question]
|
42 |
-
|
43 |
-
return q["agent_response"]["llama"][0], q["agent_response"]["wizardlm"][0], q["agent_response"]["orca"][0], q["summarization"][0], q["agent_response"]["llama"][1], q["agent_response"]["wizardlm"][1], q["agent_response"]["orca"][1], q["summarization"][1], q["agent_response"]["llama"][2], q["agent_response"]["wizardlm"][2], q["agent_response"]["orca"][2]
|
44 |
-
|
45 |
-
def mmlu_display_question_answer(question, cot, request: gr.Request):
|
46 |
-
if cot:
|
47 |
-
q = mmlu_cot_question_selector_map[question]
|
48 |
-
else:
|
49 |
-
q = mmlu_question_selector_map[question]
|
50 |
-
|
51 |
-
return q["agent_response"]["llama"][0], q["agent_response"]["wizardlm"][0], q["agent_response"]["orca"][0], q["summarization"][0], q["agent_response"]["llama"][1], q["agent_response"]["wizardlm"][1], q["agent_response"]["orca"][1], q["summarization"][1], q["agent_response"]["llama"][2], q["agent_response"]["wizardlm"][2], q["agent_response"]["orca"][2]
|
52 |
-
|
53 |
-
def warmup(list_model, model_inference_endpoints=inference_endpoint):
|
54 |
-
for model in list_model:
|
55 |
-
model = model.lower()
|
56 |
-
API_URL = model_inference_endpoints[model]["API_URL"]
|
57 |
-
headers = model_inference_endpoints[model]["headers"]
|
58 |
-
headers["Authorization"] += HF_TOKEN
|
59 |
-
|
60 |
-
def query(payload):
|
61 |
-
return requests.post(API_URL, headers=headers, json=payload)
|
62 |
-
|
63 |
-
output = query({
|
64 |
-
"inputs": "Hello. "
|
65 |
-
})
|
66 |
-
|
67 |
-
time.sleep(300)
|
68 |
-
return {
|
69 |
-
model_list: gr.update(visible=False),
|
70 |
-
options: gr.update(visible=True),
|
71 |
-
inputbox: gr.update(visible=True),
|
72 |
-
submit: gr.update(visible=True),
|
73 |
-
warmup_button: gr.update(visible=False),
|
74 |
-
welcome_message: gr.update(visible=True)
|
75 |
-
}
|
76 |
-
|
77 |
-
def inference(model_list, question, API_KEY, cot, hf_token=HF_TOKEN):
|
78 |
-
if len(model_list) != 3:
|
79 |
-
raise gr.Error("Please choose just '3' models! Neither more nor less!")
|
80 |
-
|
81 |
-
for i in range(len(model_list)):
|
82 |
-
model_list[i] = model_list[i].lower()
|
83 |
-
|
84 |
-
model_response = Inference(model_list, question, API_KEY, cot, hf_token)
|
85 |
-
|
86 |
-
return {
|
87 |
-
output_msg: gr.update(visible=True),
|
88 |
-
output_col: gr.update(visible=True),
|
89 |
-
model1_output1: model_response["agent_response"][model_list[0]][0],
|
90 |
-
model2_output1: model_response["agent_response"][model_list[1]][0],
|
91 |
-
model3_output1: model_response["agent_response"][model_list[2]][0],
|
92 |
-
summarization_text1: model_response["summarization"][0],
|
93 |
-
model1_output2: model_response["agent_response"][model_list[0]][1],
|
94 |
-
model2_output2: model_response["agent_response"][model_list[1]][1],
|
95 |
-
model3_output2: model_response["agent_response"][model_list[2]][1],
|
96 |
-
summarization_text2: model_response["summarization"][1],
|
97 |
-
model1_output3: model_response["agent_response"][model_list[0]][2],
|
98 |
-
model2_output3: model_response["agent_response"][model_list[1]][2],
|
99 |
-
model3_output3: model_response["agent_response"][model_list[2]][2]
|
100 |
-
}
|
101 |
-
|
102 |
-
def load_responses():
|
103 |
-
with open("result/Math/math_result.json", "r") as math_file:
|
104 |
-
math_responses = json.load(math_file)
|
105 |
-
|
106 |
-
with open("result/Math/math_result_cot.json", "r") as math_cot_file:
|
107 |
-
math_cot_responses = json.load(math_cot_file)
|
108 |
-
|
109 |
-
with open("result/GSM8K/gsm_result.json", "r") as gsm_file:
|
110 |
-
gsm_responses = json.load(gsm_file)
|
111 |
-
|
112 |
-
with open("result/GSM8K/gsm_result_cot.json", "r") as gsm_cot_file:
|
113 |
-
gsm_cot_responses = json.load(gsm_cot_file)
|
114 |
-
|
115 |
-
with open("result/MMLU/mmlu_result.json", "r") as mmlu_file:
|
116 |
-
mmlu_responses = json.load(mmlu_file)
|
117 |
-
|
118 |
-
with open("result/MMLU/mmlu_result_cot.json", "r") as mmlu_cot_file:
|
119 |
-
mmlu_cot_responses = json.load(mmlu_cot_file)
|
120 |
-
|
121 |
-
return math_responses, math_cot_responses, gsm_responses, gsm_cot_responses, mmlu_responses, mmlu_cot_responses
|
122 |
-
|
123 |
-
def load_questions(math, gsm, mmlu):
|
124 |
-
math_questions = []
|
125 |
-
gsm_questions = []
|
126 |
-
mmlu_questions = []
|
127 |
-
for i in range(100):
|
128 |
-
math_questions.append(f"{i+1}: " + math[i]["question"][:128] + "...")
|
129 |
-
gsm_questions.append(f"{i+1}: " + gsm[i]["question"][:128] + "...")
|
130 |
-
mmlu_questions.append(f"{i+1}: " + mmlu[i]["question"][:128] + "...")
|
131 |
-
|
132 |
-
return math_questions, gsm_questions, mmlu_questions
|
133 |
-
|
134 |
-
math_result, math_cot_result, gsm_result, gsm_cot_result, mmlu_result, mmlu_cot_result = load_responses()
|
135 |
-
|
136 |
-
math_questions, gsm_questions, mmlu_questions = load_questions(math_result, gsm_result, mmlu_result)
|
137 |
-
|
138 |
-
math_question_selector_map = build_question_selector_map(math_result)
|
139 |
-
math_cot_question_selector_map = build_question_selector_map(math_cot_result)
|
140 |
-
gsm_question_selector_map = build_question_selector_map(gsm_result)
|
141 |
-
gsm_cot_question_selector_map = build_question_selector_map(gsm_cot_result)
|
142 |
-
mmlu_question_selector_map = build_question_selector_map(mmlu_result)
|
143 |
-
mmlu_cot_question_selector_map = build_question_selector_map(mmlu_cot_result)
|
144 |
-
|
145 |
-
|
146 |
-
TITLE = """<h1 align="center">LLM Agora 🗣️🏦</h1>"""
|
147 |
-
|
148 |
-
INTRODUCTION_TEXT = """
|
149 |
-
The **LLM Agora** 🗣️🏦 aims to improve the quality of open-source LMs' responses through debate & revision introduced in [Improving Factuality and Reasoning in Language Models through Multiagent Debate](https://arxiv.org/abs/2305.14325).
|
150 |
-
Thank you to the authors of this paper for suggesting a great idea!
|
151 |
-
|
152 |
-
Do you know that? 🤔 **LLMs can also improve their responses by debating with other LLMs**! 😮 We applied this concept to several open-source LMs to verify that the open-source model, not the proprietary one, can sufficiently improve the response through discussion. 🤗
|
153 |
-
For more details, please refer to the [GitHub Repository](https://github.com/gauss5930/LLM-Agora).
|
154 |
-
You can also check the results in this Space!
|
155 |
-
|
156 |
-
You can use LLM Agora with your own questions if the response of open-source LM is not satisfactory and you want to improve the quality!
|
157 |
-
The Math, GSM8K, and MMLU Tabs show the results of the experiment(Llama2, WizardLM2, Orca2), and for inference, please use the 'Inference' tab.
|
158 |
-
|
159 |
-
Here's how to use LLM Agora!
|
160 |
-
|
161 |
-
1. Before starting, choose just 3 models and click the 'Warm-up LLM Agora 🔥' button and wait until '🤗🔥 Welcome to LLM Agora 🔥🤗' appears. (Suggest to go grab a coffee☕ since it takes 5 minutes!)
|
162 |
-
2. Once the interaction space is available, proceed with the following process.
|
163 |
-
3. Check the CoT box if you want to utilize the Chain-of-Thought while inferencing.
|
164 |
-
4. Please fill in your OpenAI API KEY, it will be used to use ChatGPT to summarize the responses.
|
165 |
-
5. Type your question in the Question box and click the 'Submit' button! If you do so, LLM Agora will show you improved answers! 🤗 (It will take roughly a minute! Please wait for an answer!)
|
166 |
-
|
167 |
-
For more detailed information, please check '※ Specific information about LLM Agora' at the bottom of the page.
|
168 |
-
|
169 |
-
※ Due to quota limitations, 'Llama2-Chat' and 'Falcon-Instruct' are currently unavailable. We will provide additional updates in the future.
|
170 |
-
"""
|
171 |
-
|
172 |
-
WELCOME_TEXT = """<h1 align="center">🤗🔥 Welcome to LLM Agora 🔥🤗</h1>"""
|
173 |
-
|
174 |
-
RESPONSE_TEXT = """<h1 align="center">🤗 Here are the responses to each model!! 🤗</h1>"""
|
175 |
-
|
176 |
-
SPECIFIC_INFORMATION = """
|
177 |
-
This is the specific information about LLM Agora!
|
178 |
-
|
179 |
-
**Tasks**
|
180 |
-
|
181 |
-
- Math: The problem of arithmetic operations on six randomly selected numbers. The format is '{}+{}*{}+{}-{}*{}=?'
|
182 |
-
- GSM8K: GSM8K is a dataset of 8.5K high quality linguistically diverse grade school math word problems created by human problem writers.
|
183 |
-
- MMLU: MMLU (Massive Multitask Language Understanding) is a new benchmark designed to measure knowledge acquired during pretraining by evaluating models exclusively in zero-shot and few-shot settings.
|
184 |
-
|
185 |
-
**Model size**
|
186 |
-
|
187 |
-
Besides Falcon, all other models are based on Llama2.
|
188 |
-
|
189 |
-
|Model name|Model size|
|
190 |
-
|---|---|
|
191 |
-
|Llama2|13B|
|
192 |
-
|Llama2-Chat|13B|
|
193 |
-
|Vicuna|13B|
|
194 |
-
|Falcon|7B|
|
195 |
-
|Falcon-Instruct|7B|
|
196 |
-
|WizardLM|13B|
|
197 |
-
|Orca|13B|
|
198 |
-
|
199 |
-
**Agent numbers & Debate rounds**
|
200 |
-
|
201 |
-
- We limit the number of agents and debate rounds because of the limitation of resources. As a result, we decided to use 3 agents and 2 rounds of debate!
|
202 |
-
|
203 |
-
**GitHub Repository**
|
204 |
-
|
205 |
-
- If you want to see more specific information, please check the [GitHub Repository](https://github.com/gauss5930/LLM-Agora) of LLM Agora!
|
206 |
-
|
207 |
-
**Citation**
|
208 |
-
|
209 |
-
```
|
210 |
-
@article{du2023improving,
|
211 |
-
title={Improving Factuality and Reasoning in Language Models through Multiagent Debate},
|
212 |
-
author={Du, Yilun and Li, Shuang and Torralba, Antonio and Tenenbaum, Joshua B and Mordatch, Igor},
|
213 |
-
journal={arXiv preprint arXiv:2305.14325},
|
214 |
-
year={2023}
|
215 |
-
}
|
216 |
-
```
|
217 |
-
"""
|
218 |
-
|
219 |
-
with gr.Blocks() as demo:
|
220 |
-
gr.HTML(TITLE)
|
221 |
-
gr.Markdown(INTRODUCTION_TEXT)
|
222 |
-
with gr.Column():
|
223 |
-
with gr.Tab("Inference"):
|
224 |
-
model_list = gr.CheckboxGroup(["Llama2", "Vicuna", "Falcon", "WizardLM", "Orca"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora.", type="value", visible=True)
|
225 |
-
warmup_button = gr.Button("Warm-up LLM Agora 🔥", visible=True)
|
226 |
-
welcome_message = gr.HTML(WELCOME_TEXT, visible=False)
|
227 |
-
with gr.Row(visible=False) as options:
|
228 |
-
cot = gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
|
229 |
-
API_KEY = gr.Textbox(label="OpenAI API Key", value="", info="Please fill in your OpenAI API token.", placeholder="sk..", type="password")
|
230 |
-
with gr.Column(visible=False) as inputbox:
|
231 |
-
question = gr.Textbox(label="Question", value="", info="Please type your question!", placeholder="")
|
232 |
-
submit = gr.Button("Submit", visible=False)
|
233 |
-
|
234 |
-
with gr.Row(visible=False) as output_msg:
|
235 |
-
gr.HTML(RESPONSE_TEXT)
|
236 |
-
|
237 |
-
with gr.Column(visible=False) as output_col:
|
238 |
-
with gr.Row(elem_id="model1_response"):
|
239 |
-
model1_output1 = gr.Textbox(label="1️⃣ model's initial response")
|
240 |
-
model2_output1 = gr.Textbox(label="2️⃣ model's initial response")
|
241 |
-
model3_output1 = gr.Textbox(label="3️⃣ model's initial response")
|
242 |
-
summarization_text1 = gr.Textbox(label="Summarization 1")
|
243 |
-
with gr.Row(elem_id="model2_response"):
|
244 |
-
model1_output2 = gr.Textbox(label="1️⃣ model's revised response")
|
245 |
-
model2_output2 = gr.Textbox(label="2️⃣ model's revised response")
|
246 |
-
model3_output2 = gr.Textbox(label="3️⃣ model's revised response")
|
247 |
-
summarization_text2 = gr.Textbox(label="Summarization 2")
|
248 |
-
with gr.Row(elem_id="model3_response"):
|
249 |
-
model1_output3 = gr.Textbox(label="1️⃣ model's final response")
|
250 |
-
model2_output3 = gr.Textbox(label="2️⃣ model's final response")
|
251 |
-
model3_output3 = gr.Textbox(label="3️⃣ model's final response")
|
252 |
-
|
253 |
-
|
254 |
-
with gr.Tab("Math"):
|
255 |
-
math_cot = gr.Checkbox(label="CoT", info="If you want to see CoT result, please check the box.")
|
256 |
-
math_question_list = gr.Dropdown(math_questions, label="Math Question")
|
257 |
-
|
258 |
-
with gr.Column():
|
259 |
-
with gr.Row(elem_id="model1_response"):
|
260 |
-
math_model1_output1 = gr.Textbox(label="Llama2🦙's 1️⃣st response")
|
261 |
-
math_model2_output1 = gr.Textbox(label="WizardLM🧙♂️'s 1️⃣st response")
|
262 |
-
math_model3_output1 = gr.Textbox(label="Orca🐬's 1️⃣st response")
|
263 |
-
math_summarization_text1 = gr.Textbox(label="Summarization 1️⃣")
|
264 |
-
with gr.Row(elem_id="model2_response"):
|
265 |
-
math_model1_output2 = gr.Textbox(label="Llama2🦙's 2️⃣nd response")
|
266 |
-
math_model2_output2 = gr.Textbox(label="WizardLM🧙♂️'s 2️⃣nd response")
|
267 |
-
math_model3_output2 = gr.Textbox(label="Orca🐬's 2️⃣nd response")
|
268 |
-
math_summarization_text2 = gr.Textbox(label="Summarization 2️⃣")
|
269 |
-
with gr.Row(elem_id="model3_response"):
|
270 |
-
math_model1_output3 = gr.Textbox(label="Llama2🦙's 3️⃣rd response")
|
271 |
-
math_model2_output3 = gr.Textbox(label="WizardLM🧙♂️'s 3️⃣rd response")
|
272 |
-
math_model3_output3 = gr.Textbox(label="Orca🐬's 3️⃣rd response")
|
273 |
-
|
274 |
-
gr.HTML("""<h1 align="center"> The result of Math </h1>""")
|
275 |
-
gr.HTML("""<p align="center"><img src='https://github.com/gauss5930/LLM-Agora/assets/80087878/4fc22896-1306-4a93-bd54-a7a2ff184c98'></p>""")
|
276 |
-
|
277 |
-
math_cot.select(
|
278 |
-
math_display_question_answer,
|
279 |
-
[math_question_list, math_cot],
|
280 |
-
[math_model1_output1, math_model2_output1, math_model3_output1, math_summarization_text1, math_model1_output2, math_model2_output2, math_model3_output2, math_summarization_text2, math_model1_output3, math_model2_output3, math_model3_output3]
|
281 |
-
)
|
282 |
-
math_question_list.change(
|
283 |
-
math_display_question_answer,
|
284 |
-
[math_question_list, math_cot],
|
285 |
-
[math_model1_output1, math_model2_output1, math_model3_output1, math_summarization_text1, math_model1_output2, math_model2_output2, math_model3_output2, math_summarization_text2, math_model1_output3, math_model2_output3, math_model3_output3]
|
286 |
-
)
|
287 |
-
|
288 |
-
|
289 |
-
with gr.Tab("GSM8K"):
|
290 |
-
gsm_cot = gr.Checkbox(label="CoT", info="If you want to see CoT result, please check the box.")
|
291 |
-
gsm_question_list = gr.Dropdown(gsm_questions, label="GSM8K Question")
|
292 |
-
|
293 |
-
with gr.Column():
|
294 |
-
with gr.Row(elem_id="model1_response"):
|
295 |
-
gsm_model1_output1 = gr.Textbox(label="Llama2🦙's 1️⃣st response")
|
296 |
-
gsm_model2_output1 = gr.Textbox(label="WizardLM🧙♂️'s 1️⃣st response")
|
297 |
-
gsm_model3_output1 = gr.Textbox(label="Orca🐬's 1️⃣st response")
|
298 |
-
gsm_summarization_text1 = gr.Textbox(label="Summarization 1️⃣")
|
299 |
-
with gr.Row(elem_id="model2_response"):
|
300 |
-
gsm_model1_output2 = gr.Textbox(label="Llama2🦙's 2️⃣nd response")
|
301 |
-
gsm_model2_output2 = gr.Textbox(label="WizardLM🧙♂️'s 2️⃣nd response")
|
302 |
-
gsm_model3_output2 = gr.Textbox(label="Orca🐬's 2️⃣nd response")
|
303 |
-
gsm_summarization_text2 = gr.Textbox(label="Summarization 2️⃣")
|
304 |
-
with gr.Row(elem_id="model3_response"):
|
305 |
-
gsm_model1_output3 = gr.Textbox(label="Llama2🦙's 3️⃣rd response")
|
306 |
-
gsm_model2_output3 = gr.Textbox(label="WizardLM🧙♂️'s 3️⃣rd response")
|
307 |
-
gsm_model3_output3 = gr.Textbox(label="Orca🐬's 3️⃣rd response")
|
308 |
-
|
309 |
-
gr.HTML("""<h1 align="center"> The result of GSM8K </h1>""")
|
310 |
-
gr.HTML("""<p align="center"><img src="https://github.com/gauss5930/LLM-Agora/assets/80087878/64f05ea4-5bec-41e4-83d7-d8855e753290"></p>""")
|
311 |
-
|
312 |
-
gsm_cot.select(
|
313 |
-
gsm_display_question_answer,
|
314 |
-
[gsm_question_list, gsm_cot],
|
315 |
-
[gsm_model1_output1, gsm_model2_output1, gsm_model3_output1, gsm_summarization_text1, gsm_model1_output2, gsm_model2_output2, gsm_model3_output2, gsm_summarization_text2, gsm_model1_output3, gsm_model2_output3, gsm_model3_output3]
|
316 |
-
)
|
317 |
-
gsm_question_list.change(
|
318 |
-
gsm_display_question_answer,
|
319 |
-
[gsm_question_list, gsm_cot],
|
320 |
-
[gsm_model1_output1, gsm_model2_output1, gsm_model3_output1, gsm_summarization_text1, gsm_model1_output2, gsm_model2_output2, gsm_model3_output2, gsm_summarization_text2, gsm_model1_output3, gsm_model2_output3, gsm_model3_output3]
|
321 |
-
)
|
322 |
-
|
323 |
-
|
324 |
-
with gr.Tab("MMLU"):
|
325 |
-
mmlu_cot = gr.Checkbox(label="CoT", info="If you want to see CoT result, please check the box.")
|
326 |
-
mmlu_question_list = gr.Dropdown(mmlu_questions, label="MMLU Question")
|
327 |
-
|
328 |
-
with gr.Column():
|
329 |
-
with gr.Row(elem_id="model1_response"):
|
330 |
-
mmlu_model1_output1 = gr.Textbox(label="Llama2🦙's 1️⃣st response")
|
331 |
-
mmlu_model2_output1 = gr.Textbox(label="WizardLM🧙♂️'s 1️⃣st response")
|
332 |
-
mmlu_model3_output1 = gr.Textbox(label="Orca🐬's 1️⃣st response")
|
333 |
-
mmlu_summarization_text1 = gr.Textbox(label="Summarization 1️⃣")
|
334 |
-
with gr.Row(elem_id="model2_response"):
|
335 |
-
mmlu_model1_output2 = gr.Textbox(label="Llama2🦙's 2️⃣nd response")
|
336 |
-
mmlu_model2_output2 = gr.Textbox(label="WizardLM🧙♂️'s 2️⃣nd response")
|
337 |
-
mmlu_model3_output2 = gr.Textbox(label="Orca🐬's 2️⃣nd response")
|
338 |
-
mmlu_summarization_text2 = gr.Textbox(label="Summarization 2️⃣")
|
339 |
-
with gr.Row(elem_id="model3_response"):
|
340 |
-
mmlu_model1_output3 = gr.Textbox(label="Llama2🦙's 3️⃣rd response")
|
341 |
-
mmlu_model2_output3 = gr.Textbox(label="WizardLM🧙♂️'s 3️⃣rd response")
|
342 |
-
mmlu_model3_output3 = gr.Textbox(label="Orca🐬's 3️⃣rd response")
|
343 |
-
|
344 |
-
gr.HTML("""<h1 align="center"> The result of MMLU </h1>""")
|
345 |
-
gr.HTML("""<p align="center"><img src="https://github.com/composable-models/llm_multiagent_debate/assets/80087878/963571aa-228b-4d73-9082-5f528552383e"></p>""")
|
346 |
-
|
347 |
-
mmlu_cot.select(
|
348 |
-
mmlu_display_question_answer,
|
349 |
-
[mmlu_question_list, mmlu_cot],
|
350 |
-
[mmlu_model1_output1, mmlu_model2_output1, mmlu_model3_output1, mmlu_summarization_text1, mmlu_model1_output2, mmlu_model2_output2, mmlu_model3_output2, mmlu_summarization_text2, mmlu_model1_output3, mmlu_model2_output3, mmlu_model3_output3]
|
351 |
-
)
|
352 |
-
mmlu_question_list.change(
|
353 |
-
mmlu_display_question_answer,
|
354 |
-
[mmlu_question_list, mmlu_cot],
|
355 |
-
[mmlu_model1_output1, mmlu_model2_output1, mmlu_model3_output1, mmlu_summarization_text1, mmlu_model1_output2, mmlu_model2_output2, mmlu_model3_output2, mmlu_summarization_text2, mmlu_model1_output3, mmlu_model2_output3, mmlu_model3_output3]
|
356 |
-
)
|
357 |
-
|
358 |
-
with gr.Accordion("※ Specific information about LLM Agora", open=False):
|
359 |
-
gr.Markdown(SPECIFIC_INFORMATION)
|
360 |
-
|
361 |
-
warmup_button.click(warmup, [model_list], [model_list, options, inputbox, submit, warmup_button, welcome_message])
|
362 |
-
submit.click(inference, [model_list, question, API_KEY, cot], [output_msg, output_col, model1_output1, model2_output1, model3_output1, summarization_text1, model1_output2, model2_output2, model3_output2, summarization_text2, model1_output3, model2_output3, model3_output3])
|
363 |
-
|
364 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChandraMohanNayal/AutoGPT/autogpt/processing/__init__.py
DELETED
File without changes
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/index.js
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
import { QQRedBot } from "./bot.js"
|
2 |
-
import { getToken } from './tool.js'
|
3 |
-
import { toQQRedMsg } from './message.js'
|
4 |
-
import { Version, allSocketList, setAllSocketList } from '../../components/index.js'
|
5 |
-
import WebSocket from 'ws'
|
6 |
-
import fetch from "node-fetch"
|
7 |
-
|
8 |
-
logger.info(logger.yellow("- 正在加载 Chronocat(red) 适配器插件"))
|
9 |
-
|
10 |
-
export const redAdapter = new class RedAdapter {
|
11 |
-
constructor() {
|
12 |
-
this.id = "QQ"
|
13 |
-
this.name = 'chronocat'
|
14 |
-
}
|
15 |
-
|
16 |
-
reconnectCount = 1
|
17 |
-
|
18 |
-
async connect(data) {
|
19 |
-
if (data.closed) return
|
20 |
-
const [host, port] = data.address.split(':')
|
21 |
-
let token = data.accessToken
|
22 |
-
if (!token) {
|
23 |
-
token = getToken()
|
24 |
-
if (!token) return
|
25 |
-
}
|
26 |
-
const bot = {
|
27 |
-
host,
|
28 |
-
port,
|
29 |
-
token
|
30 |
-
}
|
31 |
-
bot.sendApi = async (method, api, body) => {
|
32 |
-
const controller = new AbortController()
|
33 |
-
const signal = controller.signal
|
34 |
-
const timeout = 30000
|
35 |
-
setTimeout(() => {
|
36 |
-
controller.abort()
|
37 |
-
}, timeout);
|
38 |
-
return await fetch(`http://${bot.host}:${bot.port}/api/${api}`, {
|
39 |
-
signal,
|
40 |
-
method,
|
41 |
-
body,
|
42 |
-
headers: {
|
43 |
-
Authorization: 'Bearer ' + bot.token
|
44 |
-
}
|
45 |
-
}).then(r => {
|
46 |
-
if (!r.ok) throw r
|
47 |
-
const contentType = r.headers.get('content-type');
|
48 |
-
if (contentType.includes('application/json')) {
|
49 |
-
return r.json();
|
50 |
-
} else if (contentType.includes('text/plain')) {
|
51 |
-
return r.text();
|
52 |
-
} else {
|
53 |
-
return r
|
54 |
-
}
|
55 |
-
}).catch(error => {
|
56 |
-
if (error.name === 'AbortError') {
|
57 |
-
return { error: `${logger.red(`[${this.uin}] ${api} 请求超时, 请检查账号状态或重启QQ!`)}` }
|
58 |
-
} else {
|
59 |
-
return { error }
|
60 |
-
}
|
61 |
-
})
|
62 |
-
}
|
63 |
-
const reconnect = () => {
|
64 |
-
if (!data.stopReconnect && ((this.reconnectCount < data.maxReconnectAttempts) || data.maxReconnectAttempts <= 0)) {
|
65 |
-
logger.warn(`${this.name} 开始尝试重新连接第${this.reconnectCount}次`);
|
66 |
-
this.reconnectCount++
|
67 |
-
setTimeout(() => {
|
68 |
-
this.connect(data)
|
69 |
-
}, data.reconnectInterval * 1000);
|
70 |
-
} else {
|
71 |
-
this.stopReconnect = false
|
72 |
-
logger.warn(`${this.name} 达到最大重连次数或关闭连接,停止重连`);
|
73 |
-
}
|
74 |
-
}
|
75 |
-
let info = await bot.sendApi('get', 'getSelfProfile')
|
76 |
-
if (info.error) {
|
77 |
-
if (info.error.code == 'ECONNREFUSED') {
|
78 |
-
logger.error(`${this.name} 请检查是否安装Chronocat并启动QQNT`)
|
79 |
-
reconnect()
|
80 |
-
return
|
81 |
-
}
|
82 |
-
logger.error(`${this.name} Token错误或其他未知原因`)
|
83 |
-
logger.error(await info.error.text?.() || info.error)
|
84 |
-
return
|
85 |
-
}
|
86 |
-
if (!info.uin) {
|
87 |
-
logger.error(`${this.name} 请点击登录`)
|
88 |
-
reconnect()
|
89 |
-
return
|
90 |
-
}
|
91 |
-
bot.info = {
|
92 |
-
...info,
|
93 |
-
user_id: info.uin,
|
94 |
-
self_id: info.uin,
|
95 |
-
nickname: info.nick,
|
96 |
-
username: info.nick
|
97 |
-
}
|
98 |
-
bot.nickname = info.nick
|
99 |
-
bot.self_id = Number(info.uin)
|
100 |
-
this.uin = bot.self_id
|
101 |
-
bot.uin = bot.self_id
|
102 |
-
bot.ws = new WebSocket(`ws://${bot.host}:${bot.port}`)
|
103 |
-
bot.send = (type, payload) => bot.ws.send(JSON.stringify({ type, payload }))
|
104 |
-
bot.ws.on('open', () => bot.send('meta::connect', { token: bot.token }))
|
105 |
-
bot.ws.on('message', data => toQQRedMsg(bot, data))
|
106 |
-
bot.ws.on('close', (code) => {
|
107 |
-
delete Bot[bot.self_id]
|
108 |
-
switch (code) {
|
109 |
-
case 1005:
|
110 |
-
logger.error(`${this.name}(${bot.self_id}) 主动断开连接`)
|
111 |
-
return
|
112 |
-
case 1006:
|
113 |
-
logger.error(`${this.name}(${bot.self_id}) QQNT被关闭`)
|
114 |
-
reconnect()
|
115 |
-
return
|
116 |
-
default:
|
117 |
-
return
|
118 |
-
}
|
119 |
-
})
|
120 |
-
Bot[bot.self_id] = new QQRedBot(bot)
|
121 |
-
if (!Version.isTrss) {
|
122 |
-
/** 米游社主动推送、椰奶状态pro */
|
123 |
-
if (!Bot?.adapter) {
|
124 |
-
Bot.adapter = [Bot.uin]
|
125 |
-
Bot.adapter.push(bot.self_id)
|
126 |
-
} else {
|
127 |
-
Bot.adapter.push(bot.self_id)
|
128 |
-
/** 去重防止断连后出现多个重复的id */
|
129 |
-
Bot.adapter = Array.from(new Set(Bot.adapter.map(JSON.stringify))).map(JSON.parse)
|
130 |
-
}
|
131 |
-
}
|
132 |
-
logger.mark(`${logger.blue(`[${bot.self_id}]`)} ${this.name}(${this.id}) 已连接`)
|
133 |
-
data.ws = {
|
134 |
-
close: () => {
|
135 |
-
bot.ws.close()
|
136 |
-
}
|
137 |
-
}
|
138 |
-
data.status = 1
|
139 |
-
data.uin = this.uin
|
140 |
-
setAllSocketList(data)
|
141 |
-
this.reconnectCount = 1
|
142 |
-
Bot.em(`connect.${bot.self_id}`, Bot[bot.self_id])
|
143 |
-
return true
|
144 |
-
}
|
145 |
-
|
146 |
-
async load() {
|
147 |
-
for (const i of allSocketList) {
|
148 |
-
if (i.type == 4) {
|
149 |
-
await new Promise(resolve => {
|
150 |
-
redAdapter.connect(i).then(resolve)
|
151 |
-
setTimeout(resolve, 5000)
|
152 |
-
})
|
153 |
-
}
|
154 |
-
}
|
155 |
-
}
|
156 |
-
}
|
157 |
-
|
158 |
-
if (Version.isTrss) {
|
159 |
-
Bot.adapter.push(redAdapter)
|
160 |
-
}
|
161 |
-
logger.info(logger.green("- Chronocat(red) 适配器插件 加载完成"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|