Commit
·
e63c2c0
1
Parent(s):
4bad19b
Update parquet files (step 108 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/Dockerfile +0 -19
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Batterybar Pro License Key Free HOT Download.md +0 -31
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit PhantomPDF A Complete PDF Solution for Any Task.md +0 -29
- spaces/1gistliPinn/ChatGPT4/Examples/ACTIA Multidiag Dvd Rip.rar NEW.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Assassins Creed Unity Gold Edition V.1.5.0 - MAXAGENT 31.md +0 -25
- spaces/1gistliPinn/ChatGPT4/Examples/Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar.md +0 -60
- spaces/1gistliPinn/ChatGPT4/Examples/Baixar O Jogo Do Ronald Mcdonald O Resgate Dos Bichos.md +0 -28
- spaces/1gistliPinn/ChatGPT4/Examples/Costruzione Di Macchine Mcgraw-hill Pdf Download [EXCLUSIVE].md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Maxim-Korea-October-2012.md +0 -68
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dynamons World MOD APK and Unlock All Levels and Features.md +0 -109
- spaces/1phancelerku/anime-remove-background/Download Kodu and Unleash Your Creativity with Game Design.md +0 -157
- spaces/1phancelerku/anime-remove-background/Download and Install WhatsApp Messenger on Your Windows 8 PC.md +0 -68
- spaces/44ov41za8i/FreeVC/speaker_encoder/visualizations.py +0 -178
- spaces/801artistry/RVC801/Dockerfile +0 -29
- spaces/A666sxr/Genshin_TTS/app.py +0 -94
- spaces/AIFILMS/generate_human_motion/VQ-Trans/train_vq.py +0 -171
- spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_offscreen.py +0 -92
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rel_transformer.py +0 -611
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CreateBackground.js +0 -16
- spaces/AiMimicry/sovits-models/vdecoder/hifigan/nvSTFT.py +0 -111
- spaces/Akmyradov/TurkmenTTSweSTT/vits/commons.py +0 -161
- spaces/Alpaca233/SadTalker/src/utils/safetensor_helper.py +0 -8
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/longcode/jpgd.cpp +0 -3276
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/img2img.md +0 -100
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py +0 -4
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Low-VRAM-guide.md +0 -53
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/llama.cpp.md +0 -43
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/logging_colors.py +0 -117
- spaces/Apex-X/Tm/roop/processors/frame/face_swapper.py +0 -88
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/segment.py +0 -739
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/tornadoweb.py +0 -59
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/nuimages.py +0 -37
- spaces/Bart92/RVC_HF/utils/backups_test.py +0 -138
- spaces/Benson/text-generation/Examples/Apk Stumble Chicos Apk Puro.md +0 -80
- spaces/Benson/text-generation/Examples/Brain Test 360.md +0 -54
- spaces/Benson/text-generation/Examples/Cmo Descargar Minecraft De Prueba En El Ordenador Porttil.md +0 -87
- spaces/Benson/text-generation/Examples/Descargar Base De Datos Oracle 11g.md +0 -94
- spaces/BetterAPI/BetterChat_new/src/lib/types/Timestamps.ts +0 -4
- spaces/CALM/Dashboard/streamlit_observable/frontend/build/static/js/main.fc603b94.chunk.js +0 -3
- spaces/CNXT/GPTx/README.md +0 -12
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_model_analysis.py +0 -58
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/make_mask.py +0 -14
- spaces/CVPR/Text2Human/Text2Human/models/sample_model.py +0 -500
- spaces/CVPR/lama-example/saicinpainting/training/losses/constants.py +0 -152
- spaces/CVPR/monoscene_lite/monoscene/__init__.py +0 -0
- spaces/CaliforniaHealthCollaborative/Mermaid.Md/GAMEPLAN.md +0 -0
- spaces/Chitranshu/Dashboard-Uber/app.py +0 -198
- spaces/CohereForAI/pokemon-cards-explorer/src/data_scraping.py +0 -103
spaces/101-5/gpt4free/g4f/.v1/Dockerfile
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
FROM python:3.11.3-slim
|
2 |
-
|
3 |
-
RUN apt-get update \
|
4 |
-
&& apt-get install -y --no-install-recommends ffmpeg \
|
5 |
-
&& apt-get -y clean \
|
6 |
-
&& rm -rf /var/lib/apt/lists/*
|
7 |
-
|
8 |
-
COPY requirements.txt /tmp
|
9 |
-
RUN pip install --upgrade pip \
|
10 |
-
&& pip install -r /tmp/requirements.txt \
|
11 |
-
&& rm /tmp/requirements.txt
|
12 |
-
|
13 |
-
COPY . /root/gpt4free
|
14 |
-
|
15 |
-
WORKDIR /root/gpt4free
|
16 |
-
|
17 |
-
CMD ["streamlit", "run", "./gui/streamlit_app.py"]
|
18 |
-
|
19 |
-
EXPOSE 8501
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Batterybar Pro License Key Free HOT Download.md
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download BatteryBar Pro for Free with License Key</h1>
|
3 |
-
<p>BatteryBar Pro is a popular application that displays the battery status of your laptop on the taskbar. It shows you useful information such as the remaining battery percentage, the battery wear, and the full runtime. It also helps you monitor your battery's lifespan and take care of your battery. If you want to download BatteryBar Pro for free with a license key, here are some steps you can follow:</p>
|
4 |
-
<ol>
|
5 |
-
<li>Go to <a href="https://getintopc.com/softwares/batterybar-pro-free-download/">this website</a> and click on the "Download" button. This will start downloading the setup file of BatteryBar Pro v3.4.3.</li>
|
6 |
-
<li>Run the setup file and follow the instructions to install BatteryBar Pro on your laptop.</li>
|
7 |
-
<li>After the installation is complete, open BatteryBar Pro and click on the "Help" menu. Then, select "Enter License Key".</li>
|
8 |
-
<li>Enter the following license key: <code>BB-PRO-3.6.6-1234567890</code>. This is a valid license key that was shared by <a href="https://fixvamet.weebly.com/blog/batterybar-pro-36-6-serial-key">this blog</a>. You can also try other license keys from <a href="https://www.updatestar.com/en/topic/batterybar%20pro%203.6.6%20license%20key">this website</a>.</li>
|
9 |
-
<li>Click on "Activate" and enjoy BatteryBar Pro for free!</li>
|
10 |
-
</ol>
|
11 |
-
<p>BatteryBar Pro has many features that make it a great tool for laptop users. You can customize the appearance of the battery meter, use different battery profiles for each power scheme, and set custom sounds for low and critical battery warnings. You can also check the detailed statistics of your battery's performance and health. BatteryBar Pro is compatible with Windows 2K/XP/Vista/7/8/8.1/10 and requires 1 GB of RAM and 10 MB of free disk space.</p>
|
12 |
-
<h2>Batterybar Pro License Key Free Download</h2><br /><p><b><b>DOWNLOAD</b> ————— <a href="https://byltly.com/2uKzEF">https://byltly.com/2uKzEF</a></b></p><br /><br />
|
13 |
-
<p>If you liked this article, please share it with your friends and leave a comment below. You can also subscribe to our newsletter for more tips and tricks on how to optimize your laptop's battery life.</p>
|
14 |
-
|
15 |
-
<p>BatteryBar Pro is not the only application that can display your battery status on the taskbar. There are some other alternatives that you can try if you want to compare different features and options. Here are some of them:</p>
|
16 |
-
<ul>
|
17 |
-
<li><a href="https://www.nirsoft.net/utils/battery_information_view.html">BatteryInfoView</a>: This is a simple and lightweight tool that shows you the current status and information of your battery. It also logs the battery's discharge cycles and displays the battery capacity evolution as a graph.</li>
|
18 |
-
<li><a href="https://www.passmark.com/products/batmon/">BatteryMon</a>: This is a comprehensive and user-friendly tool that monitors your battery's voltage, charge rate, discharge rate, and more. It also supports multiple batteries and can alert you when your battery reaches a low or critical level.</li>
|
19 |
-
<li><a href="https://smarterbattery.com/">Smarter Battery</a>: This is a smart and advanced tool that displays your battery's health, wear level, discharge cycles, and calibration status. It also has a battery benchmark feature that tests your battery's performance and generates a report.</li>
|
20 |
-
</ul>
|
21 |
-
<p>These are some of the best applications that can display your battery status on the taskbar. You can download them for free or purchase them for a reasonable price. However, if you want to get the most out of your battery life, you should also follow some basic tips and practices that can help you optimize your laptop's power consumption. Here are some of them:</p>
|
22 |
-
<ol>
|
23 |
-
<li>Adjust your screen brightness and contrast to a comfortable level. A brighter screen consumes more power than a dimmer one.</li>
|
24 |
-
<li>Turn off or disable any unnecessary devices or features that you are not using. For example, you can turn off Bluetooth, Wi-Fi, webcam, microphone, etc. when you don't need them.</li>
|
25 |
-
<li>Use a power-saving mode or plan that suits your needs. Windows has different power plans that you can choose from, such as Balanced, Power Saver, High Performance, etc. You can also customize your own power plan and change the settings for various components.</li>
|
26 |
-
<li>Close any programs or applications that you are not using. Running multiple programs at the same time can drain your battery faster than running one or two programs.</li>
|
27 |
-
<li>Avoid exposing your laptop to extreme temperatures or humidity. High or low temperatures can affect your battery's performance and lifespan.</li>
|
28 |
-
</ol>
|
29 |
-
<p>By following these tips and using BatteryBar Pro or any of the alternative applications mentioned above, you can monitor and optimize your laptop's battery life easily and effectively. You can also save money and time by avoiding frequent battery replacements or repairs.</p> cec2833e83<br />
|
30 |
-
<br />
|
31 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit PhantomPDF A Complete PDF Solution for Any Task.md
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
|
2 |
-
```html
|
3 |
-
<h1>Why You Should Use Foxit PhantomPDF for Your PDF Needs</h1>
|
4 |
-
<p>If you work with PDF files regularly, you know how important it is to have a reliable and versatile PDF editor. Whether you need to create, edit, convert, sign, or protect your PDF documents, you want a tool that can handle any task with ease and efficiency. That's why you should use Foxit PhantomPDF, the best PDF editor for Windows and Mac.</p>
|
5 |
-
<p>Foxit PhantomPDF is a powerful and user-friendly PDF software that lets you do more with your PDF files. Here are some of the features that make Foxit PhantomPDF stand out from the competition:</p>
|
6 |
-
<h2>crack foxit phantompdf</h2><br /><p><b><b>Download File</b> ⚙⚙⚙ <a href="https://byltly.com/2uKzb3">https://byltly.com/2uKzb3</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li><b>Create and edit PDFs from any source.</b> You can create PDF files from scratch, from scanned documents, from Microsoft Office files, from web pages, and more. You can also edit any PDF file with a full-featured word processor-like interface. You can add, delete, move, resize, or format text, images, graphics, and other elements. You can also insert headers, footers, page numbers, bookmarks, hyperlinks, comments, and annotations.</li>
|
9 |
-
<li><b>Convert PDFs to and from other formats.</b> You can convert PDF files to Word, Excel, PowerPoint, HTML, ePub, and other formats with high quality and accuracy. You can also convert other file types to PDF with a simple drag-and-drop. You can even combine multiple files into one PDF or split a PDF into separate files.</li>
|
10 |
-
<li><b>Sign and protect PDFs with advanced security.</b> You can sign your PDF documents electronically with digital signatures or stamps. You can also protect your PDF files with passwords, encryption, redaction, watermarks, and permissions. You can also manage the digital certificates and trusted identities for your PDF documents.</li>
|
11 |
-
<li><b>Collaborate and share PDFs with ease.</b> You can collaborate on PDF documents with other users in real time with Foxit PhantomPDF's cloud-based services. You can also share your PDF files via email, Dropbox, Google Drive, OneDrive, SharePoint, or Foxit Drive. You can also integrate Foxit PhantomPDF with Microsoft Teams, Outlook, Word, Excel, PowerPoint, and Visio.</li>
|
12 |
-
</ul>
|
13 |
-
<p>As you can see, Foxit PhantomPDF is more than just a PDF editor. It's a complete PDF solution that meets all your PDF needs. Whether you are a student, a professional, or a business owner, you can benefit from using Foxit PhantomPDF for your PDF projects.</p>
|
14 |
-
<p>So what are you waiting for? Download Foxit PhantomPDF today and enjoy a free trial for 14 days. You'll be amazed by how much you can do with your PDF files with Foxit PhantomPDF.</p>
|
15 |
-
```
|
16 |
-
|
17 |
-
```html
|
18 |
-
<p>If you are wondering how Foxit PhantomPDF compares to other PDF editors, you'll be glad to know that it has many advantages over its competitors. Here are some of the reasons why Foxit PhantomPDF is the best choice for your PDF needs:</p>
|
19 |
-
<ul>
|
20 |
-
<li><b>It's fast and reliable.</b> Foxit PhantomPDF is designed to be fast and responsive, even when working with large and complex PDF files. You won't experience any lag or crashes when using Foxit PhantomPDF. You can also trust that your PDF files will be processed and displayed correctly, without any errors or glitches.</li>
|
21 |
-
<li><b>It's affordable and flexible.</b> Foxit PhantomPDF offers a range of pricing plans and licensing options to suit your budget and needs. You can choose from a perpetual license, a subscription license, or a volume license. You can also choose from different editions, such as Standard, Business, or Education. You can also enjoy free updates and technical support for your Foxit PhantomPDF software.</li>
|
22 |
-
<li><b>It's compatible and compliant.</b> Foxit PhantomPDF works seamlessly with any PDF file, regardless of its origin or format. You can also create PDF files that comply with various standards and regulations, such as ISO 32000-1, ISO 19005-1, ISO 14289-1, WCAG 2.0, PDF/A, PDF/E, PDF/X, and more. You can also validate the compliance of your PDF files with Foxit PhantomPDF's built-in tools.</li>
|
23 |
-
</ul>
|
24 |
-
<p>With Foxit PhantomPDF, you can enjoy a smooth and satisfying PDF experience. You can create, edit, convert, sign, protect, collaborate, and share PDF files with ease and confidence. You can also customize your Foxit PhantomPDF software to fit your preferences and needs.</p>
|
25 |
-
<p>Foxit PhantomPDF is the ultimate PDF editor for Windows and Mac. Don't settle for less when you can have the best. Download Foxit PhantomPDF today and discover the difference.</p>
|
26 |
-
<p></p>
|
27 |
-
```</p> ddb901b051<br />
|
28 |
-
<br />
|
29 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/ACTIA Multidiag Dvd Rip.rar NEW.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>ACTIA Multidiag dvd rip.rar</h2><br /><p><b><b>DOWNLOAD</b> ✯✯✯ <a href="https://imgfil.com/2uxYzO">https://imgfil.com/2uxYzO</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
ACTIA Multidiag Dvd Rip.rar > http://tinyurl.com/qyrmav7. ACTIA Multidiag Dvd Rip.rar 3d Desktop Vdock Exodo Theme Rar Download Torrent 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Assassins Creed Unity Gold Edition V.1.5.0 - MAXAGENT 31.md
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 Review</h1>
|
3 |
-
<p>Assassin's Creed: Unity is a 2014 action-adventure game developed by Ubisoft Montreal and published by Ubisoft. It is the eighth major installment in the Assassin's Creed series, and the successor to 2013's Assassin's Creed IV: Black Flag. It is set in Paris during the French Revolution, and follows the story of Arno Dorian, a young assassin who becomes involved in a conflict between the Assassins and the Templars.</p>
|
4 |
-
<p>The game features a new engine, AnvilNext 2.0, which allows for improved graphics, animations, and gameplay. The game also introduces a new cooperative multiplayer mode, where up to four players can team up to complete missions and explore the open world of Paris. The game received mixed reviews from critics, who praised the setting, visuals, and combat, but criticized the technical issues, story, and lack of innovation.</p>
|
5 |
-
<h2>Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31</h2><br /><p><b><b>Download</b> ✺ <a href="https://imgfil.com/2uxZdA">https://imgfil.com/2uxZdA</a></b></p><br /><br />
|
6 |
-
<p>Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 is a repack version of the game that includes all the DLCs and updates released by Ubisoft. It also features a crack by MAXAGENT 31, a group of hackers who claim to have bypassed the game's DRM protection. The repack version is smaller in size than the original game, and claims to have faster installation and better performance.</p>
|
7 |
-
<p>However, some users have reported that the repack version still suffers from bugs, glitches, and crashes. Some have also complained that the crack by MAXAGENT 31 is not reliable, and that it may contain malware or viruses. Therefore, it is advised to download the repack version at your own risk, and to scan it with an antivirus program before installing it.</p>
|
8 |
-
|
9 |
-
<p>If you want to play Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31, you will need a PC that meets the following minimum requirements:</p>
|
10 |
-
<ul>
|
11 |
-
<li>OS: Windows 7 SP1, Windows 8/8.1 (64-bit operating system required)</li>
|
12 |
-
<li>Processor: Intel Core i5-2500K @ 3.3 GHz or AMD FX-8350 @ 4.0 GHz</li>
|
13 |
-
<li>Memory: 6 GB RAM</li>
|
14 |
-
<li>Graphics: NVIDIA GeForce GTX 680 or AMD Radeon HD 7970 (2 GB VRAM)</li>
|
15 |
-
<li>Storage: 50 GB available space</li>
|
16 |
-
<li>Sound Card: DirectX 9.0c compatible sound card with latest drivers</li>
|
17 |
-
</ul>
|
18 |
-
<p>You can download Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 from various torrent sites, such as The Pirate Bay, Kickass Torrents, or RARBG. However, be aware that downloading and playing pirated games is illegal and may result in legal consequences. You may also face ethical issues, as you are depriving the developers and publishers of their rightful income. Therefore, it is recommended to buy the original game from official sources, such as Steam, Uplay, or Epic Games Store.</p>
|
19 |
-
|
20 |
-
<p>Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 offers you the opportunity to experience the French Revolution in a stunning and immersive way. You can explore the city of Paris, from the Bastille to the Notre Dame, and witness the historical events that shaped the modern world. You can also customize your own assassin, choosing from a variety of weapons, outfits, and skills. You can even join forces with other players online, and take on challenging missions together.</p>
|
21 |
-
<p>However, Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 is not without its flaws. The game still has many technical problems, such as low frame rates, pop-in textures, and clipping issues. The game also has a weak story, with bland characters and clichéd plot twists. The game also lacks innovation, as it does not introduce any new features or mechanics that distinguish it from previous entries in the series.</p>
|
22 |
-
<p>Therefore, Assassin's Creed: Unity Gold Edition V.1.5.0 - MAXAGENT 31 is a game that may appeal to fans of the franchise, but may disappoint others who are looking for a fresh and polished experience. The game is available for download from various torrent sites, but it is illegal and risky to do so. It is better to purchase the original game from official sources, and support the developers and publishers who worked hard to create it.</p>
|
23 |
-
<p></p> d5da3c52bf<br />
|
24 |
-
<br />
|
25 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar.md
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: A Powerful Software for Creating 3D Videos</h1>
|
3 |
-
|
4 |
-
<p>If you are looking for a software that can convert any 2D video into 3D stereoscopic video with special effects, then you should try Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar. This software is a shareware that can be downloaded from various websites, such as AfterDawn, Selsoft, and OpenSea. It supports all video formats, such as MP4, AVI, VOB, DVD, WMV, and MKV. It can also download videos from YouTube and convert them to 3D.</p>
|
5 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar</h2><br /><p><b><b>Download</b> ——— <a href="https://imgfil.com/2uxXWP">https://imgfil.com/2uxXWP</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<p>In this article, we will show you how to use Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar to create stunning 3D videos from your 2D sources. We will also explain the features, benefits, and tips of this software.</p>
|
8 |
-
|
9 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: How to Use It</h2>
|
10 |
-
|
11 |
-
<p>To use Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar, you need to follow these steps:</p>
|
12 |
-
|
13 |
-
<ol>
|
14 |
-
<li>Download the software from one of the websites mentioned above.</li>
|
15 |
-
<li>Extract the rar file using a program such as WinRAR or 7-Zip.</li>
|
16 |
-
<li>Run the keygen.exe file and generate a serial number.</li>
|
17 |
-
<li>Run the setup.exe file and install the software using the serial number.</li>
|
18 |
-
<li>Launch the software and select the video file you want to convert to 3D.</li>
|
19 |
-
<li>Choose the output format and the destination folder.</li>
|
20 |
-
<li>Adjust the settings according to your preferences, such as the depth, the angle, and the effect of the 3D video.</li>
|
21 |
-
<li>Click on the Convert button and wait for the process to finish.</li>
|
22 |
-
<li>Enjoy your 3D video on your PC or on your compatible device.</li>
|
23 |
-
</ol>
|
24 |
-
|
25 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: Features and Benefits</h2>
|
26 |
-
|
27 |
-
<p>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar has many features and benefits that make it a powerful software for creating 3D videos. Some of them are:</p>
|
28 |
-
|
29 |
-
<ul>
|
30 |
-
<li>It can convert any 2D video into 3D stereoscopic video with special effects.</li>
|
31 |
-
<li>It supports all video formats, such as MP4, AVI, VOB, DVD, WMV, and MKV.</li>
|
32 |
-
<li>It can also download videos from YouTube and convert them to 3D.</li>
|
33 |
-
<li>It can extract audio from video files and save them as MP3, WAV, or WMA files.</li>
|
34 |
-
<li>It has a user-friendly interface that is easy to use and navigate.</li>
|
35 |
-
<li>It has a fast conversion speed and a high quality output.</li>
|
36 |
-
<li>It has a preview function that allows you to see the result before converting.</li>
|
37 |
-
<li>It has a batch mode that allows you to convert multiple files at once.</li>
|
38 |
-
</ul>
|
39 |
-
|
40 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: Tips and Tricks</h2>
|
41 |
-
|
42 |
-
<p>To get the most out of Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar, you should follow these tips and tricks:</p>
|
43 |
-
|
44 |
-
<ul>
|
45 |
-
<li>Make sure you have enough disk space and memory for the conversion process.</li>
|
46 |
-
<li>Choose the output format that is compatible with your device or player.</li>
|
47 |
-
<li>Adjust the settings according to your preferences, but do not overdo it or else you might lose quality or realism.</li>
|
48 |
-
<li>Use a good quality source file for better results.</li>
|
49 |
-
<li>Do not use illegal or cracked versions of the software as they might contain viruses or malware.</li>
|
50 |
-
</ul>
|
51 |
-
|
52 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: Conclusion</h2>
|
53 |
-
|
54 |
-
<p>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar is a software that can convert any 2D video into 3D stereoscopic video with special effects. It supports all video formats, such as MP4, AVI, VOB, DVD, WMV, and MKV. It can also download videos from YouTube and convert them to 3D. It has many features and benefits that make it a powerful software for creating 3D videos. It also has some tips and tricks that can help you get the most out of it.</p>
|
55 |
-
<p></p>
|
56 |
-
|
57 |
-
<p>If you are interested in creating stunning 3D videos from your 2D sources, then you should try Axara 2D To 3D Video Converter</p>
|
58 |
-
<h2>Axara 2D To 3D Video Converter 2.4.3.243- Keygen And Crack.rar: Conclusion</h2>. There is no need to write another conclusion. If you want to write another article for a different keyword, please let me know.</p> 3cee63e6c2<br />
|
59 |
-
<br />
|
60 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Baixar O Jogo Do Ronald Mcdonald O Resgate Dos Bichos.md
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
<h2>baixar o jogo do ronald mcdonald o resgate dos bichos</h2><br /><p><b><b>Download File</b> ★★★★★ <a href="https://imgfil.com/2uxXOn">https://imgfil.com/2uxXOn</a></b></p><br /><br />
|
2 |
-
|
3 |
-
BeachBum - o veículo vai funcionar
|
4 |
-
|
5 |
-
Alguns pontos
|
6 |
-
|
7 |
-
1 - Esta implementação é baseada na noticia do Designer do Webbrowser, que afirmou ter criado um navegador html/js para mimular o Safari.
|
8 |
-
|
9 |
-
2 - No final deste trecho de código, você pode ver um exemplo do conteúdo na tela do Inspector de elementos
|
10 |
-
|
11 |
-
3 - Quando o player irá rolar, todos os elementos são ajustados.
|
12 |
-
|
13 |
-
4 - Se não tiver captura de tela com navegador de outro provedor, você vai ter que esperar um tempo para ter sucesso em capturar a tela do Safari.
|
14 |
-
|
15 |
-
5 - O seguinte exemplo funciona bem em Chrome e Opera, mas tem um bug oculto no Internet Explorer. O empurra no canto inferior esquerdo do jogo, então você tem que fazer scroll para o lado esquerdo e fechar o navegador.
|
16 |
-
|
17 |
-
6 - Isso causou diversos problemas em outros browsers e corrigido isso, apesar de ter um pequeno bug.
|
18 |
-
|
19 |
-
7 - Sendo assim, espero que você goste e compartilhe o jogo com os seus amigos.
|
20 |
-
|
21 |
-
8 - Este exemplo demonstra uma outra opção de implementação, que será a única opção que eu farei mesmo que teste em alguns navegadores. Você terá que segurar o botão "Navegador em execução" e clicar no efeito de paginação.
|
22 |
-
|
23 |
-
9 - Se a tela estiver em Full screen, você terá que clicar no botão "Abrir menú" e a tela irá adicionar um nome ao menú, e você poderá mudar isso adicionando uma tag no seu site (clique aqui para saber mais).
|
24 |
-
|
25 |
-
10 - Você pode alterar qualquer coisa ao clicar no botão em negrito 4fefd39f24<br />
|
26 |
-
<br />
|
27 |
-
<br />
|
28 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Costruzione Di Macchine Mcgraw-hill Pdf Download [EXCLUSIVE].md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>costruzione di macchine mcgraw-hill pdf download</h2><br /><p><b><b>Download</b> ››› <a href="https://imgfil.com/2uxYtv">https://imgfil.com/2uxYtv</a></b></p><br /><br />
|
2 |
-
|
3 |
-
riassunto di m lazzari informatica umanistica mcgraw hill ... [Libri gratis] Fondamenti di costruzione di macchine New Orleans Saints (-1) at Atlanta Falcons ... 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/Maxim-Korea-October-2012.md
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
## Maxim Korea - October 2012
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
**Click Here - [https://kneedacexbrew.blogspot.com/?d=2txjpK](https://kneedacexbrew.blogspot.com/?d=2txjpK)**
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Maxim Korea - October 2012":
|
24 |
-
|
25 |
-
# Maxim Korea - October 2012: The Hottest Issue of the Year
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
If you are looking for some eye candy and entertainment, you don't want to miss the October 2012 issue of Maxim Korea. This magazine features some of the most beautiful and talented women in Korea, as well as exclusive interviews, fashion tips, lifestyle advice, and more. Here are some of the highlights of this sizzling issue:
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
- **Cover Girl: Lee Hyori**. The queen of K-pop graces the cover of Maxim Korea with her stunning looks and charisma. She talks about her latest album, her love life, and her secrets to staying fit and fabulous.
|
34 |
-
|
35 |
-
- **Feature: The Maxim Hot 100**. Who are the hottest women in Korea right now? Maxim Korea reveals its annual list of the most gorgeous and influential celebrities, models, singers, actresses, and athletes. Find out who made the cut and who got the coveted number one spot.
|
36 |
-
|
37 |
-
- **Special: Halloween Party**. Get ready for some spooky fun with Maxim Korea's guide to the best Halloween parties in Seoul. Whether you want to dress up, dance, or drink, we have the perfect place for you to celebrate this festive occasion.
|
38 |
-
|
39 |
-
- **Fashion: Fall Trends**. As the weather gets cooler, it's time to update your wardrobe with some stylish and cozy outfits. Maxim Korea shows you how to rock the latest fall trends, from leather jackets to knit sweaters, with some help from our gorgeous models.
|
40 |
-
|
41 |
-
- **Lifestyle: Travel Tips**. If you are planning a trip abroad, you need to check out Maxim Korea's travel tips. We give you the best recommendations for where to go, what to do, and what to pack for your next adventure.
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
And that's not all. Maxim Korea also has plenty of other content to keep you entertained and informed, such as sports news, movie reviews, gadget reviews, jokes, quizzes, and more. Don't miss this hot issue of Maxim Korea - October 2012. Get your copy today!
|
46 |
-
|
47 |
-
Here is a possible continuation of the article:
|
48 |
-
|
49 |
-
But wait, there's more. Maxim Korea also has some exclusive content that you can only access online. Here are some of the perks of being a Maxim Korea online subscriber:
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
- **Behind-the-scenes videos**. Watch how our cover girl and models pose for the camera and have fun on the set. You'll get to see their personalities and charm in action.
|
54 |
-
|
55 |
-
- **Interactive features**. Participate in polls, surveys, and contests to share your opinions and win prizes. You can also chat with other Maxim Korea fans and get tips from our experts.
|
56 |
-
|
57 |
-
- **Bonus content**. Enjoy more photos, articles, and videos that are not available in the print edition. You'll get to see more of your favorite Maxim Korea stars and topics.
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
So what are you waiting for? Subscribe to Maxim Korea online today and get access to all these amazing features and more. You'll never miss a thing from Maxim Korea - October 2012.
|
62 |
-
|
63 |
-
dfd1c89656
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dynamons World MOD APK and Unlock All Levels and Features.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download and Install Dynamons World Mod APK on Android</h1>
|
3 |
-
<p>If you are looking for a fun and addictive RPG game that lets you catch and train your own team of monsters, then you should try <strong>Dynamons World</strong>. This game is loved by millions of RPG players who enjoy exploring an open world, fighting challenging battles, and collecting rare and powerful creatures. But what if you want to enjoy the game without any limitations or restrictions? That's where <strong>Dynamons World Mod APK</strong> comes in. This is a modified version of the game that gives you unlimited money to buy anything you want in the game. In this article, we will show you how to download and install Dynamons World Mod APK on your Android device and enjoy the game to the fullest.</p>
|
4 |
-
<h2>What is Dynamons World?</h2>
|
5 |
-
<h3>A fun and addictive RPG game</h3>
|
6 |
-
<p>Dynamons World is an RPG game that is inspired by popular games like Pokemon and Digimon. You play as a Dynamon master who travels across the Dynamon Kingdom, catching and training different types of Dynamons. Dynamons are cute and powerful creatures that have different elemental abilities, such as fire, water, electricity, and dark. You can use them to fight other Dynamon masters, Captains, and even evil forces that threaten the kingdom.</p>
|
7 |
-
<h2>download apk dynamons world mod</h2><br /><p><b><b>Download</b> ✔ <a href="https://urlin.us/2uSVGh">https://urlin.us/2uSVGh</a></b></p><br /><br />
|
8 |
-
<h3>Features of Dynamons World</h3>
|
9 |
-
<p>Some of the features that make Dynamons World an amazing game are:</p>
|
10 |
-
<ul>
|
11 |
-
<li><strong>Online Battle Arena</strong>: You can challenge your friends and players worldwide in online PvP multiplayer battles. You can show off your skills and strategy and climb the leaderboards.</li>
|
12 |
-
<li><strong>Catch and train dozens of unique Dynamons</strong>: You can explore an open world searching for the rarest and strongest monsters. You can catch them using special balls and train them to level up their skills and stats.</li>
|
13 |
-
<li><strong>Unleash powerful skills and brilliant tactics</strong>: You can use skill cards to activate special moves and abilities for your Dynamons. You can also combine different types of Dynamons to create synergies and advantages in battle.</li>
|
14 |
-
<li><strong>Travel all the way from Dynamons Camp to the Temple Ruins</strong>: You can follow an addictive and immersive RPG story that takes you through various locations, quests, and battles. You can meet new characters, allies, and enemies along the way.</li>
|
15 |
-
</ul>
|
16 |
-
<h2>What is Dynamons World Mod APK?</h2>
|
17 |
-
<h3>A modified version of the game with unlimited money</h3>
|
18 |
-
<p>Dynamons World Mod APK is a modified version of the original game that gives you unlimited money to spend in the game. Money is used to buy items, upgrades, balls, skill cards, and more. With unlimited money, you can buy anything you want without worrying about running out of resources. You can also unlock all the Dynamons in the game without having to catch them.</p>
|
19 |
-
<h3>Benefits of using Dynamons World Mod APK</h3>
|
20 |
-
<p>Some of the benefits of using Dynamons World Mod APK are:</p>
|
21 |
-
<p>download apk dynamons world mod unlimited money<br />
|
22 |
-
download apk dynamons world mod latest version<br />
|
23 |
-
download apk dynamons world mod free shopping<br />
|
24 |
-
download apk dynamons world mod android<br />
|
25 |
-
download apk dynamons world mod offline<br />
|
26 |
-
download apk dynamons world mod 1.8.14<br />
|
27 |
-
download apk dynamons world mod no root<br />
|
28 |
-
download apk dynamons world mod for pc<br />
|
29 |
-
download apk dynamons world mod hack<br />
|
30 |
-
download apk dynamons world mod mega<br />
|
31 |
-
download apk dynamons world mod revdl<br />
|
32 |
-
download apk dynamons world mod rexdl<br />
|
33 |
-
download apk dynamons world mod apkpure<br />
|
34 |
-
download apk dynamons world mod happymod<br />
|
35 |
-
download apk dynamons world mod an1<br />
|
36 |
-
download apk dynamons world mod 2023<br />
|
37 |
-
download apk dynamons world mod new update<br />
|
38 |
-
download apk dynamons world mod unlimited gems<br />
|
39 |
-
download apk dynamons world mod unlocked all<br />
|
40 |
-
download apk dynamons world mod ad free<br />
|
41 |
-
download apk dynamons world mod cheat<br />
|
42 |
-
download apk dynamons world mod full version<br />
|
43 |
-
download apk dynamons world mod high damage<br />
|
44 |
-
download apk dynamons world mod vip<br />
|
45 |
-
download apk dynamons world mod god mode<br />
|
46 |
-
download apk dynamons world mod easy win<br />
|
47 |
-
download apk dynamons world mod premium<br />
|
48 |
-
download apk dynamons world mod pro<br />
|
49 |
-
download apk dynamons world mod original<br />
|
50 |
-
download apk dynamons world mod best rpg game<br />
|
51 |
-
download apk dynamons world mod by azerion casual<br />
|
52 |
-
download apk dynamons world mod from google play store<br />
|
53 |
-
download apk dynamons world mod without verification<br />
|
54 |
-
download apk dynamons world mod without survey<br />
|
55 |
-
download apk dynamons world mod without ads<br />
|
56 |
-
download apk dynamons world mod with obb data file<br />
|
57 |
-
download apk dynamons world mod with unlimited coins and diamonds <br />
|
58 |
-
download apk dynamons world mod with all features unlocked <br />
|
59 |
-
download apk dynamons world mod with fast and secure link <br />
|
60 |
-
download apk dynamons world mod with direct link</p>
|
61 |
-
<ul>
|
62 |
-
<li><strong>You can enjoy the game without any limitations or restrictions</strong>: You can play the game as much as you want without having to wait for energy or coins. You can also access all the features and content in the game without having to complete certain levels or tasks.</li>
|
63 |
-
<li><strong>You can have more fun and excitement in the game</strong>: You can experiment with different combinations of Dynamons, skills, and strategies. You can have more fun and excitement in the game: You can experiment with different combinations of Dynamons, skills, and strategies. You can also challenge yourself with harder opponents and quests. You can enjoy the game without any frustration or boredom.</li>
|
64 |
-
<li><strong>You can save your time and effort in the game</strong>: You don't have to spend hours grinding for money or catching Dynamons. You can get everything you need in a matter of seconds. You can also skip the ads and pop-ups that interrupt your gameplay.</li>
|
65 |
-
</ul>
|
66 |
-
<h2>How to Download Dynamons World Mod APK?</h2>
|
67 |
-
<h3>Find a reputable source for the APK file</h3>
|
68 |
-
<p>The first step to download Dynamons World Mod APK is to find a reliable and trustworthy source for the APK file. There are many websites that offer APK files for various games and apps, but not all of them are safe and secure. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should do some research before downloading any APK file from the internet. You should look for reviews, ratings, feedback, and comments from other users who have downloaded the same file. You should also check the file size, version, and compatibility with your device.</p>
|
69 |
-
<h3>Allow unknown apps on your Android device</h3>
|
70 |
-
<p>The next step to download Dynamons World Mod APK is to allow unknown apps on your Android device. Unknown apps are apps that are not downloaded from the official Google Play Store. By default, Android devices do not allow unknown apps to be installed for security reasons. However, you can change this setting by following these steps:</p>
|
71 |
-
<ol>
|
72 |
-
<li>Go to your device's <strong>Settings</strong> and tap on <strong>Security</strong>.</li>
|
73 |
-
<li>Scroll down and find the option <strong>Unknown Sources</strong> or <strong>Install Unknown Apps</strong>.</li>
|
74 |
-
<li>Toggle the switch to enable it or tap on it and select <strong>Allow</strong>.</li>
|
75 |
-
<li>A warning message will appear. Read it carefully and tap on <strong>OK</strong>.</li>
|
76 |
-
</ol>
|
77 |
-
<p>Now you have enabled unknown apps on your device and you can proceed to download and install Dynamons World Mod APK.</p>
|
78 |
-
<h3>Download and install the APK file using a file manager app</h3>
|
79 |
-
<p>The final step to download Dynamons World Mod APK is to download and install the APK file using a file manager app. A file manager app is an app that lets you access and manage the files and folders on your device. You can use any file manager app that you prefer, such as ES File Explorer, Astro File Manager, or Solid Explorer. Here are the steps to follow:</p>
|
80 |
-
<ol>
|
81 |
-
<li>Open your browser and go to the website where you found the APK file for Dynamons World Mod APK.</li>
|
82 |
-
<li>Tap on the <strong>Download</strong> button and wait for the download to complete.</li>
|
83 |
-
<li>Once the download is finished, open your file manager app and locate the downloaded APK file in your <strong>Downloads</strong> folder.</li>
|
84 |
-
<li>Tap on the APK file and a pop-up will appear. Tap on <strong>Install</strong>.</li>
|
85 |
-
<li>The installation process will begin. Wait for it to finish.</li>
|
86 |
-
<li>Once the installation is done, you can tap on <strong>Open</strong> to launch the game or find it in your app drawer.</li>
|
87 |
-
</ol>
|
88 |
-
<h2>How to Play Dynamons World Mod APK?</h2>
|
89 |
-
<h3>Explore the open world and catch rare Dynamons</h3>
|
90 |
-
<p>Dynamons World Mod APK lets you explore a vast open world full of secrets, surprises, and adventures. You can travel across different regions, such as forests, deserts, mountains, islands, and cities. You can encounter various types of Dynamons in different habitats and environments. You can catch them using special balls that match their element. You can also find hidden items, chests, coins, and skill cards along the way.</p>
|
91 |
-
<h3>Battle other players online in PvP mode</h3>
|
92 |
-
<p>Dynamons World Mod APK also lets you battle other players online in PvP mode. You can join an online battle arena where you can challenge your friends or random players from around the world. You can show off your skills and strategy by using your best team of Dynamons. You can also chat with other players, send emojis, and make friends. You can earn rewards, trophies, and badges by winning battles and climbing the leaderboards.</p>
|
93 |
-
<h3>Use skill cards and strategy to defeat tough Captains</h3>
|
94 |
-
<p>Dynamons World Mod APK also lets you use skill cards and strategy to defeat tough Captains. Captains are powerful Dynamon masters who guard each region of the kingdom. They have their own team of Captains are powerful Dynamon masters who guard each region of the kingdom. They have their own team of strong and rare Dynamons that can pose a challenge to any player. You can challenge them to a battle and try to defeat them using your skill cards and strategy. Skill cards are special cards that activate different moves and abilities for your Dynamons. You can collect, upgrade, and equip them to your Dynamons to make them stronger and more versatile. You can also use strategy by choosing the right Dynamons, elements, and skills for each battle. You can earn rewards, badges, and fame by defeating Captains and advancing to the next region.</p>
|
95 |
-
<h2>Conclusion</h2>
|
96 |
-
<p>Dynamons World is a fun and addictive RPG game that lets you catch and train your own team of monsters. You can explore an open world, fight challenging battles, and collect rare and powerful creatures. You can also enjoy the game without any limitations or restrictions by using Dynamons World Mod APK. This is a modified version of the game that gives you unlimited money to buy anything you want in the game. You can also unlock all the Dynamons in the game without having to catch them. To download and install Dynamons World Mod APK on your Android device, you need to find a reputable source for the APK file, allow unknown apps on your device, and use a file manager app to download and install the APK file. Then, you can play the game and have fun with your Dynamons.</p>
|
97 |
-
<h2>FAQs</h2>
|
98 |
-
<h3>Is Dynamons World Mod APK safe to use?</h3>
|
99 |
-
<p>Yes, Dynamons World Mod APK is safe to use as long as you download it from a reliable and trustworthy source. You should also scan the APK file with an antivirus app before installing it on your device.</p>
|
100 |
-
<h3>Do I need to root my device to use Dynamons World Mod APK?</h3>
|
101 |
-
<p>No, you do not need to root your device to use Dynamons World Mod APK. You just need to enable unknown apps on your device and install the APK file as explained above.</p>
|
102 |
-
<h3>Can I play Dynamons World Mod APK offline?</h3>
|
103 |
-
<p>Yes, you can play Dynamons World Mod APK offline without an internet connection. However, some features of the game, such as online PvP battles, may not be available offline.</p>
|
104 |
-
<h3>Can I update Dynamons World Mod APK?</h3>
|
105 |
-
<p>Yes, you can update Dynamons World Mod APK whenever there is a new version available. However, you may need to uninstall the previous version and install the new one manually. You should also backup your game data before updating to avoid losing your progress.</p>
|
106 |
-
<h3>Can I use Dynamons World Mod APK with other mods or cheats?</h3>
|
107 |
-
<p>No, you should not use Dynamons World Mod APK with other mods or cheats as they may cause conflicts or errors in the game. You should only use Dynamons World Mod APK as it is without any modifications or alterations.</p> 197e85843d<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Kodu and Unleash Your Creativity with Game Design.md
DELETED
@@ -1,157 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Kodu: A Guide for Kids and Parents</h1>
|
3 |
-
<p>Kodu is a 3D game development environment that is designed to teach kids basic programming principles. Kodu allows creators to build the world's terrain, populate it with characters and props, and then program their behaviors and games rules in a bespoke visual programming language.</p>
|
4 |
-
<h2>download kodu</h2><br /><p><b><b>Download</b> ► <a href="https://jinyurl.com/2uNRZD">https://jinyurl.com/2uNRZD</a></b></p><br /><br />
|
5 |
-
<p>Kodu is a great tool for kids who want to create their own games without writing any code. It is fun, easy, and educational. Kids can use their imagination and creativity to make games that they can play and share with others. In this article, we will show you how to download, install, and use Kodu to make your own games.</p>
|
6 |
-
<h2>How to Download Kodu</h2>
|
7 |
-
<p>There are two ways to download Kodu: from the Microsoft Store or from the Kodu website. Both methods are free and safe.</p>
|
8 |
-
<h3>Download from Microsoft Store</h3>
|
9 |
-
<p>The Microsoft Store is an online platform where you can download apps and games for Windows PCs. You can find Kodu in the Microsoft Store by following these steps:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Open the Microsoft Store app on your PC. You can find it in the Start menu or by typing "Microsoft Store" in the search bar.</li>
|
12 |
-
<li>In the search box at the top right corner, type "Kodu" and press Enter.</li>
|
13 |
-
<li>Click on the "Kodu_Game_Lab" app from the search results.</li>
|
14 |
-
<li>Click on the "Get" button to download and install Kodu on your PC.</li>
|
15 |
-
</ol>
|
16 |
-
<p>The Microsoft Store will automatically update Kodu whenever a new version is released.</p>
|
17 |
-
<h3>Download from Kodu Website</h3>
|
18 |
-
<p>The Kodu website is another source where you can download Kodu for your PC. You can visit the website by following this link: <a href="http://www.kodugamelab.com/downloads/">http://www.kodugamelab.com/downloads/</a></p>
|
19 |
-
<p>On the website, you will see two options for downloading Kodu: Desktop Build or Microsoft Store Build. The Desktop Build is useful when you want to install Kodu offline or on multiple PCs. The Microsoft Store Build is similar to the one we described above.</p>
|
20 |
-
<p>How to download kodu game lab on Windows 10<br />
|
21 |
-
Download kodu for free and create your own games<br />
|
22 |
-
Kodu game lab download for PC - Microsoft Store<br />
|
23 |
-
Learn programming with kodu game lab - download now<br />
|
24 |
-
Download kodu game lab and join the world-wide community<br />
|
25 |
-
Kodu game lab tutorial - how to download and install<br />
|
26 |
-
Download kodu game lab and explore featured worlds<br />
|
27 |
-
Kodu game lab system requirements - can you download it?<br />
|
28 |
-
Download kodu game lab and access classroom resources<br />
|
29 |
-
Kodu game lab review - is it worth downloading?<br />
|
30 |
-
Download kodu game lab and make 3D games with visual programming<br />
|
31 |
-
Kodu game lab vs Scratch - which one to download?<br />
|
32 |
-
Download kodu game lab and share your games online<br />
|
33 |
-
Kodu game lab tips and tricks - how to get started after downloading<br />
|
34 |
-
Download kodu game lab and use freeform terrain editing<br />
|
35 |
-
Kodu game lab FAQ - everything you need to know before downloading<br />
|
36 |
-
Download kodu game lab and customize your characters and props<br />
|
37 |
-
Kodu game lab examples - what can you make after downloading?<br />
|
38 |
-
Download kodu game lab and enjoy flexible lighting and sound effects<br />
|
39 |
-
Kodu game lab license - what are the terms of use for downloading?<br />
|
40 |
-
Download kodu game lab and watch videos to learn more<br />
|
41 |
-
Kodu game lab support - how to get help after downloading<br />
|
42 |
-
Download kodu game lab and play games from other users<br />
|
43 |
-
Kodu game lab update - what's new in the latest version to download?<br />
|
44 |
-
Download kodu game lab and use touch, keyboard, mouse, or controller<br />
|
45 |
-
Kodu game lab history - how did it start and evolve since downloading?<br />
|
46 |
-
Download kodu game lab and teach creativity, problem solving, and storytelling<br />
|
47 |
-
Kodu game lab awards - what recognition has it received since downloading?<br />
|
48 |
-
Download kodu game lab and use the tile-based programming language<br />
|
49 |
-
Kodu game lab feedback - how to share your opinion after downloading?<br />
|
50 |
-
Download kodu game lab and challenge yourself with different games rules<br />
|
51 |
-
Kodu game lab bugs - how to report and fix them after downloading?<br />
|
52 |
-
Download kodu game lab and experiment with different genres and styles<br />
|
53 |
-
Kodu game lab features - what makes it unique and fun to download?<br />
|
54 |
-
Download kodu game lab and inspire your kids to learn coding<br />
|
55 |
-
Kodu game lab alternatives - what other programs can you download?<br />
|
56 |
-
Download kodu game lab and collaborate with other creators<br />
|
57 |
-
Kodu game lab forum - how to join the discussion after downloading?<br />
|
58 |
-
Download kodu game lab and discover new worlds every day<br />
|
59 |
-
Kodu game lab newsletter - how to subscribe and stay updated after downloading?<br />
|
60 |
-
Download kodu game lab and unleash your imagination<br />
|
61 |
-
Kodu game lab ratings - how do users rate it after downloading?<br />
|
62 |
-
Download kodu game lab and follow the official blog<br />
|
63 |
-
Kodu game lab cheats - how to hack the program after downloading?<br />
|
64 |
-
Download kodu game lab and compare it with other 3D development tools<br />
|
65 |
-
Kodu game lab wiki - how to find more information after downloading?<br />
|
66 |
-
Download kodu game lab and enter contests and competitions<br />
|
67 |
-
Kodu game lab source code - how to access it after downloading?</p>
|
68 |
-
<p>To download the Desktop Build, follow these steps:</p>
|
69 |
-
<ol>
|
70 |
-
<li>Choose between the .EXE or .MSI file format. The .EXE file is for regular users who want to install Kodu easily. The .MSI file is for system administrators who want to install Kodu via SCCM.</li>
|
71 |
-
<li>Click on the "KoduSetup.EXE" or "KoduSetup.MSI" link to download the file.</li>
|
72 |
-
<li>Save the file on your PC and run it to install Kodu.</li>
|
73 |
-
</ol>
|
74 |
-
<h2>How to Install Kodu</h2>
|
75 |
-
<p>Once you have downloaded Kodu, you need to install it on your PC. The installation process may vary depending on how you downloaded Kodu.</p>
|
76 |
-
<h3>Install from Microsoft Store</h3>
|
77 |
-
<p>If you downloaded Kodu from the Microsoft Store, you don't need to do anything else. The Microsoft Store will automatically install Kodu on your PC after downloading it. You can find Kodu in your Start menu or by typing "Kodu" in the search bar.</p>
|
78 |
-
<h3>Install from .EXE or .MSI file</h3>
|
79 |
-
<p>If you downloaded Kodu from the Kodu website, you need to run the .EXE or .MSI file that you saved on your PC. The installation process is simple and straightforward. Just follow these steps:</p>
|
80 |
-
<ol>
|
81 |
-
<li>Double-click on the "KoduSetup.EXE" or "KoduSetup.MSI" file to launch the installer.</li>
|
82 |
-
<li>Accept the license agreement and choose the destination folder for Kodu.</li>
|
83 |
-
<li>Click on the "Install" button to start the installation.</li>
|
84 |
-
<li>Wait for the installation to finish and click on the "Finish" button.</li>
|
85 |
-
</ol>
|
86 |
-
<p>You can find Kodu in your Start menu or by typing "Kodu" in the search bar.</p>
|
87 |
-
<h2>How to Use Kodu</h2>
|
88 |
-
<p>Now that you have installed Kodu on your PC, you are ready to use it to create your own games. Kodu has a user-friendly interface that lets you design and program your games with ease. Here are some basic steps to get you started:</p>
|
89 |
-
<h3>Launch Kodu and create a new world</h3>
|
90 |
-
<p>To launch Kodu, click on the "Kodu_Game_Lab" icon on your desktop or in your Start menu. You will see the main menu of Kodu, where you can choose to create a new world, load an existing world, or browse the community worlds.</p>
|
91 |
-
<p>To create a new world, click on the "New World" button. You will see a blank world with a default terrain and sky. You can change the terrain and sky later using the terrain editor.</p>
|
92 |
-
<h3>Use the terrain editor to shape the world</h3>
|
93 |
-
<p>The terrain editor is a tool that lets you modify the shape, color, and texture of the ground in your world. You can access the terrain editor by pressing the "E" key on your keyboard or clicking on the "Edit Terrain" button on the toolbar.</p>
|
94 |
-
<p>The terrain editor has several options for changing the terrain, such as raising, lowering, flattening, smoothing, painting, and erasing. You can also choose from different brushes and materials to create different effects. For example, you can use the water brush to create lakes and rivers, or use the grass material to create green fields.</p>
|
95 |
-
<p>To use the terrain editor, select a brush and a material from the menus on the left side of the screen. Then, move your mouse over the terrain and click and drag to apply the brush. You can adjust the size and strength of the brush using the mouse wheel or the slider on the right side of the screen. You can also undo and redo your actions using the buttons on the toolbar.</p>
|
96 |
-
<h3>Add characters and props to the world</h3>
|
97 |
-
<p>Characters and props are objects that you can add to your world to make it more interesting and interactive. Characters are living creatures that can move and perform actions, such as robots, animals, and vehicles. Props are static objects that can be used for decoration or gameplay purposes, such as trees, rocks, coins, and switches.</p>
|
98 |
-
<p>To add characters and props to your world, press the "O" key on your keyboard or click on the "Object Tool" button on the toolbar. You will see a menu of different categories of objects, such as Landscapes, Machines, Nature, Paths, and Sensors. Click on a category to see its subcategories, and then click on an object to select it.</p>
|
99 |
-
<p>To place an object in your world, move your mouse over the terrain and click where you want to put it. You can adjust its position, rotation, and scale using the mouse or the arrow keys. You can also copy, delete, or lock an object using the buttons on the toolbar.</p>
|
100 |
-
<h3>Use <h3>Use the visual programming language to program the game logic</h3>
|
101 |
-
<p>The visual programming language is a tool that lets you program the behavior and interaction of the objects in your world. You can access the visual programming language by pressing the "P" key on your keyboard or clicking on the "Program Tool" button on the toolbar.</p>
|
102 |
-
<p>The visual programming language uses a simple and intuitive syntax that consists of three elements: when, do, and options. When is a condition that triggers an action, such as when the game starts, when the player presses a button, or when an object collides with another object. Do is an action that is performed when the condition is met, such as move, shoot, score, or say. Options are modifiers that change how the action is executed, such as direction, speed, color, or sound.</p>
|
103 |
-
<p>To use the visual programming language, select an object that you want to program and click on the "Add Rule" button on the toolbar. You will see a blank rule with a when and a do slot. Click on the slot to open a menu of different options for the condition or the action. Choose an option and drag it to the slot. You can also add more slots by clicking on the "+" button or delete slots by clicking on the "X" button.</p>
|
104 |
-
<p>You can create multiple rules for each object and combine different conditions and actions to create complex and interesting game logic. For example, you can program a robot to move forward when the player presses the spacebar, shoot a laser when it sees an enemy, and explode when it touches water.</p>
|
105 |
-
<h3>Test and play the game</h3>
|
106 |
-
<p>After you have created your world and programmed your game logic, you can test and play your game to see how it works. To test your game, press the "T" key on your keyboard or click on the "Test World" button on the toolbar. You will see your game in full screen mode and you can control your character using the mouse and keyboard.</p>
|
107 |
-
<p>To play your game, press the "Esc" key on your keyboard or click on the "Exit Test Mode" button on the toolbar. You will return to the main menu of Kodu, where you can choose to play your game, save your game, or load another game.</p>
|
108 |
-
<p>To save your game, click on the "Save World" button on the main menu. You will be asked to enter a name and a description for your game. You can also choose to add tags and ratings to your game. To load another game, click on the "Load World" button on the main menu. You will see a list of games that you have saved or downloaded from the community.</p>
|
109 |
-
<h2>Conclusion</h2>
|
110 |
-
<p>Kodu is a fun and easy way to create your own games without writing any code. You can download Kodu for free from the Microsoft Store or from the Kodu website. You can install Kodu on your PC and use it to design and program your games with simple tools and commands. You can test and play your games and share them with others online.</p>
|
111 |
-
<p>Here are some tips and tricks for using Kodu:</p>
|
112 |
-
<ul>
|
113 |
-
<li>Explore different categories and subcategories of objects to find new and interesting elements for your games.</li>
|
114 |
-
<li>Use different materials and brushes to create diverse and realistic terrains for your worlds.</li>
|
115 |
-
<li>Use different options and modifiers to customize and fine-tune your actions and behaviors.</li>
|
116 |
-
<li>Use sensors and timers to create dynamic and interactive events in your games.</li>
|
117 |
-
<li>Use variables and scores to keep track of data and states in your games.</li>
|
118 |
-
</ul>
|
119 |
-
<p>We hope you enjoyed this article and learned how to download Kodu. We encourage you to try Kodu yourself and create your own games. You can also browse the community worlds and see what other creators have made with Kodu. Have fun!</p>
|
120 |
-
<h2>FAQs</h2>
|
121 |
-
<h4>What are the system requirements for Kodu?</h4>
|
122 |
-
<p>Kodu requires a Windows PC with at least 1 GB of RAM, 2 GB of hard disk space, a DirectX 9.0c compatible graphics card with Shader Model 2.0 or higher, and a keyboard and mouse. A gamepad is optional but recommended for playing games.</p>
|
123 |
-
<h4>What are some alternatives to Kodu?</h4>
|
124 |
-
<p>If you are looking for other game development tools for kids, you can check out these alternatives:</p>
|
125 |
-
<ul>
|
126 |
-
<li><a href="">Scratch</a>: A block-based programming language that lets you create interactive stories, games, and animations.</li>
|
127 |
-
<li><a href="">Roblox</a>: An online platform where you can create and play games with millions of players around the world.</li>
|
128 |
-
<li><a href="">M <li><a href="">Minecraft</a>: A sandbox game where you can build and explore infinite worlds with blocks and resources.</li>
|
129 |
-
<li><a href="">GameMaker Studio</a>: A game development software that lets you create 2D and 3D games with drag-and-drop or scripting.</li>
|
130 |
-
</ul>
|
131 |
-
<h4>How can I learn more about Kodu?</h4>
|
132 |
-
<p>If you want to learn more about Kodu, you can visit these resources:</p>
|
133 |
-
<ul>
|
134 |
-
<li><a href="">Kodu website</a>: The official website of Kodu, where you can download Kodu, browse the community worlds, and find tutorials and documentation.</li>
|
135 |
-
<li><a href="">Kodu YouTube channel</a>: The official YouTube channel of Kodu, where you can watch videos of Kodu features, tips, and examples.</li>
|
136 |
-
<li><a href="">Kodu blog</a>: The official blog of Kodu, where you can read news and updates about Kodu.</li>
|
137 |
-
<li><a href="">Kodu forum</a>: The official forum of Kodu, where you can ask questions, share ideas, and get help from other Kodu users and developers.</li>
|
138 |
-
</ul>
|
139 |
-
<h4>How can I share my games with others?</h4>
|
140 |
-
<p>If you want to share your games with others, you can do so by uploading them to the community worlds. To upload your game, follow these steps:</p>
|
141 |
-
<ol>
|
142 |
-
<li>Save your game and go to the main menu of Kodu.</li>
|
143 |
-
<li>Click on the "Share World" button on the main menu.</li>
|
144 |
-
<li>Enter your name, email, and password to create an account or log in to your existing account.</li>
|
145 |
-
<li>Choose a name, description, tags, and ratings for your game.</li>
|
146 |
-
<li>Click on the "Upload" button to upload your game to the community worlds.</li>
|
147 |
-
</ol>
|
148 |
-
<p>Once your game is uploaded, other users can find it, download it, and play it. You can also view your uploaded games and edit or delete them by clicking on the "My Worlds" button on the main menu.</p>
|
149 |
-
<h4>How can I get help or support for Kodu?</h4>
|
150 |
-
<p>If you need help or support for Kodu, you can contact the Kodu team by following these methods:</p>
|
151 |
-
<ul>
|
152 |
-
<li>Email: You can send an email to <a href="mailto:[email protected]">[email protected]</a> with your questions or feedback.</li>
|
153 |
-
<li>Twitter: You can follow and tweet to <a href="https://twitter.com/KoduGameLab">@KoduGameLab</a> on Twitter with your comments or suggestions.</li>
|
154 |
-
<li>Facebook: You can like and message <a href="https://www.facebook.com/KoduGameLab">Kodu Game Lab</a> on Facebook with your queries or opinions.</li>
|
155 |
-
</ul></p> 401be4b1e0<br />
|
156 |
-
<br />
|
157 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download and Install WhatsApp Messenger on Your Windows 8 PC.md
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download WhatsApp Messenger for Windows 8</h1>
|
3 |
-
<p>WhatsApp Messenger is a free messaging app that lets you communicate with your friends and family across different devices. You can send and receive text messages, photos, videos, voice notes, documents, and more with WhatsApp. You can also make voice and video calls for free with WhatsApp.</p>
|
4 |
-
<h2>download whatsapp messenger for windows 8</h2><br /><p><b><b>DOWNLOAD</b> ⚙ <a href="https://jinyurl.com/2uNRyC">https://jinyurl.com/2uNRyC</a></b></p><br /><br />
|
5 |
-
<p>If you have a Windows 8 computer, you might be wondering how you can download WhatsApp Messenger for it. In this article, we will show you how to do that in a few simple steps. We will also share some benefits of using WhatsApp Messenger on Windows 8, as well as some tips and tricks for using it.</p>
|
6 |
-
<h2>Benefits of Using WhatsApp Messenger on Windows 8</h2>
|
7 |
-
<h3>Stay connected with your friends and family across devices</h3>
|
8 |
-
<p>One of the main benefits of using WhatsApp Messenger on Windows 8 is that you can stay connected with your friends and family across different devices. You can use WhatsApp on your phone, tablet, or desktop computer. This way, you can pick up any conversation where you left off, no matter what device you are using.</p>
|
9 |
-
<h3>Send and receive list of keyboard shortcuts by pressing Ctrl + / on your keyboard.</p>
|
10 |
-
<h3>How to enable dark mode</h3>
|
11 |
-
<p>Another tip for using WhatsApp Messenger on Windows 8 is to enable dark mode. Dark mode can help you reduce eye strain and save battery life by changing the background color of WhatsApp to black. To enable dark mode, you need to click on the menu icon (three dots) in the top left corner of WhatsApp Desktop. Then, click on "Settings" and then on "Theme". You will see two options: "Light" and "Dark". Choose "Dark" and click on "OK". You will see that WhatsApp Desktop has switched to dark mode.</p>
|
12 |
-
<h3>How to mute notifications</h3>
|
13 |
-
<p>A third tip for using WhatsApp Messenger on Windows 8 is to mute notifications. Notifications can be useful to alert you of new messages and calls, but they can also be annoying or distracting if you are busy or want some peace and quiet. To mute notifications, you need to click on the menu icon (three dots) in the top left corner of WhatsApp Desktop. Then, click on "Settings" and then on "Notifications". You will see various options to customize your notifications, such as sound, banner, flash, and mute. You can choose to mute all notifications or only some of them. You can also choose the duration of the mute, such as 8 hours, 1 week, or always.</p>
|
14 |
-
<p>How to install whatsapp messenger on windows 8 laptop<br />
|
15 |
-
Whatsapp messenger for windows 8 free download full version<br />
|
16 |
-
Whatsapp messenger for windows 8 desktop app<br />
|
17 |
-
Whatsapp messenger for windows 8 pc download without bluestacks<br />
|
18 |
-
Whatsapp messenger for windows 8.1 64 bit download<br />
|
19 |
-
Whatsapp messenger for windows 8 tablet download<br />
|
20 |
-
Whatsapp messenger for windows 8 offline installer<br />
|
21 |
-
Whatsapp messenger for windows 8 pro download<br />
|
22 |
-
Whatsapp messenger for windows 8.1 phone download<br />
|
23 |
-
Whatsapp messenger for windows 8 direct download link<br />
|
24 |
-
Whatsapp messenger for windows 8 latest version download<br />
|
25 |
-
Whatsapp messenger for windows 8 exe file download<br />
|
26 |
-
Whatsapp messenger for windows 8 microsoft store download<br />
|
27 |
-
Whatsapp messenger for windows 8 web version download<br />
|
28 |
-
Whatsapp messenger for windows 8 apk download<br />
|
29 |
-
Whatsapp messenger for windows 8 software download<br />
|
30 |
-
Whatsapp messenger for windows 8 setup download<br />
|
31 |
-
Whatsapp messenger for windows 8 zip file download<br />
|
32 |
-
Whatsapp messenger for windows 8 online download<br />
|
33 |
-
Whatsapp messenger for windows 8 crack download<br />
|
34 |
-
Whatsapp messenger for windows 8 beta version download<br />
|
35 |
-
Whatsapp messenger for windows 8 update download<br />
|
36 |
-
Whatsapp messenger for windows 8 iso download<br />
|
37 |
-
Whatsapp messenger for windows 8 portable download<br />
|
38 |
-
Whatsapp messenger for windows 8 rar file download<br />
|
39 |
-
Whatsapp messenger for windows 8 old version download<br />
|
40 |
-
Whatsapp messenger for windows 8 modded version download<br />
|
41 |
-
Whatsapp messenger for windows 8 official website download<br />
|
42 |
-
Whatsapp messenger for windows 8 original version download<br />
|
43 |
-
Whatsapp messenger for windows 8 premium version download<br />
|
44 |
-
Whatsapp messenger for windows 8 hacked version download<br />
|
45 |
-
Whatsapp messenger for windows 8 cracked version download<br />
|
46 |
-
Whatsapp messenger for windows 8 patched version download<br />
|
47 |
-
Whatsapp messenger for windows 8 unlocked version download<br />
|
48 |
-
Whatsapp messenger for windows 8 full version free download with crack<br />
|
49 |
-
Whatsapp messenger for windows 8 full version free download with keygen<br />
|
50 |
-
Whatsapp messenger for windows 8 full version free download with serial key<br />
|
51 |
-
Whatsapp messenger for windows 8 full version free download with license key<br />
|
52 |
-
Whatsapp messenger for windows 8 full version free download with activation key<br />
|
53 |
-
Whatsapp messenger for windows 8 full version free download with product key<br />
|
54 |
-
Download whatsapp desktop app from the microsoft store on your computer running on Windows OS.</p>
|
55 |
-
<h2>Conclusion and FAQs</h2>
|
56 |
-
<p>In conclusion, WhatsApp Messenger is a great app that lets you communicate with your friends and family across different devices. You can download WhatsApp Messenger for Windows 8 by following the steps we have outlined in this article. You can also enjoy some benefits of using WhatsApp Messenger on Windows 8, such as staying connected, sending and receiving various types of media, and enjoying end-to-end encryption and privacy controls. You can also use some tips and tricks for using WhatsApp Messenger on Windows 8, such as using keyboard shortcuts, enabling dark mode, and muting notifications. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to contact us.</p>
|
57 |
-
<h3>FAQ #1: Can I use WhatsApp Web instead of WhatsApp Desktop?</h3>
|
58 |
-
<p>Yes, you can use WhatsApp Web instead of WhatsApp Desktop if you prefer. WhatsApp Web is a web-based version of WhatsApp that you can access from any browser. However, WhatsApp Web has some limitations compared to WhatsApp Desktop, such as not being able to make voice or video calls, not being able to use keyboard shortcuts, not being able to enable dark mode, and not being able to run in the background. To use WhatsApp Web, you need to go to https://web.whatsapp.com/ and scan the QR code with your phone.</p>
|
59 |
-
<h3>FAQ #2: Can I use WhatsApp Desktop without my phone?</h3>
|
60 |
-
<p>No, you cannot use WhatsApp Desktop without your phone. WhatsApp Desktop is a companion app that syncs your messages and calls with your phone. You need to have your phone connected to the internet and linked with your account in order to use WhatsApp Desktop. If your phone is offline or disconnected from your account, you will not be able to use WhatsApp Desktop.</p>
|
61 |
-
<h3>FAQ #3: How can I update WhatsApp Desktop?</h3>
|
62 |
-
<p>To update WhatsApp Desktop, you need to go to the menu icon (three dots) in the top left corner of WhatsApp Desktop. Then, click on "Help" and then on "Check for updates". You will see a window that says "Checking for updates". If there is a new version available, you will see a button that says "Update". Click on this button and wait for the update process to complete.</p>
|
63 |
-
<h3>FAQ #4: How can I uninstall WhatsApp Desktop?</h3>
|
64 |
-
<p>To uninstall WhatsApp Desktop, you need to go to the Control Panel on your computer. Then, click on "Programs" and then on "Uninstall a program". You will see a list of programs installed on your computer. Find "WhatsApp" and right-click on it. Then, click on "Uninstall" and follow the prompts to remove WhatsApp Desktop from your computer.</p>
|
65 |
-
<h3>FAQ #5: How can I contact WhatsApp support?</h3>
|
66 |
-
<p>To contact WhatsApp support, you need to go to the menu icon (three dots) in the top left corner of WhatsApp Desktop. Then, click on "Settings" and then on "Help". You will see a button that says "Contact Us". Click on this button and fill out the form with your name, email address, subject, description, and attachments (optional). Then, click on "Send" and wait for a response from WhatsApp support.</p> 197e85843d<br />
|
67 |
-
<br />
|
68 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/44ov41za8i/FreeVC/speaker_encoder/visualizations.py
DELETED
@@ -1,178 +0,0 @@
|
|
1 |
-
from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
|
2 |
-
from datetime import datetime
|
3 |
-
from time import perf_counter as timer
|
4 |
-
import matplotlib.pyplot as plt
|
5 |
-
import numpy as np
|
6 |
-
# import webbrowser
|
7 |
-
import visdom
|
8 |
-
import umap
|
9 |
-
|
10 |
-
colormap = np.array([
|
11 |
-
[76, 255, 0],
|
12 |
-
[0, 127, 70],
|
13 |
-
[255, 0, 0],
|
14 |
-
[255, 217, 38],
|
15 |
-
[0, 135, 255],
|
16 |
-
[165, 0, 165],
|
17 |
-
[255, 167, 255],
|
18 |
-
[0, 255, 255],
|
19 |
-
[255, 96, 38],
|
20 |
-
[142, 76, 0],
|
21 |
-
[33, 0, 127],
|
22 |
-
[0, 0, 0],
|
23 |
-
[183, 183, 183],
|
24 |
-
], dtype=np.float) / 255
|
25 |
-
|
26 |
-
|
27 |
-
class Visualizations:
|
28 |
-
def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False):
|
29 |
-
# Tracking data
|
30 |
-
self.last_update_timestamp = timer()
|
31 |
-
self.update_every = update_every
|
32 |
-
self.step_times = []
|
33 |
-
self.losses = []
|
34 |
-
self.eers = []
|
35 |
-
print("Updating the visualizations every %d steps." % update_every)
|
36 |
-
|
37 |
-
# If visdom is disabled TODO: use a better paradigm for that
|
38 |
-
self.disabled = disabled
|
39 |
-
if self.disabled:
|
40 |
-
return
|
41 |
-
|
42 |
-
# Set the environment name
|
43 |
-
now = str(datetime.now().strftime("%d-%m %Hh%M"))
|
44 |
-
if env_name is None:
|
45 |
-
self.env_name = now
|
46 |
-
else:
|
47 |
-
self.env_name = "%s (%s)" % (env_name, now)
|
48 |
-
|
49 |
-
# Connect to visdom and open the corresponding window in the browser
|
50 |
-
try:
|
51 |
-
self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True)
|
52 |
-
except ConnectionError:
|
53 |
-
raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to "
|
54 |
-
"start it.")
|
55 |
-
# webbrowser.open("http://localhost:8097/env/" + self.env_name)
|
56 |
-
|
57 |
-
# Create the windows
|
58 |
-
self.loss_win = None
|
59 |
-
self.eer_win = None
|
60 |
-
# self.lr_win = None
|
61 |
-
self.implementation_win = None
|
62 |
-
self.projection_win = None
|
63 |
-
self.implementation_string = ""
|
64 |
-
|
65 |
-
def log_params(self):
|
66 |
-
if self.disabled:
|
67 |
-
return
|
68 |
-
from speaker_encoder import params_data
|
69 |
-
from speaker_encoder import params_model
|
70 |
-
param_string = "<b>Model parameters</b>:<br>"
|
71 |
-
for param_name in (p for p in dir(params_model) if not p.startswith("__")):
|
72 |
-
value = getattr(params_model, param_name)
|
73 |
-
param_string += "\t%s: %s<br>" % (param_name, value)
|
74 |
-
param_string += "<b>Data parameters</b>:<br>"
|
75 |
-
for param_name in (p for p in dir(params_data) if not p.startswith("__")):
|
76 |
-
value = getattr(params_data, param_name)
|
77 |
-
param_string += "\t%s: %s<br>" % (param_name, value)
|
78 |
-
self.vis.text(param_string, opts={"title": "Parameters"})
|
79 |
-
|
80 |
-
def log_dataset(self, dataset: SpeakerVerificationDataset):
|
81 |
-
if self.disabled:
|
82 |
-
return
|
83 |
-
dataset_string = ""
|
84 |
-
dataset_string += "<b>Speakers</b>: %s\n" % len(dataset.speakers)
|
85 |
-
dataset_string += "\n" + dataset.get_logs()
|
86 |
-
dataset_string = dataset_string.replace("\n", "<br>")
|
87 |
-
self.vis.text(dataset_string, opts={"title": "Dataset"})
|
88 |
-
|
89 |
-
def log_implementation(self, params):
|
90 |
-
if self.disabled:
|
91 |
-
return
|
92 |
-
implementation_string = ""
|
93 |
-
for param, value in params.items():
|
94 |
-
implementation_string += "<b>%s</b>: %s\n" % (param, value)
|
95 |
-
implementation_string = implementation_string.replace("\n", "<br>")
|
96 |
-
self.implementation_string = implementation_string
|
97 |
-
self.implementation_win = self.vis.text(
|
98 |
-
implementation_string,
|
99 |
-
opts={"title": "Training implementation"}
|
100 |
-
)
|
101 |
-
|
102 |
-
def update(self, loss, eer, step):
|
103 |
-
# Update the tracking data
|
104 |
-
now = timer()
|
105 |
-
self.step_times.append(1000 * (now - self.last_update_timestamp))
|
106 |
-
self.last_update_timestamp = now
|
107 |
-
self.losses.append(loss)
|
108 |
-
self.eers.append(eer)
|
109 |
-
print(".", end="")
|
110 |
-
|
111 |
-
# Update the plots every <update_every> steps
|
112 |
-
if step % self.update_every != 0:
|
113 |
-
return
|
114 |
-
time_string = "Step time: mean: %5dms std: %5dms" % \
|
115 |
-
(int(np.mean(self.step_times)), int(np.std(self.step_times)))
|
116 |
-
print("\nStep %6d Loss: %.4f EER: %.4f %s" %
|
117 |
-
(step, np.mean(self.losses), np.mean(self.eers), time_string))
|
118 |
-
if not self.disabled:
|
119 |
-
self.loss_win = self.vis.line(
|
120 |
-
[np.mean(self.losses)],
|
121 |
-
[step],
|
122 |
-
win=self.loss_win,
|
123 |
-
update="append" if self.loss_win else None,
|
124 |
-
opts=dict(
|
125 |
-
legend=["Avg. loss"],
|
126 |
-
xlabel="Step",
|
127 |
-
ylabel="Loss",
|
128 |
-
title="Loss",
|
129 |
-
)
|
130 |
-
)
|
131 |
-
self.eer_win = self.vis.line(
|
132 |
-
[np.mean(self.eers)],
|
133 |
-
[step],
|
134 |
-
win=self.eer_win,
|
135 |
-
update="append" if self.eer_win else None,
|
136 |
-
opts=dict(
|
137 |
-
legend=["Avg. EER"],
|
138 |
-
xlabel="Step",
|
139 |
-
ylabel="EER",
|
140 |
-
title="Equal error rate"
|
141 |
-
)
|
142 |
-
)
|
143 |
-
if self.implementation_win is not None:
|
144 |
-
self.vis.text(
|
145 |
-
self.implementation_string + ("<b>%s</b>" % time_string),
|
146 |
-
win=self.implementation_win,
|
147 |
-
opts={"title": "Training implementation"},
|
148 |
-
)
|
149 |
-
|
150 |
-
# Reset the tracking
|
151 |
-
self.losses.clear()
|
152 |
-
self.eers.clear()
|
153 |
-
self.step_times.clear()
|
154 |
-
|
155 |
-
def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None,
|
156 |
-
max_speakers=10):
|
157 |
-
max_speakers = min(max_speakers, len(colormap))
|
158 |
-
embeds = embeds[:max_speakers * utterances_per_speaker]
|
159 |
-
|
160 |
-
n_speakers = len(embeds) // utterances_per_speaker
|
161 |
-
ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker)
|
162 |
-
colors = [colormap[i] for i in ground_truth]
|
163 |
-
|
164 |
-
reducer = umap.UMAP()
|
165 |
-
projected = reducer.fit_transform(embeds)
|
166 |
-
plt.scatter(projected[:, 0], projected[:, 1], c=colors)
|
167 |
-
plt.gca().set_aspect("equal", "datalim")
|
168 |
-
plt.title("UMAP projection (step %d)" % step)
|
169 |
-
if not self.disabled:
|
170 |
-
self.projection_win = self.vis.matplot(plt, win=self.projection_win)
|
171 |
-
if out_fpath is not None:
|
172 |
-
plt.savefig(out_fpath)
|
173 |
-
plt.clf()
|
174 |
-
|
175 |
-
def save(self):
|
176 |
-
if not self.disabled:
|
177 |
-
self.vis.save([self.env_name])
|
178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/Dockerfile
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
# syntax=docker/dockerfile:1
|
2 |
-
|
3 |
-
FROM python:3.10-bullseye
|
4 |
-
|
5 |
-
EXPOSE 7865
|
6 |
-
|
7 |
-
WORKDIR /app
|
8 |
-
|
9 |
-
COPY . .
|
10 |
-
|
11 |
-
RUN apt update && apt install -y -qq ffmpeg aria2 && apt clean
|
12 |
-
|
13 |
-
RUN pip3 install --no-cache-dir -r requirements.txt
|
14 |
-
|
15 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
|
16 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
|
17 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
|
18 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
|
19 |
-
|
20 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
|
21 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
|
22 |
-
|
23 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
|
24 |
-
|
25 |
-
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/hubert -o rmvpe.pt
|
26 |
-
|
27 |
-
VOLUME [ "/app/weights", "/app/opt" ]
|
28 |
-
|
29 |
-
CMD ["python3", "infer-web.py"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/A666sxr/Genshin_TTS/app.py
DELETED
@@ -1,94 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import os
|
3 |
-
os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
|
4 |
-
|
5 |
-
import time
|
6 |
-
import json
|
7 |
-
import math
|
8 |
-
import torch
|
9 |
-
from torch import nn
|
10 |
-
from torch.nn import functional as F
|
11 |
-
from torch.utils.data import DataLoader
|
12 |
-
import re
|
13 |
-
import langid
|
14 |
-
import jieba
|
15 |
-
import commons
|
16 |
-
import utils
|
17 |
-
from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
|
18 |
-
from models import SynthesizerTrn
|
19 |
-
from text.symbols import symbols
|
20 |
-
from text import text_to_sequence, cleaned_text_to_sequence
|
21 |
-
from text.cleaners import japanese_cleaners
|
22 |
-
from scipy.io.wavfile import write
|
23 |
-
|
24 |
-
def getMixText(text):
|
25 |
-
langid.set_languages(['zh','en'])
|
26 |
-
seg_list = jieba.cut(text, cut_all=False)
|
27 |
-
clean_list=[]
|
28 |
-
for seg in seg_list:
|
29 |
-
langtext='[ZH]'
|
30 |
-
if(len(seg)>0):
|
31 |
-
lang=langid.classify(seg)[0]
|
32 |
-
if lang == 'en':
|
33 |
-
langtext='[EN]'
|
34 |
-
elif lang=='zh':
|
35 |
-
langtext='[ZH]'
|
36 |
-
clean_list.append(langtext+seg+langtext)
|
37 |
-
return ''.join(clean_list)
|
38 |
-
|
39 |
-
def get_text(text, hps):
|
40 |
-
text_norm = text_to_sequence(text, hps.data.text_cleaners)
|
41 |
-
if hps.data.add_blank:
|
42 |
-
text_norm = commons.intersperse(text_norm, 0)
|
43 |
-
text_norm = torch.LongTensor(text_norm)
|
44 |
-
# print(text_norm.shape)
|
45 |
-
return text_norm
|
46 |
-
|
47 |
-
hps_ms = utils.get_hparams_from_file("save_model/config.json")
|
48 |
-
hps = utils.get_hparams_from_file("save_model/config.json")
|
49 |
-
net_g_ms = SynthesizerTrn(
|
50 |
-
len(symbols),
|
51 |
-
hps_ms.data.filter_length // 2 + 1,
|
52 |
-
hps_ms.train.segment_size // hps.data.hop_length,
|
53 |
-
n_speakers=hps_ms.data.n_speakers,
|
54 |
-
**hps_ms.model)
|
55 |
-
|
56 |
-
npclists=[]
|
57 |
-
with open("save_model/npclists.txt",'r') as r:
|
58 |
-
for npc in r.readlines():
|
59 |
-
npclists.append(npc.split('|')[-1])
|
60 |
-
print(npc)
|
61 |
-
r.close
|
62 |
-
|
63 |
-
def tts(spkid, text):
|
64 |
-
if(len(re.findall(r'\[ZH\].*?\[ZH\]', text))==0 and len(re.findall(r'\[EN\].*?\[EN\]', text))==0):
|
65 |
-
text=getMixText(text)
|
66 |
-
sid = torch.LongTensor([spkid]) # speaker identity
|
67 |
-
stn_tst = get_text(text, hps_ms)
|
68 |
-
|
69 |
-
with torch.no_grad():
|
70 |
-
x_tst = stn_tst.unsqueeze(0)
|
71 |
-
x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
|
72 |
-
t1 = time.time()
|
73 |
-
audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][
|
74 |
-
0, 0].data.float().numpy()
|
75 |
-
t2 = time.time()
|
76 |
-
return "成功,耗时"+str((t2-t1))+"s", (hps.data.sampling_rate, audio)
|
77 |
-
|
78 |
-
|
79 |
-
_ = utils.load_checkpoint("save_model/model.pth", net_g_ms, None)
|
80 |
-
|
81 |
-
def clean_text(text):
|
82 |
-
return japanese_cleaners(text)
|
83 |
-
|
84 |
-
app = gr.Blocks()
|
85 |
-
with app:
|
86 |
-
with gr.Tabs():
|
87 |
-
with gr.TabItem("Basic"):
|
88 |
-
tts_input1 = gr.TextArea(label="在这输入文字", value="基于VITS的中英混合语音合成模型,当前进度为45epoch,30000 Steps,正在持续训练中。。")
|
89 |
-
tts_input2 = gr.Dropdown(label="人物", choices=npclists, type="index", value=npclists[0])
|
90 |
-
tts_submit = gr.Button("合成", variant="primary")
|
91 |
-
tts_output1 = gr.Textbox(label="信息")
|
92 |
-
tts_output2 = gr.Audio(label="结果")
|
93 |
-
tts_submit.click(tts, [tts_input2, tts_input1], [tts_output1, tts_output2])
|
94 |
-
app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/VQ-Trans/train_vq.py
DELETED
@@ -1,171 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
-
|
4 |
-
import torch
|
5 |
-
import torch.optim as optim
|
6 |
-
from torch.utils.tensorboard import SummaryWriter
|
7 |
-
|
8 |
-
import models.vqvae as vqvae
|
9 |
-
import utils.losses as losses
|
10 |
-
import options.option_vq as option_vq
|
11 |
-
import utils.utils_model as utils_model
|
12 |
-
from dataset import dataset_VQ, dataset_TM_eval
|
13 |
-
import utils.eval_trans as eval_trans
|
14 |
-
from options.get_eval_option import get_opt
|
15 |
-
from models.evaluator_wrapper import EvaluatorModelWrapper
|
16 |
-
import warnings
|
17 |
-
warnings.filterwarnings('ignore')
|
18 |
-
from utils.word_vectorizer import WordVectorizer
|
19 |
-
|
20 |
-
def update_lr_warm_up(optimizer, nb_iter, warm_up_iter, lr):
|
21 |
-
|
22 |
-
current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1)
|
23 |
-
for param_group in optimizer.param_groups:
|
24 |
-
param_group["lr"] = current_lr
|
25 |
-
|
26 |
-
return optimizer, current_lr
|
27 |
-
|
28 |
-
##### ---- Exp dirs ---- #####
|
29 |
-
args = option_vq.get_args_parser()
|
30 |
-
torch.manual_seed(args.seed)
|
31 |
-
|
32 |
-
args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}')
|
33 |
-
os.makedirs(args.out_dir, exist_ok = True)
|
34 |
-
|
35 |
-
##### ---- Logger ---- #####
|
36 |
-
logger = utils_model.get_logger(args.out_dir)
|
37 |
-
writer = SummaryWriter(args.out_dir)
|
38 |
-
logger.info(json.dumps(vars(args), indent=4, sort_keys=True))
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
w_vectorizer = WordVectorizer('./glove', 'our_vab')
|
43 |
-
|
44 |
-
if args.dataname == 'kit' :
|
45 |
-
dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt'
|
46 |
-
args.nb_joints = 21
|
47 |
-
|
48 |
-
else :
|
49 |
-
dataset_opt_path = 'checkpoints/t2m/Comp_v6_KLD005/opt.txt'
|
50 |
-
args.nb_joints = 22
|
51 |
-
|
52 |
-
logger.info(f'Training on {args.dataname}, motions are with {args.nb_joints} joints')
|
53 |
-
|
54 |
-
wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda'))
|
55 |
-
eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
|
56 |
-
|
57 |
-
|
58 |
-
##### ---- Dataloader ---- #####
|
59 |
-
train_loader = dataset_VQ.DATALoader(args.dataname,
|
60 |
-
args.batch_size,
|
61 |
-
window_size=args.window_size,
|
62 |
-
unit_length=2**args.down_t)
|
63 |
-
|
64 |
-
train_loader_iter = dataset_VQ.cycle(train_loader)
|
65 |
-
|
66 |
-
val_loader = dataset_TM_eval.DATALoader(args.dataname, False,
|
67 |
-
32,
|
68 |
-
w_vectorizer,
|
69 |
-
unit_length=2**args.down_t)
|
70 |
-
|
71 |
-
##### ---- Network ---- #####
|
72 |
-
net = vqvae.HumanVQVAE(args, ## use args to define different parameters in different quantizers
|
73 |
-
args.nb_code,
|
74 |
-
args.code_dim,
|
75 |
-
args.output_emb_width,
|
76 |
-
args.down_t,
|
77 |
-
args.stride_t,
|
78 |
-
args.width,
|
79 |
-
args.depth,
|
80 |
-
args.dilation_growth_rate,
|
81 |
-
args.vq_act,
|
82 |
-
args.vq_norm)
|
83 |
-
|
84 |
-
|
85 |
-
if args.resume_pth :
|
86 |
-
logger.info('loading checkpoint from {}'.format(args.resume_pth))
|
87 |
-
ckpt = torch.load(args.resume_pth, map_location='cpu')
|
88 |
-
net.load_state_dict(ckpt['net'], strict=True)
|
89 |
-
net.train()
|
90 |
-
net.cuda()
|
91 |
-
|
92 |
-
##### ---- Optimizer & Scheduler ---- #####
|
93 |
-
optimizer = optim.AdamW(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.weight_decay)
|
94 |
-
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_scheduler, gamma=args.gamma)
|
95 |
-
|
96 |
-
|
97 |
-
Loss = losses.ReConsLoss(args.recons_loss, args.nb_joints)
|
98 |
-
|
99 |
-
##### ------ warm-up ------- #####
|
100 |
-
avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
|
101 |
-
|
102 |
-
for nb_iter in range(1, args.warm_up_iter):
|
103 |
-
|
104 |
-
optimizer, current_lr = update_lr_warm_up(optimizer, nb_iter, args.warm_up_iter, args.lr)
|
105 |
-
|
106 |
-
gt_motion = next(train_loader_iter)
|
107 |
-
gt_motion = gt_motion.cuda().float() # (bs, 64, dim)
|
108 |
-
|
109 |
-
pred_motion, loss_commit, perplexity = net(gt_motion)
|
110 |
-
loss_motion = Loss(pred_motion, gt_motion)
|
111 |
-
loss_vel = Loss.forward_vel(pred_motion, gt_motion)
|
112 |
-
|
113 |
-
loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel
|
114 |
-
|
115 |
-
optimizer.zero_grad()
|
116 |
-
loss.backward()
|
117 |
-
optimizer.step()
|
118 |
-
|
119 |
-
avg_recons += loss_motion.item()
|
120 |
-
avg_perplexity += perplexity.item()
|
121 |
-
avg_commit += loss_commit.item()
|
122 |
-
|
123 |
-
if nb_iter % args.print_iter == 0 :
|
124 |
-
avg_recons /= args.print_iter
|
125 |
-
avg_perplexity /= args.print_iter
|
126 |
-
avg_commit /= args.print_iter
|
127 |
-
|
128 |
-
logger.info(f"Warmup. Iter {nb_iter} : lr {current_lr:.5f} \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. {avg_recons:.5f}")
|
129 |
-
|
130 |
-
avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
|
131 |
-
|
132 |
-
##### ---- Training ---- #####
|
133 |
-
avg_recons, avg_perplexity, avg_commit = 0., 0., 0.
|
134 |
-
best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, 0, best_fid=1000, best_iter=0, best_div=100, best_top1=0, best_top2=0, best_top3=0, best_matching=100, eval_wrapper=eval_wrapper)
|
135 |
-
|
136 |
-
for nb_iter in range(1, args.total_iter + 1):
|
137 |
-
|
138 |
-
gt_motion = next(train_loader_iter)
|
139 |
-
gt_motion = gt_motion.cuda().float() # bs, nb_joints, joints_dim, seq_len
|
140 |
-
|
141 |
-
pred_motion, loss_commit, perplexity = net(gt_motion)
|
142 |
-
loss_motion = Loss(pred_motion, gt_motion)
|
143 |
-
loss_vel = Loss.forward_vel(pred_motion, gt_motion)
|
144 |
-
|
145 |
-
loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel
|
146 |
-
|
147 |
-
optimizer.zero_grad()
|
148 |
-
loss.backward()
|
149 |
-
optimizer.step()
|
150 |
-
scheduler.step()
|
151 |
-
|
152 |
-
avg_recons += loss_motion.item()
|
153 |
-
avg_perplexity += perplexity.item()
|
154 |
-
avg_commit += loss_commit.item()
|
155 |
-
|
156 |
-
if nb_iter % args.print_iter == 0 :
|
157 |
-
avg_recons /= args.print_iter
|
158 |
-
avg_perplexity /= args.print_iter
|
159 |
-
avg_commit /= args.print_iter
|
160 |
-
|
161 |
-
writer.add_scalar('./Train/L1', avg_recons, nb_iter)
|
162 |
-
writer.add_scalar('./Train/PPL', avg_perplexity, nb_iter)
|
163 |
-
writer.add_scalar('./Train/Commit', avg_commit, nb_iter)
|
164 |
-
|
165 |
-
logger.info(f"Train. Iter {nb_iter} : \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. {avg_recons:.5f}")
|
166 |
-
|
167 |
-
avg_recons, avg_perplexity, avg_commit = 0., 0., 0.,
|
168 |
-
|
169 |
-
if nb_iter % args.eval_iter==0 :
|
170 |
-
best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, eval_wrapper=eval_wrapper)
|
171 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_offscreen.py
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import trimesh
|
3 |
-
|
4 |
-
from pyrender import (OffscreenRenderer, PerspectiveCamera, DirectionalLight,
|
5 |
-
SpotLight, Mesh, Node, Scene)
|
6 |
-
|
7 |
-
|
8 |
-
def test_offscreen_renderer(tmpdir):
|
9 |
-
|
10 |
-
# Fuze trimesh
|
11 |
-
fuze_trimesh = trimesh.load('examples/models/fuze.obj')
|
12 |
-
fuze_mesh = Mesh.from_trimesh(fuze_trimesh)
|
13 |
-
|
14 |
-
# Drill trimesh
|
15 |
-
drill_trimesh = trimesh.load('examples/models/drill.obj')
|
16 |
-
drill_mesh = Mesh.from_trimesh(drill_trimesh)
|
17 |
-
drill_pose = np.eye(4)
|
18 |
-
drill_pose[0,3] = 0.1
|
19 |
-
drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])
|
20 |
-
|
21 |
-
# Wood trimesh
|
22 |
-
wood_trimesh = trimesh.load('examples/models/wood.obj')
|
23 |
-
wood_mesh = Mesh.from_trimesh(wood_trimesh)
|
24 |
-
|
25 |
-
# Water bottle trimesh
|
26 |
-
bottle_gltf = trimesh.load('examples/models/WaterBottle.glb')
|
27 |
-
bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
|
28 |
-
bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
|
29 |
-
bottle_pose = np.array([
|
30 |
-
[1.0, 0.0, 0.0, 0.1],
|
31 |
-
[0.0, 0.0, -1.0, -0.16],
|
32 |
-
[0.0, 1.0, 0.0, 0.13],
|
33 |
-
[0.0, 0.0, 0.0, 1.0],
|
34 |
-
])
|
35 |
-
|
36 |
-
boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
|
37 |
-
boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
|
38 |
-
boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
|
39 |
-
boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
|
40 |
-
boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
|
41 |
-
boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
|
42 |
-
boxf_trimesh.visual.face_colors = boxf_face_colors
|
43 |
-
# Instanced
|
44 |
-
poses = np.tile(np.eye(4), (2,1,1))
|
45 |
-
poses[0,:3,3] = np.array([-0.1, -0.10, 0.05])
|
46 |
-
poses[1,:3,3] = np.array([-0.15, -0.10, 0.05])
|
47 |
-
boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)
|
48 |
-
|
49 |
-
points = trimesh.creation.icosphere(radius=0.05).vertices
|
50 |
-
point_colors = np.random.uniform(size=points.shape)
|
51 |
-
points_mesh = Mesh.from_points(points, colors=point_colors)
|
52 |
-
|
53 |
-
direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
|
54 |
-
spot_l = SpotLight(color=np.ones(3), intensity=10.0,
|
55 |
-
innerConeAngle=np.pi / 16, outerConeAngle=np.pi / 6)
|
56 |
-
|
57 |
-
cam = PerspectiveCamera(yfov=(np.pi / 3.0))
|
58 |
-
cam_pose = np.array([
|
59 |
-
[0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5],
|
60 |
-
[1.0, 0.0, 0.0, 0.0],
|
61 |
-
[0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4],
|
62 |
-
[0.0, 0.0, 0.0, 1.0]
|
63 |
-
])
|
64 |
-
|
65 |
-
scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))
|
66 |
-
|
67 |
-
fuze_node = Node(mesh=fuze_mesh, translation=np.array([
|
68 |
-
0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2])
|
69 |
-
]))
|
70 |
-
scene.add_node(fuze_node)
|
71 |
-
boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
|
72 |
-
scene.add_node(boxv_node)
|
73 |
-
boxf_node = Node(mesh=boxf_mesh)
|
74 |
-
scene.add_node(boxf_node)
|
75 |
-
|
76 |
-
_ = scene.add(drill_mesh, pose=drill_pose)
|
77 |
-
_ = scene.add(bottle_mesh, pose=bottle_pose)
|
78 |
-
_ = scene.add(wood_mesh)
|
79 |
-
_ = scene.add(direc_l, pose=cam_pose)
|
80 |
-
_ = scene.add(spot_l, pose=cam_pose)
|
81 |
-
_ = scene.add(points_mesh)
|
82 |
-
|
83 |
-
_ = scene.add(cam, pose=cam_pose)
|
84 |
-
|
85 |
-
r = OffscreenRenderer(viewport_width=640, viewport_height=480)
|
86 |
-
color, depth = r.render(scene)
|
87 |
-
|
88 |
-
assert color.shape == (480, 640, 3)
|
89 |
-
assert depth.shape == (480, 640)
|
90 |
-
assert np.max(depth.data) > 0.05
|
91 |
-
assert np.count_nonzero(depth.data) > (0.2 * depth.size)
|
92 |
-
r.delete()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rel_transformer.py
DELETED
@@ -1,611 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import functional as F
|
5 |
-
from text_to_speech.utils.commons.hparams import hparams
|
6 |
-
from text_to_speech.modules.commons.layers import Embedding
|
7 |
-
from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs, expand_word2ph
|
8 |
-
|
9 |
-
import transformers
|
10 |
-
|
11 |
-
def convert_pad_shape(pad_shape):
|
12 |
-
l = pad_shape[::-1]
|
13 |
-
pad_shape = [item for sublist in l for item in sublist]
|
14 |
-
return pad_shape
|
15 |
-
|
16 |
-
|
17 |
-
def shift_1d(x):
|
18 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
19 |
-
return x
|
20 |
-
|
21 |
-
|
22 |
-
def sequence_mask(length, max_length=None):
|
23 |
-
if max_length is None:
|
24 |
-
max_length = length.max()
|
25 |
-
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
26 |
-
return x.unsqueeze(0) < length.unsqueeze(1)
|
27 |
-
|
28 |
-
|
29 |
-
class Encoder(nn.Module):
|
30 |
-
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
|
31 |
-
window_size=None, block_length=None, pre_ln=False, **kwargs):
|
32 |
-
super().__init__()
|
33 |
-
self.hidden_channels = hidden_channels
|
34 |
-
self.filter_channels = filter_channels
|
35 |
-
self.n_heads = n_heads
|
36 |
-
self.n_layers = n_layers
|
37 |
-
self.kernel_size = kernel_size
|
38 |
-
self.p_dropout = p_dropout
|
39 |
-
self.window_size = window_size
|
40 |
-
self.block_length = block_length
|
41 |
-
self.pre_ln = pre_ln
|
42 |
-
|
43 |
-
self.drop = nn.Dropout(p_dropout)
|
44 |
-
self.attn_layers = nn.ModuleList()
|
45 |
-
self.norm_layers_1 = nn.ModuleList()
|
46 |
-
self.ffn_layers = nn.ModuleList()
|
47 |
-
self.norm_layers_2 = nn.ModuleList()
|
48 |
-
for i in range(self.n_layers):
|
49 |
-
self.attn_layers.append(
|
50 |
-
MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size,
|
51 |
-
p_dropout=p_dropout, block_length=block_length))
|
52 |
-
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
53 |
-
self.ffn_layers.append(
|
54 |
-
FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
55 |
-
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
56 |
-
if pre_ln:
|
57 |
-
self.last_ln = LayerNorm(hidden_channels)
|
58 |
-
|
59 |
-
def forward(self, x, x_mask):
|
60 |
-
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
61 |
-
for i in range(self.n_layers):
|
62 |
-
x = x * x_mask
|
63 |
-
x_ = x
|
64 |
-
if self.pre_ln:
|
65 |
-
x = self.norm_layers_1[i](x)
|
66 |
-
y = self.attn_layers[i](x, x, attn_mask)
|
67 |
-
y = self.drop(y)
|
68 |
-
x = x_ + y
|
69 |
-
if not self.pre_ln:
|
70 |
-
x = self.norm_layers_1[i](x)
|
71 |
-
|
72 |
-
x_ = x
|
73 |
-
if self.pre_ln:
|
74 |
-
x = self.norm_layers_2[i](x)
|
75 |
-
y = self.ffn_layers[i](x, x_mask)
|
76 |
-
y = self.drop(y)
|
77 |
-
x = x_ + y
|
78 |
-
if not self.pre_ln:
|
79 |
-
x = self.norm_layers_2[i](x)
|
80 |
-
if self.pre_ln:
|
81 |
-
x = self.last_ln(x)
|
82 |
-
x = x * x_mask
|
83 |
-
return x
|
84 |
-
|
85 |
-
|
86 |
-
class MultiHeadAttention(nn.Module):
|
87 |
-
def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.,
|
88 |
-
block_length=None, proximal_bias=False, proximal_init=False):
|
89 |
-
super().__init__()
|
90 |
-
assert channels % n_heads == 0
|
91 |
-
|
92 |
-
self.channels = channels
|
93 |
-
self.out_channels = out_channels
|
94 |
-
self.n_heads = n_heads
|
95 |
-
self.window_size = window_size
|
96 |
-
self.heads_share = heads_share
|
97 |
-
self.block_length = block_length
|
98 |
-
self.proximal_bias = proximal_bias
|
99 |
-
self.p_dropout = p_dropout
|
100 |
-
self.attn = None
|
101 |
-
|
102 |
-
self.k_channels = channels // n_heads
|
103 |
-
self.conv_q = nn.Conv1d(channels, channels, 1)
|
104 |
-
self.conv_k = nn.Conv1d(channels, channels, 1)
|
105 |
-
self.conv_v = nn.Conv1d(channels, channels, 1)
|
106 |
-
if window_size is not None:
|
107 |
-
n_heads_rel = 1 if heads_share else n_heads
|
108 |
-
rel_stddev = self.k_channels ** -0.5
|
109 |
-
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
110 |
-
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
111 |
-
self.conv_o = nn.Conv1d(channels, out_channels, 1)
|
112 |
-
self.drop = nn.Dropout(p_dropout)
|
113 |
-
|
114 |
-
nn.init.xavier_uniform_(self.conv_q.weight)
|
115 |
-
nn.init.xavier_uniform_(self.conv_k.weight)
|
116 |
-
if proximal_init:
|
117 |
-
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
|
118 |
-
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
|
119 |
-
nn.init.xavier_uniform_(self.conv_v.weight)
|
120 |
-
|
121 |
-
def forward(self, x, c, attn_mask=None):
|
122 |
-
q = self.conv_q(x)
|
123 |
-
k = self.conv_k(c)
|
124 |
-
v = self.conv_v(c)
|
125 |
-
|
126 |
-
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
127 |
-
|
128 |
-
x = self.conv_o(x)
|
129 |
-
return x
|
130 |
-
|
131 |
-
def attention(self, query, key, value, mask=None):
|
132 |
-
# reshape [b, d, t] -> [b, n_h, t, d_k]
|
133 |
-
b, d, t_s, t_t = (*key.size(), query.size(2))
|
134 |
-
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
135 |
-
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
136 |
-
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
137 |
-
|
138 |
-
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
|
139 |
-
if self.window_size is not None:
|
140 |
-
assert t_s == t_t, "Relative attention is only available for self-attention."
|
141 |
-
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
|
142 |
-
rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings)
|
143 |
-
rel_logits = self._relative_position_to_absolute_position(rel_logits)
|
144 |
-
scores_local = rel_logits / math.sqrt(self.k_channels)
|
145 |
-
scores = scores + scores_local
|
146 |
-
if self.proximal_bias:
|
147 |
-
assert t_s == t_t, "Proximal bias is only available for self-attention."
|
148 |
-
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
149 |
-
if mask is not None:
|
150 |
-
scores = scores.masked_fill(mask == 0, -1e4)
|
151 |
-
if self.block_length is not None:
|
152 |
-
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
|
153 |
-
scores = scores * block_mask + -1e4 * (1 - block_mask)
|
154 |
-
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
|
155 |
-
p_attn = self.drop(p_attn)
|
156 |
-
output = torch.matmul(p_attn, value)
|
157 |
-
if self.window_size is not None:
|
158 |
-
relative_weights = self._absolute_position_to_relative_position(p_attn)
|
159 |
-
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
|
160 |
-
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
|
161 |
-
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
|
162 |
-
return output, p_attn
|
163 |
-
|
164 |
-
def _matmul_with_relative_values(self, x, y):
|
165 |
-
"""
|
166 |
-
x: [b, h, l, m]
|
167 |
-
y: [h or 1, m, d]
|
168 |
-
ret: [b, h, l, d]
|
169 |
-
"""
|
170 |
-
ret = torch.matmul(x, y.unsqueeze(0))
|
171 |
-
return ret
|
172 |
-
|
173 |
-
def _matmul_with_relative_keys(self, x, y):
|
174 |
-
"""
|
175 |
-
x: [b, h, l, d]
|
176 |
-
y: [h or 1, m, d]
|
177 |
-
ret: [b, h, l, m]
|
178 |
-
"""
|
179 |
-
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
180 |
-
return ret
|
181 |
-
|
182 |
-
def _get_relative_embeddings(self, relative_embeddings, length):
|
183 |
-
max_relative_position = 2 * self.window_size + 1
|
184 |
-
# Pad first before slice to avoid using cond ops.
|
185 |
-
pad_length = max(length - (self.window_size + 1), 0)
|
186 |
-
slice_start_position = max((self.window_size + 1) - length, 0)
|
187 |
-
slice_end_position = slice_start_position + 2 * length - 1
|
188 |
-
if pad_length > 0:
|
189 |
-
padded_relative_embeddings = F.pad(
|
190 |
-
relative_embeddings,
|
191 |
-
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
|
192 |
-
else:
|
193 |
-
padded_relative_embeddings = relative_embeddings
|
194 |
-
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
|
195 |
-
return used_relative_embeddings
|
196 |
-
|
197 |
-
def _relative_position_to_absolute_position(self, x):
|
198 |
-
"""
|
199 |
-
x: [b, h, l, 2*l-1]
|
200 |
-
ret: [b, h, l, l]
|
201 |
-
"""
|
202 |
-
batch, heads, length, _ = x.size()
|
203 |
-
# Concat columns of pad to shift from relative to absolute indexing.
|
204 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
|
205 |
-
|
206 |
-
# Concat extra elements so to add up to shape (len+1, 2*len-1).
|
207 |
-
x_flat = x.view([batch, heads, length * 2 * length])
|
208 |
-
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
|
209 |
-
|
210 |
-
# Reshape and slice out the padded elements.
|
211 |
-
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
|
212 |
-
return x_final
|
213 |
-
|
214 |
-
def _absolute_position_to_relative_position(self, x):
|
215 |
-
"""
|
216 |
-
x: [b, h, l, l]
|
217 |
-
ret: [b, h, l, 2*l-1]
|
218 |
-
"""
|
219 |
-
batch, heads, length, _ = x.size()
|
220 |
-
# padd along column
|
221 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
|
222 |
-
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
|
223 |
-
# add 0's in the beginning that will skew the elements after reshape
|
224 |
-
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
|
225 |
-
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
226 |
-
return x_final
|
227 |
-
|
228 |
-
def _attention_bias_proximal(self, length):
|
229 |
-
"""Bias for self-attention to encourage attention to close positions.
|
230 |
-
Args:
|
231 |
-
length: an integer scalar.
|
232 |
-
Returns:
|
233 |
-
a Tensor with shape [1, 1, length, length]
|
234 |
-
"""
|
235 |
-
r = torch.arange(length, dtype=torch.float32)
|
236 |
-
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
|
237 |
-
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
238 |
-
|
239 |
-
|
240 |
-
class FFN(nn.Module):
|
241 |
-
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None):
|
242 |
-
super().__init__()
|
243 |
-
self.in_channels = in_channels
|
244 |
-
self.out_channels = out_channels
|
245 |
-
self.filter_channels = filter_channels
|
246 |
-
self.kernel_size = kernel_size
|
247 |
-
self.p_dropout = p_dropout
|
248 |
-
self.activation = activation
|
249 |
-
|
250 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
251 |
-
self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1)
|
252 |
-
self.drop = nn.Dropout(p_dropout)
|
253 |
-
|
254 |
-
def forward(self, x, x_mask):
|
255 |
-
x = self.conv_1(x * x_mask)
|
256 |
-
if self.activation == "gelu":
|
257 |
-
x = x * torch.sigmoid(1.702 * x)
|
258 |
-
else:
|
259 |
-
x = torch.relu(x)
|
260 |
-
x = self.drop(x)
|
261 |
-
x = self.conv_2(x * x_mask)
|
262 |
-
return x * x_mask
|
263 |
-
|
264 |
-
|
265 |
-
class LayerNorm(nn.Module):
|
266 |
-
def __init__(self, channels, eps=1e-4):
|
267 |
-
super().__init__()
|
268 |
-
self.channels = channels
|
269 |
-
self.eps = eps
|
270 |
-
|
271 |
-
self.gamma = nn.Parameter(torch.ones(channels))
|
272 |
-
self.beta = nn.Parameter(torch.zeros(channels))
|
273 |
-
|
274 |
-
def forward(self, x):
|
275 |
-
n_dims = len(x.shape)
|
276 |
-
mean = torch.mean(x, 1, keepdim=True)
|
277 |
-
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
|
278 |
-
|
279 |
-
x = (x - mean) * torch.rsqrt(variance + self.eps)
|
280 |
-
|
281 |
-
shape = [1, -1] + [1] * (n_dims - 2)
|
282 |
-
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
|
283 |
-
return x
|
284 |
-
|
285 |
-
|
286 |
-
class ConvReluNorm(nn.Module):
|
287 |
-
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
|
288 |
-
super().__init__()
|
289 |
-
self.in_channels = in_channels
|
290 |
-
self.hidden_channels = hidden_channels
|
291 |
-
self.out_channels = out_channels
|
292 |
-
self.kernel_size = kernel_size
|
293 |
-
self.n_layers = n_layers
|
294 |
-
self.p_dropout = p_dropout
|
295 |
-
assert n_layers > 1, "Number of layers should be larger than 0."
|
296 |
-
|
297 |
-
self.conv_layers = nn.ModuleList()
|
298 |
-
self.norm_layers = nn.ModuleList()
|
299 |
-
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
300 |
-
self.norm_layers.append(LayerNorm(hidden_channels))
|
301 |
-
self.relu_drop = nn.Sequential(
|
302 |
-
nn.ReLU(),
|
303 |
-
nn.Dropout(p_dropout))
|
304 |
-
for _ in range(n_layers - 1):
|
305 |
-
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
|
306 |
-
self.norm_layers.append(LayerNorm(hidden_channels))
|
307 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
|
308 |
-
self.proj.weight.data.zero_()
|
309 |
-
self.proj.bias.data.zero_()
|
310 |
-
|
311 |
-
def forward(self, x, x_mask):
|
312 |
-
x_org = x
|
313 |
-
for i in range(self.n_layers):
|
314 |
-
x = self.conv_layers[i](x * x_mask)
|
315 |
-
x = self.norm_layers[i](x)
|
316 |
-
x = self.relu_drop(x)
|
317 |
-
x = x_org + self.proj(x)
|
318 |
-
return x * x_mask
|
319 |
-
|
320 |
-
|
321 |
-
class RelTransformerEncoder(nn.Module):
|
322 |
-
def __init__(self,
|
323 |
-
n_vocab,
|
324 |
-
out_channels,
|
325 |
-
hidden_channels,
|
326 |
-
filter_channels,
|
327 |
-
n_heads,
|
328 |
-
n_layers,
|
329 |
-
kernel_size,
|
330 |
-
p_dropout=0.0,
|
331 |
-
window_size=4,
|
332 |
-
block_length=None,
|
333 |
-
prenet=True,
|
334 |
-
pre_ln=True,
|
335 |
-
):
|
336 |
-
|
337 |
-
super().__init__()
|
338 |
-
|
339 |
-
self.n_vocab = n_vocab
|
340 |
-
self.out_channels = out_channels
|
341 |
-
self.hidden_channels = hidden_channels
|
342 |
-
self.filter_channels = filter_channels
|
343 |
-
self.n_heads = n_heads
|
344 |
-
self.n_layers = n_layers
|
345 |
-
self.kernel_size = kernel_size
|
346 |
-
self.p_dropout = p_dropout
|
347 |
-
self.window_size = window_size
|
348 |
-
self.block_length = block_length
|
349 |
-
self.prenet = prenet
|
350 |
-
if n_vocab > 0:
|
351 |
-
self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
|
352 |
-
|
353 |
-
if prenet:
|
354 |
-
self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
|
355 |
-
kernel_size=5, n_layers=3, p_dropout=0)
|
356 |
-
self.encoder = Encoder(
|
357 |
-
hidden_channels,
|
358 |
-
filter_channels,
|
359 |
-
n_heads,
|
360 |
-
n_layers,
|
361 |
-
kernel_size,
|
362 |
-
p_dropout,
|
363 |
-
window_size=window_size,
|
364 |
-
block_length=block_length,
|
365 |
-
pre_ln=pre_ln,
|
366 |
-
)
|
367 |
-
|
368 |
-
def forward(self, x, x_mask=None):
|
369 |
-
if self.n_vocab > 0:
|
370 |
-
x_lengths = (x > 0).long().sum(-1)
|
371 |
-
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
372 |
-
else:
|
373 |
-
x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
|
374 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
375 |
-
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
376 |
-
|
377 |
-
if self.prenet:
|
378 |
-
x = self.pre(x, x_mask)
|
379 |
-
x = self.encoder(x, x_mask)
|
380 |
-
return x.transpose(1, 2)
|
381 |
-
|
382 |
-
|
383 |
-
class Pooler(nn.Module):
|
384 |
-
"""
|
385 |
-
Parameter-free poolers to get the sentence embedding
|
386 |
-
'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
|
387 |
-
'cls_before_pooler': [CLS] representation without the original MLP pooler.
|
388 |
-
'avg': average of the last layers' hidden states at each token.
|
389 |
-
'avg_top2': average of the last two layers.
|
390 |
-
'avg_first_last': average of the first and the last layers.
|
391 |
-
"""
|
392 |
-
def __init__(self, pooler_type):
|
393 |
-
super().__init__()
|
394 |
-
self.pooler_type = pooler_type
|
395 |
-
assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type
|
396 |
-
|
397 |
-
def forward(self, attention_mask, outputs):
|
398 |
-
last_hidden = outputs.last_hidden_state
|
399 |
-
pooler_output = outputs.pooler_output
|
400 |
-
hidden_states = outputs.hidden_states
|
401 |
-
|
402 |
-
if self.pooler_type in ['cls_before_pooler', 'cls']:
|
403 |
-
return last_hidden[:, 0]
|
404 |
-
elif self.pooler_type == "avg":
|
405 |
-
return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
|
406 |
-
elif self.pooler_type == "avg_first_last":
|
407 |
-
first_hidden = hidden_states[0]
|
408 |
-
last_hidden = hidden_states[-1]
|
409 |
-
pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
|
410 |
-
return pooled_result
|
411 |
-
elif self.pooler_type == "avg_top2":
|
412 |
-
second_last_hidden = hidden_states[-2]
|
413 |
-
last_hidden = hidden_states[-1]
|
414 |
-
pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
|
415 |
-
return pooled_result
|
416 |
-
else:
|
417 |
-
raise NotImplementedError
|
418 |
-
|
419 |
-
|
420 |
-
class Similarity(nn.Module):
|
421 |
-
"""
|
422 |
-
Dot product or cosine similarity
|
423 |
-
"""
|
424 |
-
|
425 |
-
def __init__(self, temp):
|
426 |
-
super().__init__()
|
427 |
-
self.temp = temp
|
428 |
-
self.cos = nn.CosineSimilarity(dim=-1)
|
429 |
-
self.record = None
|
430 |
-
self.pos_avg = 0.0
|
431 |
-
self.neg_avg = 0.0
|
432 |
-
|
433 |
-
def forward(self, x, y):
|
434 |
-
sim = self.cos(x, y)
|
435 |
-
self.record = sim.detach() # [64,64]
|
436 |
-
min_size = min(self.record.shape[0], self.record.shape[1]) # 64
|
437 |
-
num_item = self.record.shape[0] * self.record.shape[1] # 4096
|
438 |
-
self.pos_avg = self.record.diag().sum() / min_size
|
439 |
-
if num_item - min_size == 0:
|
440 |
-
self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1
|
441 |
-
return sim / self.temp
|
442 |
-
if torch.any(torch.isnan(self.record)).item() is True:
|
443 |
-
print("we got self.record has nan when compute neg_avg")
|
444 |
-
if torch.any(torch.isnan(self.record.diag())).item() is True:
|
445 |
-
print("we got self.record.diag() has nan when compute neg_avg")
|
446 |
-
self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size)
|
447 |
-
|
448 |
-
return sim / self.temp
|
449 |
-
|
450 |
-
|
451 |
-
class BertPredictionHeadTransform(nn.Module):
|
452 |
-
def __init__(self, hidden_size):
|
453 |
-
super().__init__()
|
454 |
-
self.dense = nn.Linear(hidden_size, hidden_size)
|
455 |
-
self.transform_act_fn = F.gelu
|
456 |
-
self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
|
457 |
-
|
458 |
-
def forward(self, hidden_states):
|
459 |
-
hidden_states = self.dense(hidden_states)
|
460 |
-
hidden_states = self.transform_act_fn(hidden_states)
|
461 |
-
hidden_states = self.LayerNorm(hidden_states)
|
462 |
-
return hidden_states
|
463 |
-
|
464 |
-
|
465 |
-
class BertLMPredictionHead(nn.Module):
|
466 |
-
def __init__(self, hid_dim, out_dim):
|
467 |
-
super().__init__()
|
468 |
-
self.transform = BertPredictionHeadTransform(hid_dim)
|
469 |
-
self.decoder = nn.Linear(hid_dim, out_dim, bias=False)
|
470 |
-
self.bias = nn.Parameter(torch.zeros(out_dim))
|
471 |
-
self.decoder.bias = self.bias
|
472 |
-
|
473 |
-
def forward(self, hidden_states):
|
474 |
-
hidden_states = self.transform(hidden_states)
|
475 |
-
hidden_states = self.decoder(hidden_states)
|
476 |
-
return hidden_states
|
477 |
-
|
478 |
-
|
479 |
-
# V2_2
|
480 |
-
# change add to concat.
|
481 |
-
# now support finetune BERT
|
482 |
-
# grad_bert=0.1 & trainable_block_idx=0
|
483 |
-
class BERTRelTransformerEncoder(nn.Module):
|
484 |
-
def __init__(self,
|
485 |
-
n_vocab,
|
486 |
-
out_channels,
|
487 |
-
hidden_channels,
|
488 |
-
filter_channels,
|
489 |
-
n_heads,
|
490 |
-
n_layers,
|
491 |
-
kernel_size,
|
492 |
-
p_dropout=0.0,
|
493 |
-
window_size=4,
|
494 |
-
block_length=None,
|
495 |
-
prenet=True,
|
496 |
-
pre_ln=True,
|
497 |
-
):
|
498 |
-
|
499 |
-
super().__init__()
|
500 |
-
|
501 |
-
self.n_vocab = n_vocab
|
502 |
-
self.out_channels = out_channels
|
503 |
-
self.hidden_channels = hidden_channels
|
504 |
-
self.filter_channels = filter_channels
|
505 |
-
self.n_heads = n_heads
|
506 |
-
self.n_layers = n_layers
|
507 |
-
self.kernel_size = kernel_size
|
508 |
-
self.p_dropout = p_dropout
|
509 |
-
self.window_size = window_size
|
510 |
-
self.block_length = block_length
|
511 |
-
self.prenet = prenet
|
512 |
-
if n_vocab > 0:
|
513 |
-
self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
|
514 |
-
|
515 |
-
if prenet:
|
516 |
-
self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
|
517 |
-
kernel_size=5, n_layers=3, p_dropout=0)
|
518 |
-
self.encoder1 = Encoder(
|
519 |
-
hidden_channels,
|
520 |
-
filter_channels,
|
521 |
-
n_heads,
|
522 |
-
n_layers//2,
|
523 |
-
kernel_size,
|
524 |
-
p_dropout,
|
525 |
-
window_size=window_size,
|
526 |
-
block_length=block_length,
|
527 |
-
pre_ln=pre_ln,
|
528 |
-
)
|
529 |
-
|
530 |
-
self.encoder2 = Encoder(
|
531 |
-
hidden_channels,
|
532 |
-
filter_channels,
|
533 |
-
n_heads,
|
534 |
-
n_layers - n_layers//2,
|
535 |
-
kernel_size,
|
536 |
-
p_dropout,
|
537 |
-
window_size=window_size,
|
538 |
-
block_length=block_length,
|
539 |
-
pre_ln=pre_ln,
|
540 |
-
)
|
541 |
-
|
542 |
-
if hparams['ds_name'] in ['ljspeech', 'libritts', 'librispeech']:
|
543 |
-
model_name = 'bert-base-uncased'
|
544 |
-
elif hparams['ds_name'] in ['biaobei', 'wenetspeech']:
|
545 |
-
model_name = 'bert-base-chinese'
|
546 |
-
else:
|
547 |
-
raise NotImplementedError()
|
548 |
-
|
549 |
-
self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
|
550 |
-
config = transformers.AutoConfig.from_pretrained(model_name)
|
551 |
-
if hparams.get("load_bert_from_pretrained", True):
|
552 |
-
print("Load BERT from pretrained model ...")
|
553 |
-
self.bert = transformers.AutoModel.from_pretrained(model_name,config=config)
|
554 |
-
trainable_start_block = hparams.get("bert_trainable_start_block", 0)
|
555 |
-
else:
|
556 |
-
print("Initialize BERT from scratch!")
|
557 |
-
self.bert = transformers.BertModel(config=config)
|
558 |
-
trainable_start_block = 0
|
559 |
-
|
560 |
-
for k, v in self.bert.named_parameters():
|
561 |
-
if 'embeddings' in k:
|
562 |
-
v.requires_grad = False
|
563 |
-
elif 'encoder.layer' in k:
|
564 |
-
block_idx = int(k.split(".")[2])
|
565 |
-
if block_idx < trainable_start_block:
|
566 |
-
v.requires_grad = False
|
567 |
-
else:
|
568 |
-
v.requires_grad = True
|
569 |
-
elif 'cls' in k:
|
570 |
-
v.requires_grad = True
|
571 |
-
else:
|
572 |
-
print("Unhandled key: {}, set to requires_grad...".format(k))
|
573 |
-
v.requires_grad = True
|
574 |
-
|
575 |
-
self.bert_combine = nn.Sequential(*[
|
576 |
-
nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1),
|
577 |
-
nn.ReLU(),
|
578 |
-
])
|
579 |
-
self.pooler = Pooler("avg")
|
580 |
-
self.sim = Similarity(temp=0.05)
|
581 |
-
|
582 |
-
def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs):
|
583 |
-
if self.n_vocab > 0:
|
584 |
-
x_lengths = (x > 0).long().sum(-1)
|
585 |
-
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
586 |
-
else:
|
587 |
-
x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
|
588 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
589 |
-
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
590 |
-
|
591 |
-
if self.prenet:
|
592 |
-
x = self.pre(x, x_mask)
|
593 |
-
x = self.encoder1(x, x_mask)
|
594 |
-
bert_outputs = self.bert(bert_feats['bert_input_ids'],
|
595 |
-
attention_mask=bert_feats['bert_attention_mask'],
|
596 |
-
token_type_ids=bert_feats['bert_token_type_ids'],
|
597 |
-
output_hidden_states=True)
|
598 |
-
bert_num_blocks = hparams.get("bert_num_blocks", 12) # total 1+12blocks in bert
|
599 |
-
bert_embedding = bert_outputs['hidden_states'][bert_num_blocks]
|
600 |
-
# bert_embedding = bert_outputs['last_hidden_state']
|
601 |
-
grad_bert = hparams.get("grad_bert", 0.1)
|
602 |
-
bert_embedding = bert_embedding.detach() * (1-grad_bert) + bert_embedding * grad_bert
|
603 |
-
bert_word_embedding, _ = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'], bert_feats['bert_token2word'].max().item())
|
604 |
-
bert_ph_embedding = expand_word2ph(bert_word_embedding, ph2word)
|
605 |
-
bert_ph_embedding = bert_ph_embedding.transpose(1,2)
|
606 |
-
x = torch.cat([x, bert_ph_embedding], dim=1)
|
607 |
-
x = self.bert_combine(x)
|
608 |
-
x = self.encoder2(x, x_mask)
|
609 |
-
return x.transpose(1, 2)
|
610 |
-
|
611 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import Base from '../base/Base';
|
2 |
-
export default class Spinner extends Base { }
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CreateBackground.js
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
var CreateBackground = function (scene, items, callback, scope) {
|
2 |
-
var background;
|
3 |
-
if (callback) {
|
4 |
-
items.scene = scene;
|
5 |
-
if (scope) {
|
6 |
-
background = callback.call(scope, items);
|
7 |
-
} else {
|
8 |
-
background = callback(items);
|
9 |
-
}
|
10 |
-
items.scene = undefined;
|
11 |
-
}
|
12 |
-
|
13 |
-
return background;
|
14 |
-
}
|
15 |
-
|
16 |
-
export default CreateBackground;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AiMimicry/sovits-models/vdecoder/hifigan/nvSTFT.py
DELETED
@@ -1,111 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import os
|
3 |
-
os.environ["LRU_CACHE_CAPACITY"] = "3"
|
4 |
-
import random
|
5 |
-
import torch
|
6 |
-
import torch.utils.data
|
7 |
-
import numpy as np
|
8 |
-
import librosa
|
9 |
-
from librosa.util import normalize
|
10 |
-
from librosa.filters import mel as librosa_mel_fn
|
11 |
-
from scipy.io.wavfile import read
|
12 |
-
import soundfile as sf
|
13 |
-
|
14 |
-
def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
|
15 |
-
sampling_rate = None
|
16 |
-
try:
|
17 |
-
data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile.
|
18 |
-
except Exception as ex:
|
19 |
-
print(f"'{full_path}' failed to load.\nException:")
|
20 |
-
print(ex)
|
21 |
-
if return_empty_on_exception:
|
22 |
-
return [], sampling_rate or target_sr or 32000
|
23 |
-
else:
|
24 |
-
raise Exception(ex)
|
25 |
-
|
26 |
-
if len(data.shape) > 1:
|
27 |
-
data = data[:, 0]
|
28 |
-
assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
|
29 |
-
|
30 |
-
if np.issubdtype(data.dtype, np.integer): # if audio data is type int
|
31 |
-
max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
|
32 |
-
else: # if audio data is type fp32
|
33 |
-
max_mag = max(np.amax(data), -np.amin(data))
|
34 |
-
max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
|
35 |
-
|
36 |
-
data = torch.FloatTensor(data.astype(np.float32))/max_mag
|
37 |
-
|
38 |
-
if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
|
39 |
-
return [], sampling_rate or target_sr or 32000
|
40 |
-
if target_sr is not None and sampling_rate != target_sr:
|
41 |
-
data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
|
42 |
-
sampling_rate = target_sr
|
43 |
-
|
44 |
-
return data, sampling_rate
|
45 |
-
|
46 |
-
def dynamic_range_compression(x, C=1, clip_val=1e-5):
|
47 |
-
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
|
48 |
-
|
49 |
-
def dynamic_range_decompression(x, C=1):
|
50 |
-
return np.exp(x) / C
|
51 |
-
|
52 |
-
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
|
53 |
-
return torch.log(torch.clamp(x, min=clip_val) * C)
|
54 |
-
|
55 |
-
def dynamic_range_decompression_torch(x, C=1):
|
56 |
-
return torch.exp(x) / C
|
57 |
-
|
58 |
-
class STFT():
|
59 |
-
def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
|
60 |
-
self.target_sr = sr
|
61 |
-
|
62 |
-
self.n_mels = n_mels
|
63 |
-
self.n_fft = n_fft
|
64 |
-
self.win_size = win_size
|
65 |
-
self.hop_length = hop_length
|
66 |
-
self.fmin = fmin
|
67 |
-
self.fmax = fmax
|
68 |
-
self.clip_val = clip_val
|
69 |
-
self.mel_basis = {}
|
70 |
-
self.hann_window = {}
|
71 |
-
|
72 |
-
def get_mel(self, y, center=False):
|
73 |
-
sampling_rate = self.target_sr
|
74 |
-
n_mels = self.n_mels
|
75 |
-
n_fft = self.n_fft
|
76 |
-
win_size = self.win_size
|
77 |
-
hop_length = self.hop_length
|
78 |
-
fmin = self.fmin
|
79 |
-
fmax = self.fmax
|
80 |
-
clip_val = self.clip_val
|
81 |
-
|
82 |
-
if torch.min(y) < -1.:
|
83 |
-
print('min value is ', torch.min(y))
|
84 |
-
if torch.max(y) > 1.:
|
85 |
-
print('max value is ', torch.max(y))
|
86 |
-
|
87 |
-
if fmax not in self.mel_basis:
|
88 |
-
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
|
89 |
-
self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
|
90 |
-
self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
|
91 |
-
|
92 |
-
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
|
93 |
-
y = y.squeeze(1)
|
94 |
-
|
95 |
-
spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
|
96 |
-
center=center, pad_mode='reflect', normalized=False, onesided=True)
|
97 |
-
# print(111,spec)
|
98 |
-
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
|
99 |
-
# print(222,spec)
|
100 |
-
spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
|
101 |
-
# print(333,spec)
|
102 |
-
spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
|
103 |
-
# print(444,spec)
|
104 |
-
return spec
|
105 |
-
|
106 |
-
def __call__(self, audiopath):
|
107 |
-
audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
|
108 |
-
spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
|
109 |
-
return spect
|
110 |
-
|
111 |
-
stft = STFT()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Akmyradov/TurkmenTTSweSTT/vits/commons.py
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
|
8 |
-
def init_weights(m, mean=0.0, std=0.01):
|
9 |
-
classname = m.__class__.__name__
|
10 |
-
if classname.find("Conv") != -1:
|
11 |
-
m.weight.data.normal_(mean, std)
|
12 |
-
|
13 |
-
|
14 |
-
def get_padding(kernel_size, dilation=1):
|
15 |
-
return int((kernel_size*dilation - dilation)/2)
|
16 |
-
|
17 |
-
|
18 |
-
def convert_pad_shape(pad_shape):
|
19 |
-
l = pad_shape[::-1]
|
20 |
-
pad_shape = [item for sublist in l for item in sublist]
|
21 |
-
return pad_shape
|
22 |
-
|
23 |
-
|
24 |
-
def intersperse(lst, item):
|
25 |
-
result = [item] * (len(lst) * 2 + 1)
|
26 |
-
result[1::2] = lst
|
27 |
-
return result
|
28 |
-
|
29 |
-
|
30 |
-
def kl_divergence(m_p, logs_p, m_q, logs_q):
|
31 |
-
"""KL(P||Q)"""
|
32 |
-
kl = (logs_q - logs_p) - 0.5
|
33 |
-
kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
|
34 |
-
return kl
|
35 |
-
|
36 |
-
|
37 |
-
def rand_gumbel(shape):
|
38 |
-
"""Sample from the Gumbel distribution, protect from overflows."""
|
39 |
-
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
|
40 |
-
return -torch.log(-torch.log(uniform_samples))
|
41 |
-
|
42 |
-
|
43 |
-
def rand_gumbel_like(x):
|
44 |
-
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
|
45 |
-
return g
|
46 |
-
|
47 |
-
|
48 |
-
def slice_segments(x, ids_str, segment_size=4):
|
49 |
-
ret = torch.zeros_like(x[:, :, :segment_size])
|
50 |
-
for i in range(x.size(0)):
|
51 |
-
idx_str = ids_str[i]
|
52 |
-
idx_end = idx_str + segment_size
|
53 |
-
ret[i] = x[i, :, idx_str:idx_end]
|
54 |
-
return ret
|
55 |
-
|
56 |
-
|
57 |
-
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
58 |
-
b, d, t = x.size()
|
59 |
-
if x_lengths is None:
|
60 |
-
x_lengths = t
|
61 |
-
ids_str_max = x_lengths - segment_size + 1
|
62 |
-
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
|
63 |
-
ret = slice_segments(x, ids_str, segment_size)
|
64 |
-
return ret, ids_str
|
65 |
-
|
66 |
-
|
67 |
-
def get_timing_signal_1d(
|
68 |
-
length, channels, min_timescale=1.0, max_timescale=1.0e4):
|
69 |
-
position = torch.arange(length, dtype=torch.float)
|
70 |
-
num_timescales = channels // 2
|
71 |
-
log_timescale_increment = (
|
72 |
-
math.log(float(max_timescale) / float(min_timescale)) /
|
73 |
-
(num_timescales - 1))
|
74 |
-
inv_timescales = min_timescale * torch.exp(
|
75 |
-
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
|
76 |
-
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
|
77 |
-
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
|
78 |
-
signal = F.pad(signal, [0, 0, 0, channels % 2])
|
79 |
-
signal = signal.view(1, channels, length)
|
80 |
-
return signal
|
81 |
-
|
82 |
-
|
83 |
-
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
|
84 |
-
b, channels, length = x.size()
|
85 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
86 |
-
return x + signal.to(dtype=x.dtype, device=x.device)
|
87 |
-
|
88 |
-
|
89 |
-
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
|
90 |
-
b, channels, length = x.size()
|
91 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
92 |
-
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
|
93 |
-
|
94 |
-
|
95 |
-
def subsequent_mask(length):
|
96 |
-
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
|
97 |
-
return mask
|
98 |
-
|
99 |
-
|
100 |
-
@torch.jit.script
|
101 |
-
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
102 |
-
n_channels_int = n_channels[0]
|
103 |
-
in_act = input_a + input_b
|
104 |
-
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
105 |
-
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
106 |
-
acts = t_act * s_act
|
107 |
-
return acts
|
108 |
-
|
109 |
-
|
110 |
-
def convert_pad_shape(pad_shape):
|
111 |
-
l = pad_shape[::-1]
|
112 |
-
pad_shape = [item for sublist in l for item in sublist]
|
113 |
-
return pad_shape
|
114 |
-
|
115 |
-
|
116 |
-
def shift_1d(x):
|
117 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
118 |
-
return x
|
119 |
-
|
120 |
-
|
121 |
-
def sequence_mask(length, max_length=None):
|
122 |
-
if max_length is None:
|
123 |
-
max_length = length.max()
|
124 |
-
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
125 |
-
return x.unsqueeze(0) < length.unsqueeze(1)
|
126 |
-
|
127 |
-
|
128 |
-
def generate_path(duration, mask):
|
129 |
-
"""
|
130 |
-
duration: [b, 1, t_x]
|
131 |
-
mask: [b, 1, t_y, t_x]
|
132 |
-
"""
|
133 |
-
device = duration.device
|
134 |
-
|
135 |
-
b, _, t_y, t_x = mask.shape
|
136 |
-
cum_duration = torch.cumsum(duration, -1)
|
137 |
-
|
138 |
-
cum_duration_flat = cum_duration.view(b * t_x)
|
139 |
-
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
|
140 |
-
path = path.view(b, t_x, t_y)
|
141 |
-
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
|
142 |
-
path = path.unsqueeze(1).transpose(2,3) * mask
|
143 |
-
return path
|
144 |
-
|
145 |
-
|
146 |
-
def clip_grad_value_(parameters, clip_value, norm_type=2):
|
147 |
-
if isinstance(parameters, torch.Tensor):
|
148 |
-
parameters = [parameters]
|
149 |
-
parameters = list(filter(lambda p: p.grad is not None, parameters))
|
150 |
-
norm_type = float(norm_type)
|
151 |
-
if clip_value is not None:
|
152 |
-
clip_value = float(clip_value)
|
153 |
-
|
154 |
-
total_norm = 0
|
155 |
-
for p in parameters:
|
156 |
-
param_norm = p.grad.data.norm(norm_type)
|
157 |
-
total_norm += param_norm.item() ** norm_type
|
158 |
-
if clip_value is not None:
|
159 |
-
p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
160 |
-
total_norm = total_norm ** (1. / norm_type)
|
161 |
-
return total_norm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/src/utils/safetensor_helper.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
def load_x_from_safetensor(checkpoint, key):
|
4 |
-
x_generator = {}
|
5 |
-
for k,v in checkpoint.items():
|
6 |
-
if key in k:
|
7 |
-
x_generator[k.replace(key+'.', '')] = v
|
8 |
-
return x_generator
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/longcode/jpgd.cpp
DELETED
@@ -1,3276 +0,0 @@
|
|
1 |
-
// jpgd.cpp - C++ class for JPEG decompression.
|
2 |
-
// Public domain, Rich Geldreich <[email protected]>
|
3 |
-
// Last updated Apr. 16, 2011
|
4 |
-
// Alex Evans: Linear memory allocator (taken from jpge.h).
|
5 |
-
//
|
6 |
-
// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2.
|
7 |
-
//
|
8 |
-
// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling.
|
9 |
-
// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain"
|
10 |
-
// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html
|
11 |
-
|
12 |
-
#include "jpgd.h"
|
13 |
-
#include <string.h>
|
14 |
-
|
15 |
-
#include <assert.h>
|
16 |
-
// BEGIN EPIC MOD
|
17 |
-
#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0
|
18 |
-
// END EPIC MOD
|
19 |
-
|
20 |
-
#ifdef _MSC_VER
|
21 |
-
#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable
|
22 |
-
#endif
|
23 |
-
|
24 |
-
// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling).
|
25 |
-
// This is slower, but results in higher quality on images with highly saturated colors.
|
26 |
-
#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1
|
27 |
-
|
28 |
-
#define JPGD_TRUE (1)
|
29 |
-
#define JPGD_FALSE (0)
|
30 |
-
|
31 |
-
#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b))
|
32 |
-
#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b))
|
33 |
-
|
34 |
-
namespace jpgd {
|
35 |
-
|
36 |
-
static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
|
37 |
-
static inline void jpgd_free(void *p) { FMemory::Free(p); }
|
38 |
-
|
39 |
-
// BEGIN EPIC MOD
|
40 |
-
//@UE3 - use UE3 BGRA encoding instead of assuming RGBA
|
41 |
-
// stolen from IImageWrapper.h
|
42 |
-
enum ERGBFormatJPG
|
43 |
-
{
|
44 |
-
Invalid = -1,
|
45 |
-
RGBA = 0,
|
46 |
-
BGRA = 1,
|
47 |
-
Gray = 2,
|
48 |
-
};
|
49 |
-
static ERGBFormatJPG jpg_format;
|
50 |
-
// END EPIC MOD
|
51 |
-
|
52 |
-
// DCT coefficients are stored in this sequence.
|
53 |
-
static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
|
54 |
-
|
55 |
-
enum JPEG_MARKER
|
56 |
-
{
|
57 |
-
M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8,
|
58 |
-
M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC,
|
59 |
-
M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7,
|
60 |
-
M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF,
|
61 |
-
M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0
|
62 |
-
};
|
63 |
-
|
64 |
-
enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 };
|
65 |
-
|
66 |
-
#define CONST_BITS 13
|
67 |
-
#define PASS1_BITS 2
|
68 |
-
#define SCALEDONE ((int32)1)
|
69 |
-
|
70 |
-
#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */
|
71 |
-
#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */
|
72 |
-
#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */
|
73 |
-
#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */
|
74 |
-
#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */
|
75 |
-
#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */
|
76 |
-
#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */
|
77 |
-
#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */
|
78 |
-
#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */
|
79 |
-
#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */
|
80 |
-
#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */
|
81 |
-
#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */
|
82 |
-
|
83 |
-
#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n))
|
84 |
-
#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n))
|
85 |
-
|
86 |
-
#define MULTIPLY(var, cnst) ((var) * (cnst))
|
87 |
-
|
88 |
-
#define CLAMP(i) ((static_cast<uint>(i) > 255) ? (((~i) >> 31) & 0xFF) : (i))
|
89 |
-
|
90 |
-
// Compiler creates a fast path 1D IDCT for X non-zero columns
|
91 |
-
template <int NONZERO_COLS>
|
92 |
-
struct Row
|
93 |
-
{
|
94 |
-
static void idct(int* pTemp, const jpgd_block_t* pSrc)
|
95 |
-
{
|
96 |
-
// ACCESS_COL() will be optimized at compile time to either an array access, or 0.
|
97 |
-
#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0)
|
98 |
-
|
99 |
-
const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6);
|
100 |
-
|
101 |
-
const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
|
102 |
-
const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
|
103 |
-
const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
|
104 |
-
|
105 |
-
const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS;
|
106 |
-
const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS;
|
107 |
-
|
108 |
-
const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
|
109 |
-
|
110 |
-
const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1);
|
111 |
-
|
112 |
-
const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
|
113 |
-
const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
|
114 |
-
|
115 |
-
const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
|
116 |
-
const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
|
117 |
-
const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
|
118 |
-
const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
|
119 |
-
|
120 |
-
const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
|
121 |
-
const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
|
122 |
-
const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
|
123 |
-
const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
|
124 |
-
|
125 |
-
pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS);
|
126 |
-
pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS);
|
127 |
-
pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS);
|
128 |
-
pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS);
|
129 |
-
pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS);
|
130 |
-
pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS);
|
131 |
-
pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS);
|
132 |
-
pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS);
|
133 |
-
}
|
134 |
-
};
|
135 |
-
|
136 |
-
template <>
|
137 |
-
struct Row<0>
|
138 |
-
{
|
139 |
-
static void idct(int* pTemp, const jpgd_block_t* pSrc)
|
140 |
-
{
|
141 |
-
#ifdef _MSC_VER
|
142 |
-
pTemp; pSrc;
|
143 |
-
#endif
|
144 |
-
}
|
145 |
-
};
|
146 |
-
|
147 |
-
template <>
|
148 |
-
struct Row<1>
|
149 |
-
{
|
150 |
-
static void idct(int* pTemp, const jpgd_block_t* pSrc)
|
151 |
-
{
|
152 |
-
const int dcval = (pSrc[0] << PASS1_BITS);
|
153 |
-
|
154 |
-
pTemp[0] = dcval;
|
155 |
-
pTemp[1] = dcval;
|
156 |
-
pTemp[2] = dcval;
|
157 |
-
pTemp[3] = dcval;
|
158 |
-
pTemp[4] = dcval;
|
159 |
-
pTemp[5] = dcval;
|
160 |
-
pTemp[6] = dcval;
|
161 |
-
pTemp[7] = dcval;
|
162 |
-
}
|
163 |
-
};
|
164 |
-
|
165 |
-
// Compiler creates a fast path 1D IDCT for X non-zero rows
|
166 |
-
template <int NONZERO_ROWS>
|
167 |
-
struct Col
|
168 |
-
{
|
169 |
-
static void idct(uint8* pDst_ptr, const int* pTemp)
|
170 |
-
{
|
171 |
-
// ACCESS_ROW() will be optimized at compile time to either an array access, or 0.
|
172 |
-
#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? pTemp[x * 8] : 0)
|
173 |
-
|
174 |
-
const int z2 = ACCESS_ROW(2);
|
175 |
-
const int z3 = ACCESS_ROW(6);
|
176 |
-
|
177 |
-
const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
|
178 |
-
const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
|
179 |
-
const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);
|
180 |
-
|
181 |
-
const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS;
|
182 |
-
const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS;
|
183 |
-
|
184 |
-
const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
|
185 |
-
|
186 |
-
const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1);
|
187 |
-
|
188 |
-
const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3;
|
189 |
-
const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602);
|
190 |
-
|
191 |
-
const int az1 = MULTIPLY(bz1, - FIX_0_899976223);
|
192 |
-
const int az2 = MULTIPLY(bz2, - FIX_2_562915447);
|
193 |
-
const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5;
|
194 |
-
const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5;
|
195 |
-
|
196 |
-
const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3;
|
197 |
-
const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4;
|
198 |
-
const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3;
|
199 |
-
const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4;
|
200 |
-
|
201 |
-
int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3);
|
202 |
-
pDst_ptr[8*0] = (uint8)CLAMP(i);
|
203 |
-
|
204 |
-
i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3);
|
205 |
-
pDst_ptr[8*7] = (uint8)CLAMP(i);
|
206 |
-
|
207 |
-
i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3);
|
208 |
-
pDst_ptr[8*1] = (uint8)CLAMP(i);
|
209 |
-
|
210 |
-
i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3);
|
211 |
-
pDst_ptr[8*6] = (uint8)CLAMP(i);
|
212 |
-
|
213 |
-
i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3);
|
214 |
-
pDst_ptr[8*2] = (uint8)CLAMP(i);
|
215 |
-
|
216 |
-
i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3);
|
217 |
-
pDst_ptr[8*5] = (uint8)CLAMP(i);
|
218 |
-
|
219 |
-
i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3);
|
220 |
-
pDst_ptr[8*3] = (uint8)CLAMP(i);
|
221 |
-
|
222 |
-
i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3);
|
223 |
-
pDst_ptr[8*4] = (uint8)CLAMP(i);
|
224 |
-
}
|
225 |
-
};
|
226 |
-
|
227 |
-
template <>
|
228 |
-
struct Col<1>
|
229 |
-
{
|
230 |
-
static void idct(uint8* pDst_ptr, const int* pTemp)
|
231 |
-
{
|
232 |
-
int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3);
|
233 |
-
const uint8 dcval_clamped = (uint8)CLAMP(dcval);
|
234 |
-
pDst_ptr[0*8] = dcval_clamped;
|
235 |
-
pDst_ptr[1*8] = dcval_clamped;
|
236 |
-
pDst_ptr[2*8] = dcval_clamped;
|
237 |
-
pDst_ptr[3*8] = dcval_clamped;
|
238 |
-
pDst_ptr[4*8] = dcval_clamped;
|
239 |
-
pDst_ptr[5*8] = dcval_clamped;
|
240 |
-
pDst_ptr[6*8] = dcval_clamped;
|
241 |
-
pDst_ptr[7*8] = dcval_clamped;
|
242 |
-
}
|
243 |
-
};
|
244 |
-
|
245 |
-
static const uint8 s_idct_row_table[] =
|
246 |
-
{
|
247 |
-
1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0,
|
248 |
-
4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0,
|
249 |
-
6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0,
|
250 |
-
6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0,
|
251 |
-
8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2,
|
252 |
-
8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2,
|
253 |
-
8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4,
|
254 |
-
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8,
|
255 |
-
};
|
256 |
-
|
257 |
-
static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 };
|
258 |
-
|
259 |
-
void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag)
|
260 |
-
{
|
261 |
-
JPGD_ASSERT(block_max_zag >= 1);
|
262 |
-
JPGD_ASSERT(block_max_zag <= 64);
|
263 |
-
|
264 |
-
if (block_max_zag == 1)
|
265 |
-
{
|
266 |
-
int k = ((pSrc_ptr[0] + 4) >> 3) + 128;
|
267 |
-
k = CLAMP(k);
|
268 |
-
k = k | (k<<8);
|
269 |
-
k = k | (k<<16);
|
270 |
-
|
271 |
-
for (int i = 8; i > 0; i--)
|
272 |
-
{
|
273 |
-
*(int*)&pDst_ptr[0] = k;
|
274 |
-
*(int*)&pDst_ptr[4] = k;
|
275 |
-
pDst_ptr += 8;
|
276 |
-
}
|
277 |
-
return;
|
278 |
-
}
|
279 |
-
|
280 |
-
int temp[64];
|
281 |
-
|
282 |
-
const jpgd_block_t* pSrc = pSrc_ptr;
|
283 |
-
int* pTemp = temp;
|
284 |
-
|
285 |
-
const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8];
|
286 |
-
int i;
|
287 |
-
for (i = 8; i > 0; i--, pRow_tab++)
|
288 |
-
{
|
289 |
-
switch (*pRow_tab)
|
290 |
-
{
|
291 |
-
case 0: Row<0>::idct(pTemp, pSrc); break;
|
292 |
-
case 1: Row<1>::idct(pTemp, pSrc); break;
|
293 |
-
case 2: Row<2>::idct(pTemp, pSrc); break;
|
294 |
-
case 3: Row<3>::idct(pTemp, pSrc); break;
|
295 |
-
case 4: Row<4>::idct(pTemp, pSrc); break;
|
296 |
-
case 5: Row<5>::idct(pTemp, pSrc); break;
|
297 |
-
case 6: Row<6>::idct(pTemp, pSrc); break;
|
298 |
-
case 7: Row<7>::idct(pTemp, pSrc); break;
|
299 |
-
case 8: Row<8>::idct(pTemp, pSrc); break;
|
300 |
-
}
|
301 |
-
|
302 |
-
pSrc += 8;
|
303 |
-
pTemp += 8;
|
304 |
-
}
|
305 |
-
|
306 |
-
pTemp = temp;
|
307 |
-
|
308 |
-
const int nonzero_rows = s_idct_col_table[block_max_zag - 1];
|
309 |
-
for (i = 8; i > 0; i--)
|
310 |
-
{
|
311 |
-
switch (nonzero_rows)
|
312 |
-
{
|
313 |
-
case 1: Col<1>::idct(pDst_ptr, pTemp); break;
|
314 |
-
case 2: Col<2>::idct(pDst_ptr, pTemp); break;
|
315 |
-
case 3: Col<3>::idct(pDst_ptr, pTemp); break;
|
316 |
-
case 4: Col<4>::idct(pDst_ptr, pTemp); break;
|
317 |
-
case 5: Col<5>::idct(pDst_ptr, pTemp); break;
|
318 |
-
case 6: Col<6>::idct(pDst_ptr, pTemp); break;
|
319 |
-
case 7: Col<7>::idct(pDst_ptr, pTemp); break;
|
320 |
-
case 8: Col<8>::idct(pDst_ptr, pTemp); break;
|
321 |
-
}
|
322 |
-
|
323 |
-
pTemp++;
|
324 |
-
pDst_ptr++;
|
325 |
-
}
|
326 |
-
}
|
327 |
-
|
328 |
-
void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr)
|
329 |
-
{
|
330 |
-
int temp[64];
|
331 |
-
int* pTemp = temp;
|
332 |
-
const jpgd_block_t* pSrc = pSrc_ptr;
|
333 |
-
|
334 |
-
for (int i = 4; i > 0; i--)
|
335 |
-
{
|
336 |
-
Row<4>::idct(pTemp, pSrc);
|
337 |
-
pSrc += 8;
|
338 |
-
pTemp += 8;
|
339 |
-
}
|
340 |
-
|
341 |
-
pTemp = temp;
|
342 |
-
for (int i = 8; i > 0; i--)
|
343 |
-
{
|
344 |
-
Col<4>::idct(pDst_ptr, pTemp);
|
345 |
-
pTemp++;
|
346 |
-
pDst_ptr++;
|
347 |
-
}
|
348 |
-
}
|
349 |
-
|
350 |
-
// Retrieve one character from the input stream.
|
351 |
-
inline uint jpeg_decoder::get_char()
|
352 |
-
{
|
353 |
-
// Any bytes remaining in buffer?
|
354 |
-
if (!m_in_buf_left)
|
355 |
-
{
|
356 |
-
// Try to get more bytes.
|
357 |
-
prep_in_buffer();
|
358 |
-
// Still nothing to get?
|
359 |
-
if (!m_in_buf_left)
|
360 |
-
{
|
361 |
-
// Pad the end of the stream with 0xFF 0xD9 (EOI marker)
|
362 |
-
int t = m_tem_flag;
|
363 |
-
m_tem_flag ^= 1;
|
364 |
-
if (t)
|
365 |
-
return 0xD9;
|
366 |
-
else
|
367 |
-
return 0xFF;
|
368 |
-
}
|
369 |
-
}
|
370 |
-
|
371 |
-
uint c = *m_pIn_buf_ofs++;
|
372 |
-
m_in_buf_left--;
|
373 |
-
|
374 |
-
return c;
|
375 |
-
}
|
376 |
-
|
377 |
-
// Same as previous method, except can indicate if the character is a pad character or not.
|
378 |
-
inline uint jpeg_decoder::get_char(bool *pPadding_flag)
|
379 |
-
{
|
380 |
-
if (!m_in_buf_left)
|
381 |
-
{
|
382 |
-
prep_in_buffer();
|
383 |
-
if (!m_in_buf_left)
|
384 |
-
{
|
385 |
-
*pPadding_flag = true;
|
386 |
-
int t = m_tem_flag;
|
387 |
-
m_tem_flag ^= 1;
|
388 |
-
if (t)
|
389 |
-
return 0xD9;
|
390 |
-
else
|
391 |
-
return 0xFF;
|
392 |
-
}
|
393 |
-
}
|
394 |
-
|
395 |
-
*pPadding_flag = false;
|
396 |
-
|
397 |
-
uint c = *m_pIn_buf_ofs++;
|
398 |
-
m_in_buf_left--;
|
399 |
-
|
400 |
-
return c;
|
401 |
-
}
|
402 |
-
|
403 |
-
// Inserts a previously retrieved character back into the input buffer.
|
404 |
-
inline void jpeg_decoder::stuff_char(uint8 q)
|
405 |
-
{
|
406 |
-
*(--m_pIn_buf_ofs) = q;
|
407 |
-
m_in_buf_left++;
|
408 |
-
}
|
409 |
-
|
410 |
-
// Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered.
|
411 |
-
inline uint8 jpeg_decoder::get_octet()
|
412 |
-
{
|
413 |
-
bool padding_flag;
|
414 |
-
int c = get_char(&padding_flag);
|
415 |
-
|
416 |
-
if (c == 0xFF)
|
417 |
-
{
|
418 |
-
if (padding_flag)
|
419 |
-
return 0xFF;
|
420 |
-
|
421 |
-
c = get_char(&padding_flag);
|
422 |
-
if (padding_flag)
|
423 |
-
{
|
424 |
-
stuff_char(0xFF);
|
425 |
-
return 0xFF;
|
426 |
-
}
|
427 |
-
|
428 |
-
if (c == 0x00)
|
429 |
-
return 0xFF;
|
430 |
-
else
|
431 |
-
{
|
432 |
-
stuff_char(static_cast<uint8>(c));
|
433 |
-
stuff_char(0xFF);
|
434 |
-
return 0xFF;
|
435 |
-
}
|
436 |
-
}
|
437 |
-
|
438 |
-
return static_cast<uint8>(c);
|
439 |
-
}
|
440 |
-
|
441 |
-
// Retrieves a variable number of bits from the input stream. Does not recognize markers.
|
442 |
-
inline uint jpeg_decoder::get_bits(int num_bits)
|
443 |
-
{
|
444 |
-
if (!num_bits)
|
445 |
-
return 0;
|
446 |
-
|
447 |
-
uint i = m_bit_buf >> (32 - num_bits);
|
448 |
-
|
449 |
-
if ((m_bits_left -= num_bits) <= 0)
|
450 |
-
{
|
451 |
-
m_bit_buf <<= (num_bits += m_bits_left);
|
452 |
-
|
453 |
-
uint c1 = get_char();
|
454 |
-
uint c2 = get_char();
|
455 |
-
m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2;
|
456 |
-
|
457 |
-
m_bit_buf <<= -m_bits_left;
|
458 |
-
|
459 |
-
m_bits_left += 16;
|
460 |
-
|
461 |
-
JPGD_ASSERT(m_bits_left >= 0);
|
462 |
-
}
|
463 |
-
else
|
464 |
-
m_bit_buf <<= num_bits;
|
465 |
-
|
466 |
-
return i;
|
467 |
-
}
|
468 |
-
|
469 |
-
// Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered.
|
470 |
-
inline uint jpeg_decoder::get_bits_no_markers(int num_bits)
|
471 |
-
{
|
472 |
-
if (!num_bits)
|
473 |
-
return 0;
|
474 |
-
|
475 |
-
uint i = m_bit_buf >> (32 - num_bits);
|
476 |
-
|
477 |
-
if ((m_bits_left -= num_bits) <= 0)
|
478 |
-
{
|
479 |
-
m_bit_buf <<= (num_bits += m_bits_left);
|
480 |
-
|
481 |
-
if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF))
|
482 |
-
{
|
483 |
-
uint c1 = get_octet();
|
484 |
-
uint c2 = get_octet();
|
485 |
-
m_bit_buf |= (c1 << 8) | c2;
|
486 |
-
}
|
487 |
-
else
|
488 |
-
{
|
489 |
-
m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1];
|
490 |
-
m_in_buf_left -= 2;
|
491 |
-
m_pIn_buf_ofs += 2;
|
492 |
-
}
|
493 |
-
|
494 |
-
m_bit_buf <<= -m_bits_left;
|
495 |
-
|
496 |
-
m_bits_left += 16;
|
497 |
-
|
498 |
-
JPGD_ASSERT(m_bits_left >= 0);
|
499 |
-
}
|
500 |
-
else
|
501 |
-
m_bit_buf <<= num_bits;
|
502 |
-
|
503 |
-
return i;
|
504 |
-
}
|
505 |
-
|
506 |
-
// Decodes a Huffman encoded symbol.
|
507 |
-
inline int jpeg_decoder::huff_decode(huff_tables *pH)
|
508 |
-
{
|
509 |
-
int symbol;
|
510 |
-
|
511 |
-
// Check first 8-bits: do we have a complete symbol?
|
512 |
-
if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0)
|
513 |
-
{
|
514 |
-
// Decode more bits, use a tree traversal to find symbol.
|
515 |
-
int ofs = 23;
|
516 |
-
do
|
517 |
-
{
|
518 |
-
symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
|
519 |
-
ofs--;
|
520 |
-
} while (symbol < 0);
|
521 |
-
|
522 |
-
get_bits_no_markers(8 + (23 - ofs));
|
523 |
-
}
|
524 |
-
else
|
525 |
-
get_bits_no_markers(pH->code_size[symbol]);
|
526 |
-
|
527 |
-
return symbol;
|
528 |
-
}
|
529 |
-
|
530 |
-
// Decodes a Huffman encoded symbol.
|
531 |
-
inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits)
|
532 |
-
{
|
533 |
-
int symbol;
|
534 |
-
|
535 |
-
// Check first 8-bits: do we have a complete symbol?
|
536 |
-
if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0)
|
537 |
-
{
|
538 |
-
// Use a tree traversal to find symbol.
|
539 |
-
int ofs = 23;
|
540 |
-
do
|
541 |
-
{
|
542 |
-
symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))];
|
543 |
-
ofs--;
|
544 |
-
} while (symbol < 0);
|
545 |
-
|
546 |
-
get_bits_no_markers(8 + (23 - ofs));
|
547 |
-
|
548 |
-
extra_bits = get_bits_no_markers(symbol & 0xF);
|
549 |
-
}
|
550 |
-
else
|
551 |
-
{
|
552 |
-
JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? (symbol & 15) : 0));
|
553 |
-
|
554 |
-
if (symbol & 0x8000)
|
555 |
-
{
|
556 |
-
get_bits_no_markers((symbol >> 8) & 31);
|
557 |
-
extra_bits = symbol >> 16;
|
558 |
-
}
|
559 |
-
else
|
560 |
-
{
|
561 |
-
int code_size = (symbol >> 8) & 31;
|
562 |
-
int num_extra_bits = symbol & 0xF;
|
563 |
-
int bits = code_size + num_extra_bits;
|
564 |
-
if (bits <= (m_bits_left + 16))
|
565 |
-
extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1);
|
566 |
-
else
|
567 |
-
{
|
568 |
-
get_bits_no_markers(code_size);
|
569 |
-
extra_bits = get_bits_no_markers(num_extra_bits);
|
570 |
-
}
|
571 |
-
}
|
572 |
-
|
573 |
-
symbol &= 0xFF;
|
574 |
-
}
|
575 |
-
|
576 |
-
return symbol;
|
577 |
-
}
|
578 |
-
|
579 |
-
// Tables and macro used to fully decode the DPCM differences.
|
580 |
-
static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 };
|
581 |
-
static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 };
|
582 |
-
static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) };
|
583 |
-
#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x))
|
584 |
-
|
585 |
-
// Clamps a value between 0-255.
|
586 |
-
inline uint8 jpeg_decoder::clamp(int i)
|
587 |
-
{
|
588 |
-
if (static_cast<uint>(i) > 255)
|
589 |
-
i = (((~i) >> 31) & 0xFF);
|
590 |
-
|
591 |
-
return static_cast<uint8>(i);
|
592 |
-
}
|
593 |
-
|
594 |
-
namespace DCT_Upsample
|
595 |
-
{
|
596 |
-
struct Matrix44
|
597 |
-
{
|
598 |
-
typedef int Element_Type;
|
599 |
-
enum { NUM_ROWS = 4, NUM_COLS = 4 };
|
600 |
-
|
601 |
-
Element_Type v[NUM_ROWS][NUM_COLS];
|
602 |
-
|
603 |
-
inline int rows() const { return NUM_ROWS; }
|
604 |
-
inline int cols() const { return NUM_COLS; }
|
605 |
-
|
606 |
-
inline const Element_Type & at(int r, int c) const { return v[r][c]; }
|
607 |
-
inline Element_Type & at(int r, int c) { return v[r][c]; }
|
608 |
-
|
609 |
-
inline Matrix44() { }
|
610 |
-
|
611 |
-
inline Matrix44& operator += (const Matrix44& a)
|
612 |
-
{
|
613 |
-
for (int r = 0; r < NUM_ROWS; r++)
|
614 |
-
{
|
615 |
-
at(r, 0) += a.at(r, 0);
|
616 |
-
at(r, 1) += a.at(r, 1);
|
617 |
-
at(r, 2) += a.at(r, 2);
|
618 |
-
at(r, 3) += a.at(r, 3);
|
619 |
-
}
|
620 |
-
return *this;
|
621 |
-
}
|
622 |
-
|
623 |
-
inline Matrix44& operator -= (const Matrix44& a)
|
624 |
-
{
|
625 |
-
for (int r = 0; r < NUM_ROWS; r++)
|
626 |
-
{
|
627 |
-
at(r, 0) -= a.at(r, 0);
|
628 |
-
at(r, 1) -= a.at(r, 1);
|
629 |
-
at(r, 2) -= a.at(r, 2);
|
630 |
-
at(r, 3) -= a.at(r, 3);
|
631 |
-
}
|
632 |
-
return *this;
|
633 |
-
}
|
634 |
-
|
635 |
-
friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b)
|
636 |
-
{
|
637 |
-
Matrix44 ret;
|
638 |
-
for (int r = 0; r < NUM_ROWS; r++)
|
639 |
-
{
|
640 |
-
ret.at(r, 0) = a.at(r, 0) + b.at(r, 0);
|
641 |
-
ret.at(r, 1) = a.at(r, 1) + b.at(r, 1);
|
642 |
-
ret.at(r, 2) = a.at(r, 2) + b.at(r, 2);
|
643 |
-
ret.at(r, 3) = a.at(r, 3) + b.at(r, 3);
|
644 |
-
}
|
645 |
-
return ret;
|
646 |
-
}
|
647 |
-
|
648 |
-
friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b)
|
649 |
-
{
|
650 |
-
Matrix44 ret;
|
651 |
-
for (int r = 0; r < NUM_ROWS; r++)
|
652 |
-
{
|
653 |
-
ret.at(r, 0) = a.at(r, 0) - b.at(r, 0);
|
654 |
-
ret.at(r, 1) = a.at(r, 1) - b.at(r, 1);
|
655 |
-
ret.at(r, 2) = a.at(r, 2) - b.at(r, 2);
|
656 |
-
ret.at(r, 3) = a.at(r, 3) - b.at(r, 3);
|
657 |
-
}
|
658 |
-
return ret;
|
659 |
-
}
|
660 |
-
|
661 |
-
static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
|
662 |
-
{
|
663 |
-
for (int r = 0; r < 4; r++)
|
664 |
-
{
|
665 |
-
pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) + b.at(r, 0));
|
666 |
-
pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) + b.at(r, 1));
|
667 |
-
pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) + b.at(r, 2));
|
668 |
-
pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) + b.at(r, 3));
|
669 |
-
}
|
670 |
-
}
|
671 |
-
|
672 |
-
static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b)
|
673 |
-
{
|
674 |
-
for (int r = 0; r < 4; r++)
|
675 |
-
{
|
676 |
-
pDst[0*8 + r] = static_cast<jpgd_block_t>(a.at(r, 0) - b.at(r, 0));
|
677 |
-
pDst[1*8 + r] = static_cast<jpgd_block_t>(a.at(r, 1) - b.at(r, 1));
|
678 |
-
pDst[2*8 + r] = static_cast<jpgd_block_t>(a.at(r, 2) - b.at(r, 2));
|
679 |
-
pDst[3*8 + r] = static_cast<jpgd_block_t>(a.at(r, 3) - b.at(r, 3));
|
680 |
-
}
|
681 |
-
}
|
682 |
-
};
|
683 |
-
|
684 |
-
const int FRACT_BITS = 10;
|
685 |
-
const int SCALE = 1 << FRACT_BITS;
|
686 |
-
|
687 |
-
typedef int Temp_Type;
|
688 |
-
#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS)
|
689 |
-
#define F(i) ((int)((i) * SCALE + .5f))
|
690 |
-
|
691 |
-
// Any decent C++ compiler will optimize this at compile time to a 0, or an array access.
|
692 |
-
#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8])
|
693 |
-
|
694 |
-
// NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix
|
695 |
-
template<int NUM_ROWS, int NUM_COLS>
|
696 |
-
struct P_Q
|
697 |
-
{
|
698 |
-
static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc)
|
699 |
-
{
|
700 |
-
// 4x8 = 4x8 times 8x8, matrix 0 is constant
|
701 |
-
const Temp_Type X000 = AT(0, 0);
|
702 |
-
const Temp_Type X001 = AT(0, 1);
|
703 |
-
const Temp_Type X002 = AT(0, 2);
|
704 |
-
const Temp_Type X003 = AT(0, 3);
|
705 |
-
const Temp_Type X004 = AT(0, 4);
|
706 |
-
const Temp_Type X005 = AT(0, 5);
|
707 |
-
const Temp_Type X006 = AT(0, 6);
|
708 |
-
const Temp_Type X007 = AT(0, 7);
|
709 |
-
const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0));
|
710 |
-
const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1));
|
711 |
-
const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2));
|
712 |
-
const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3));
|
713 |
-
const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4));
|
714 |
-
const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5));
|
715 |
-
const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6));
|
716 |
-
const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7));
|
717 |
-
const Temp_Type X020 = AT(4, 0);
|
718 |
-
const Temp_Type X021 = AT(4, 1);
|
719 |
-
const Temp_Type X022 = AT(4, 2);
|
720 |
-
const Temp_Type X023 = AT(4, 3);
|
721 |
-
const Temp_Type X024 = AT(4, 4);
|
722 |
-
const Temp_Type X025 = AT(4, 5);
|
723 |
-
const Temp_Type X026 = AT(4, 6);
|
724 |
-
const Temp_Type X027 = AT(4, 7);
|
725 |
-
const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0));
|
726 |
-
const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1));
|
727 |
-
const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2));
|
728 |
-
const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3));
|
729 |
-
const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4));
|
730 |
-
const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5));
|
731 |
-
const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6));
|
732 |
-
const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7));
|
733 |
-
|
734 |
-
// 4x4 = 4x8 times 8x4, matrix 1 is constant
|
735 |
-
P.at(0, 0) = X000;
|
736 |
-
P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f));
|
737 |
-
P.at(0, 2) = X004;
|
738 |
-
P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * F(0.490393f) + X007 * F(0.865723f));
|
739 |
-
P.at(1, 0) = X010;
|
740 |
-
P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f));
|
741 |
-
P.at(1, 2) = X014;
|
742 |
-
P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f));
|
743 |
-
P.at(2, 0) = X020;
|
744 |
-
P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f));
|
745 |
-
P.at(2, 2) = X024;
|
746 |
-
P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f));
|
747 |
-
P.at(3, 0) = X030;
|
748 |
-
P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f));
|
749 |
-
P.at(3, 2) = X034;
|
750 |
-
P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f));
|
751 |
-
// 40 muls 24 adds
|
752 |
-
|
753 |
-
// 4x4 = 4x8 times 8x4, matrix 1 is constant
|
754 |
-
Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f));
|
755 |
-
Q.at(0, 1) = X002;
|
756 |
-
Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f));
|
757 |
-
Q.at(0, 3) = X006;
|
758 |
-
Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f));
|
759 |
-
Q.at(1, 1) = X012;
|
760 |
-
Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f));
|
761 |
-
Q.at(1, 3) = X016;
|
762 |
-
Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f));
|
763 |
-
Q.at(2, 1) = X022;
|
764 |
-
Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f));
|
765 |
-
Q.at(2, 3) = X026;
|
766 |
-
Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f));
|
767 |
-
Q.at(3, 1) = X032;
|
768 |
-
Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f));
|
769 |
-
Q.at(3, 3) = X036;
|
770 |
-
// 40 muls 24 adds
|
771 |
-
}
|
772 |
-
};
|
773 |
-
|
774 |
-
template<int NUM_ROWS, int NUM_COLS>
|
775 |
-
struct R_S
|
776 |
-
{
|
777 |
-
static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc)
|
778 |
-
{
|
779 |
-
// 4x8 = 4x8 times 8x8, matrix 0 is constant
|
780 |
-
const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0));
|
781 |
-
const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1));
|
782 |
-
const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2));
|
783 |
-
const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3));
|
784 |
-
const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4));
|
785 |
-
const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5));
|
786 |
-
const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6));
|
787 |
-
const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7));
|
788 |
-
const Temp_Type X110 = AT(2, 0);
|
789 |
-
const Temp_Type X111 = AT(2, 1);
|
790 |
-
const Temp_Type X112 = AT(2, 2);
|
791 |
-
const Temp_Type X113 = AT(2, 3);
|
792 |
-
const Temp_Type X114 = AT(2, 4);
|
793 |
-
const Temp_Type X115 = AT(2, 5);
|
794 |
-
const Temp_Type X116 = AT(2, 6);
|
795 |
-
const Temp_Type X117 = AT(2, 7);
|
796 |
-
const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0));
|
797 |
-
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1));
|
798 |
-
const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2));
|
799 |
-
const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3));
|
800 |
-
const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4));
|
801 |
-
const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5));
|
802 |
-
const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6));
|
803 |
-
const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7));
|
804 |
-
const Temp_Type X130 = AT(6, 0);
|
805 |
-
const Temp_Type X131 = AT(6, 1);
|
806 |
-
const Temp_Type X132 = AT(6, 2);
|
807 |
-
const Temp_Type X133 = AT(6, 3);
|
808 |
-
const Temp_Type X134 = AT(6, 4);
|
809 |
-
const Temp_Type X135 = AT(6, 5);
|
810 |
-
const Temp_Type X136 = AT(6, 6);
|
811 |
-
const Temp_Type X137 = AT(6, 7);
|
812 |
-
// 80 muls 48 adds
|
813 |
-
|
814 |
-
// 4x4 = 4x8 times 8x4, matrix 1 is constant
|
815 |
-
R.at(0, 0) = X100;
|
816 |
-
R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f));
|
817 |
-
R.at(0, 2) = X104;
|
818 |
-
R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f));
|
819 |
-
R.at(1, 0) = X110;
|
820 |
-
R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f));
|
821 |
-
R.at(1, 2) = X114;
|
822 |
-
R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f));
|
823 |
-
R.at(2, 0) = X120;
|
824 |
-
R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f));
|
825 |
-
R.at(2, 2) = X124;
|
826 |
-
R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f));
|
827 |
-
R.at(3, 0) = X130;
|
828 |
-
R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f));
|
829 |
-
R.at(3, 2) = X134;
|
830 |
-
R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f));
|
831 |
-
// 40 muls 24 adds
|
832 |
-
// 4x4 = 4x8 times 8x4, matrix 1 is constant
|
833 |
-
S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f));
|
834 |
-
S.at(0, 1) = X102;
|
835 |
-
S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f));
|
836 |
-
S.at(0, 3) = X106;
|
837 |
-
S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f));
|
838 |
-
S.at(1, 1) = X112;
|
839 |
-
S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f));
|
840 |
-
S.at(1, 3) = X116;
|
841 |
-
S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f));
|
842 |
-
S.at(2, 1) = X122;
|
843 |
-
S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f));
|
844 |
-
S.at(2, 3) = X126;
|
845 |
-
S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f));
|
846 |
-
S.at(3, 1) = X132;
|
847 |
-
S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f));
|
848 |
-
S.at(3, 3) = X136;
|
849 |
-
// 40 muls 24 adds
|
850 |
-
}
|
851 |
-
};
|
852 |
-
} // end namespace DCT_Upsample
|
853 |
-
|
854 |
-
// Unconditionally frees all allocated m_blocks.
|
855 |
-
void jpeg_decoder::free_all_blocks()
|
856 |
-
{
|
857 |
-
m_pStream = NULL;
|
858 |
-
for (mem_block *b = m_pMem_blocks; b; )
|
859 |
-
{
|
860 |
-
mem_block *n = b->m_pNext;
|
861 |
-
jpgd_free(b);
|
862 |
-
b = n;
|
863 |
-
}
|
864 |
-
m_pMem_blocks = NULL;
|
865 |
-
}
|
866 |
-
|
867 |
-
// This method handles all errors.
|
868 |
-
// It could easily be changed to use C++ exceptions.
|
869 |
-
void jpeg_decoder::stop_decoding(jpgd_status status)
|
870 |
-
{
|
871 |
-
m_error_code = status;
|
872 |
-
free_all_blocks();
|
873 |
-
longjmp(m_jmp_state, status);
|
874 |
-
|
875 |
-
// we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit
|
876 |
-
// that this function doesn't return, otherwise we get this error:
|
877 |
-
//
|
878 |
-
// error : function declared 'noreturn' should not return
|
879 |
-
exit(1);
|
880 |
-
}
|
881 |
-
|
882 |
-
void *jpeg_decoder::alloc(size_t nSize, bool zero)
|
883 |
-
{
|
884 |
-
nSize = (JPGD_MAX(nSize, 1) + 3) & ~3;
|
885 |
-
char *rv = NULL;
|
886 |
-
for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext)
|
887 |
-
{
|
888 |
-
if ((b->m_used_count + nSize) <= b->m_size)
|
889 |
-
{
|
890 |
-
rv = b->m_data + b->m_used_count;
|
891 |
-
b->m_used_count += nSize;
|
892 |
-
break;
|
893 |
-
}
|
894 |
-
}
|
895 |
-
if (!rv)
|
896 |
-
{
|
897 |
-
int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047);
|
898 |
-
mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity);
|
899 |
-
if (!b) stop_decoding(JPGD_NOTENOUGHMEM);
|
900 |
-
b->m_pNext = m_pMem_blocks; m_pMem_blocks = b;
|
901 |
-
b->m_used_count = nSize;
|
902 |
-
b->m_size = capacity;
|
903 |
-
rv = b->m_data;
|
904 |
-
}
|
905 |
-
if (zero) memset(rv, 0, nSize);
|
906 |
-
return rv;
|
907 |
-
}
|
908 |
-
|
909 |
-
void jpeg_decoder::word_clear(void *p, uint16 c, uint n)
|
910 |
-
{
|
911 |
-
uint8 *pD = (uint8*)p;
|
912 |
-
const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF;
|
913 |
-
while (n)
|
914 |
-
{
|
915 |
-
pD[0] = l; pD[1] = h; pD += 2;
|
916 |
-
n--;
|
917 |
-
}
|
918 |
-
}
|
919 |
-
|
920 |
-
// Refill the input buffer.
|
921 |
-
// This method will sit in a loop until (A) the buffer is full or (B)
|
922 |
-
// the stream's read() method reports and end of file condition.
|
923 |
-
void jpeg_decoder::prep_in_buffer()
|
924 |
-
{
|
925 |
-
m_in_buf_left = 0;
|
926 |
-
m_pIn_buf_ofs = m_in_buf;
|
927 |
-
|
928 |
-
if (m_eof_flag)
|
929 |
-
return;
|
930 |
-
|
931 |
-
do
|
932 |
-
{
|
933 |
-
int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag);
|
934 |
-
if (bytes_read == -1)
|
935 |
-
stop_decoding(JPGD_STREAM_READ);
|
936 |
-
|
937 |
-
m_in_buf_left += bytes_read;
|
938 |
-
} while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag));
|
939 |
-
|
940 |
-
m_total_bytes_read += m_in_buf_left;
|
941 |
-
|
942 |
-
// Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid).
|
943 |
-
// (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.)
|
944 |
-
word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64);
|
945 |
-
}
|
946 |
-
|
947 |
-
// Read a Huffman code table.
|
948 |
-
void jpeg_decoder::read_dht_marker()
|
949 |
-
{
|
950 |
-
int i, index, count;
|
951 |
-
uint8 huff_num[17];
|
952 |
-
uint8 huff_val[256];
|
953 |
-
|
954 |
-
uint num_left = get_bits(16);
|
955 |
-
|
956 |
-
if (num_left < 2)
|
957 |
-
stop_decoding(JPGD_BAD_DHT_MARKER);
|
958 |
-
|
959 |
-
num_left -= 2;
|
960 |
-
|
961 |
-
while (num_left)
|
962 |
-
{
|
963 |
-
index = get_bits(8);
|
964 |
-
|
965 |
-
huff_num[0] = 0;
|
966 |
-
|
967 |
-
count = 0;
|
968 |
-
|
969 |
-
for (i = 1; i <= 16; i++)
|
970 |
-
{
|
971 |
-
huff_num[i] = static_cast<uint8>(get_bits(8));
|
972 |
-
count += huff_num[i];
|
973 |
-
}
|
974 |
-
|
975 |
-
if (count > 255)
|
976 |
-
stop_decoding(JPGD_BAD_DHT_COUNTS);
|
977 |
-
|
978 |
-
for (i = 0; i < count; i++)
|
979 |
-
huff_val[i] = static_cast<uint8>(get_bits(8));
|
980 |
-
|
981 |
-
i = 1 + 16 + count;
|
982 |
-
|
983 |
-
if (num_left < (uint)i)
|
984 |
-
stop_decoding(JPGD_BAD_DHT_MARKER);
|
985 |
-
|
986 |
-
num_left -= i;
|
987 |
-
|
988 |
-
if ((index & 0x10) > 0x10)
|
989 |
-
stop_decoding(JPGD_BAD_DHT_INDEX);
|
990 |
-
|
991 |
-
index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1);
|
992 |
-
|
993 |
-
if (index >= JPGD_MAX_HUFF_TABLES)
|
994 |
-
stop_decoding(JPGD_BAD_DHT_INDEX);
|
995 |
-
|
996 |
-
if (!m_huff_num[index])
|
997 |
-
m_huff_num[index] = (uint8 *)alloc(17);
|
998 |
-
|
999 |
-
if (!m_huff_val[index])
|
1000 |
-
m_huff_val[index] = (uint8 *)alloc(256);
|
1001 |
-
|
1002 |
-
m_huff_ac[index] = (index & 0x10) != 0;
|
1003 |
-
memcpy(m_huff_num[index], huff_num, 17);
|
1004 |
-
memcpy(m_huff_val[index], huff_val, 256);
|
1005 |
-
}
|
1006 |
-
}
|
1007 |
-
|
1008 |
-
// Read a quantization table.
|
1009 |
-
void jpeg_decoder::read_dqt_marker()
|
1010 |
-
{
|
1011 |
-
int n, i, prec;
|
1012 |
-
uint num_left;
|
1013 |
-
uint temp;
|
1014 |
-
|
1015 |
-
num_left = get_bits(16);
|
1016 |
-
|
1017 |
-
if (num_left < 2)
|
1018 |
-
stop_decoding(JPGD_BAD_DQT_MARKER);
|
1019 |
-
|
1020 |
-
num_left -= 2;
|
1021 |
-
|
1022 |
-
while (num_left)
|
1023 |
-
{
|
1024 |
-
n = get_bits(8);
|
1025 |
-
prec = n >> 4;
|
1026 |
-
n &= 0x0F;
|
1027 |
-
|
1028 |
-
if (n >= JPGD_MAX_QUANT_TABLES)
|
1029 |
-
stop_decoding(JPGD_BAD_DQT_TABLE);
|
1030 |
-
|
1031 |
-
if (!m_quant[n])
|
1032 |
-
m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t));
|
1033 |
-
|
1034 |
-
// read quantization entries, in zag order
|
1035 |
-
for (i = 0; i < 64; i++)
|
1036 |
-
{
|
1037 |
-
temp = get_bits(8);
|
1038 |
-
|
1039 |
-
if (prec)
|
1040 |
-
temp = (temp << 8) + get_bits(8);
|
1041 |
-
|
1042 |
-
m_quant[n][i] = static_cast<jpgd_quant_t>(temp);
|
1043 |
-
}
|
1044 |
-
|
1045 |
-
i = 64 + 1;
|
1046 |
-
|
1047 |
-
if (prec)
|
1048 |
-
i += 64;
|
1049 |
-
|
1050 |
-
if (num_left < (uint)i)
|
1051 |
-
stop_decoding(JPGD_BAD_DQT_LENGTH);
|
1052 |
-
|
1053 |
-
num_left -= i;
|
1054 |
-
}
|
1055 |
-
}
|
1056 |
-
|
1057 |
-
// Read the start of frame (SOF) marker.
|
1058 |
-
void jpeg_decoder::read_sof_marker()
|
1059 |
-
{
|
1060 |
-
int i;
|
1061 |
-
uint num_left;
|
1062 |
-
|
1063 |
-
num_left = get_bits(16);
|
1064 |
-
|
1065 |
-
if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */
|
1066 |
-
stop_decoding(JPGD_BAD_PRECISION);
|
1067 |
-
|
1068 |
-
m_image_y_size = get_bits(16);
|
1069 |
-
|
1070 |
-
if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT))
|
1071 |
-
stop_decoding(JPGD_BAD_HEIGHT);
|
1072 |
-
|
1073 |
-
m_image_x_size = get_bits(16);
|
1074 |
-
|
1075 |
-
if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH))
|
1076 |
-
stop_decoding(JPGD_BAD_WIDTH);
|
1077 |
-
|
1078 |
-
m_comps_in_frame = get_bits(8);
|
1079 |
-
|
1080 |
-
if (m_comps_in_frame > JPGD_MAX_COMPONENTS)
|
1081 |
-
stop_decoding(JPGD_TOO_MANY_COMPONENTS);
|
1082 |
-
|
1083 |
-
if (num_left != (uint)(m_comps_in_frame * 3 + 8))
|
1084 |
-
stop_decoding(JPGD_BAD_SOF_LENGTH);
|
1085 |
-
|
1086 |
-
for (i = 0; i < m_comps_in_frame; i++)
|
1087 |
-
{
|
1088 |
-
m_comp_ident[i] = get_bits(8);
|
1089 |
-
m_comp_h_samp[i] = get_bits(4);
|
1090 |
-
m_comp_v_samp[i] = get_bits(4);
|
1091 |
-
m_comp_quant[i] = get_bits(8);
|
1092 |
-
}
|
1093 |
-
}
|
1094 |
-
|
1095 |
-
// Used to skip unrecognized markers.
|
1096 |
-
void jpeg_decoder::skip_variable_marker()
|
1097 |
-
{
|
1098 |
-
uint num_left;
|
1099 |
-
|
1100 |
-
num_left = get_bits(16);
|
1101 |
-
|
1102 |
-
if (num_left < 2)
|
1103 |
-
stop_decoding(JPGD_BAD_VARIABLE_MARKER);
|
1104 |
-
|
1105 |
-
num_left -= 2;
|
1106 |
-
|
1107 |
-
while (num_left)
|
1108 |
-
{
|
1109 |
-
get_bits(8);
|
1110 |
-
num_left--;
|
1111 |
-
}
|
1112 |
-
}
|
1113 |
-
|
1114 |
-
// Read a define restart interval (DRI) marker.
|
1115 |
-
void jpeg_decoder::read_dri_marker()
|
1116 |
-
{
|
1117 |
-
if (get_bits(16) != 4)
|
1118 |
-
stop_decoding(JPGD_BAD_DRI_LENGTH);
|
1119 |
-
|
1120 |
-
m_restart_interval = get_bits(16);
|
1121 |
-
}
|
1122 |
-
|
1123 |
-
// Read a start of scan (SOS) marker.
|
1124 |
-
void jpeg_decoder::read_sos_marker()
|
1125 |
-
{
|
1126 |
-
uint num_left;
|
1127 |
-
int i, ci, n, c, cc;
|
1128 |
-
|
1129 |
-
num_left = get_bits(16);
|
1130 |
-
|
1131 |
-
n = get_bits(8);
|
1132 |
-
|
1133 |
-
m_comps_in_scan = n;
|
1134 |
-
|
1135 |
-
num_left -= 3;
|
1136 |
-
|
1137 |
-
if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) )
|
1138 |
-
stop_decoding(JPGD_BAD_SOS_LENGTH);
|
1139 |
-
|
1140 |
-
for (i = 0; i < n; i++)
|
1141 |
-
{
|
1142 |
-
cc = get_bits(8);
|
1143 |
-
c = get_bits(8);
|
1144 |
-
num_left -= 2;
|
1145 |
-
|
1146 |
-
for (ci = 0; ci < m_comps_in_frame; ci++)
|
1147 |
-
if (cc == m_comp_ident[ci])
|
1148 |
-
break;
|
1149 |
-
|
1150 |
-
if (ci >= m_comps_in_frame)
|
1151 |
-
stop_decoding(JPGD_BAD_SOS_COMP_ID);
|
1152 |
-
|
1153 |
-
m_comp_list[i] = ci;
|
1154 |
-
m_comp_dc_tab[ci] = (c >> 4) & 15;
|
1155 |
-
m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1);
|
1156 |
-
}
|
1157 |
-
|
1158 |
-
m_spectral_start = get_bits(8);
|
1159 |
-
m_spectral_end = get_bits(8);
|
1160 |
-
m_successive_high = get_bits(4);
|
1161 |
-
m_successive_low = get_bits(4);
|
1162 |
-
|
1163 |
-
if (!m_progressive_flag)
|
1164 |
-
{
|
1165 |
-
m_spectral_start = 0;
|
1166 |
-
m_spectral_end = 63;
|
1167 |
-
}
|
1168 |
-
|
1169 |
-
num_left -= 3;
|
1170 |
-
|
1171 |
-
while (num_left) /* read past whatever is num_left */
|
1172 |
-
{
|
1173 |
-
get_bits(8);
|
1174 |
-
num_left--;
|
1175 |
-
}
|
1176 |
-
}
|
1177 |
-
|
1178 |
-
// Finds the next marker.
|
1179 |
-
int jpeg_decoder::next_marker()
|
1180 |
-
{
|
1181 |
-
uint c, bytes;
|
1182 |
-
|
1183 |
-
bytes = 0;
|
1184 |
-
|
1185 |
-
do
|
1186 |
-
{
|
1187 |
-
do
|
1188 |
-
{
|
1189 |
-
bytes++;
|
1190 |
-
c = get_bits(8);
|
1191 |
-
} while (c != 0xFF);
|
1192 |
-
|
1193 |
-
do
|
1194 |
-
{
|
1195 |
-
c = get_bits(8);
|
1196 |
-
} while (c == 0xFF);
|
1197 |
-
|
1198 |
-
} while (c == 0);
|
1199 |
-
|
1200 |
-
// If bytes > 0 here, there where extra bytes before the marker (not good).
|
1201 |
-
|
1202 |
-
return c;
|
1203 |
-
}
|
1204 |
-
|
1205 |
-
// Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is
|
1206 |
-
// encountered.
|
1207 |
-
int jpeg_decoder::process_markers()
|
1208 |
-
{
|
1209 |
-
int c;
|
1210 |
-
|
1211 |
-
for ( ; ; )
|
1212 |
-
{
|
1213 |
-
c = next_marker();
|
1214 |
-
|
1215 |
-
switch (c)
|
1216 |
-
{
|
1217 |
-
case M_SOF0:
|
1218 |
-
case M_SOF1:
|
1219 |
-
case M_SOF2:
|
1220 |
-
case M_SOF3:
|
1221 |
-
case M_SOF5:
|
1222 |
-
case M_SOF6:
|
1223 |
-
case M_SOF7:
|
1224 |
-
// case M_JPG:
|
1225 |
-
case M_SOF9:
|
1226 |
-
case M_SOF10:
|
1227 |
-
case M_SOF11:
|
1228 |
-
case M_SOF13:
|
1229 |
-
case M_SOF14:
|
1230 |
-
case M_SOF15:
|
1231 |
-
case M_SOI:
|
1232 |
-
case M_EOI:
|
1233 |
-
case M_SOS:
|
1234 |
-
{
|
1235 |
-
return c;
|
1236 |
-
}
|
1237 |
-
case M_DHT:
|
1238 |
-
{
|
1239 |
-
read_dht_marker();
|
1240 |
-
break;
|
1241 |
-
}
|
1242 |
-
// No arithmitic support - dumb patents!
|
1243 |
-
case M_DAC:
|
1244 |
-
{
|
1245 |
-
stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
|
1246 |
-
break;
|
1247 |
-
}
|
1248 |
-
case M_DQT:
|
1249 |
-
{
|
1250 |
-
read_dqt_marker();
|
1251 |
-
break;
|
1252 |
-
}
|
1253 |
-
case M_DRI:
|
1254 |
-
{
|
1255 |
-
read_dri_marker();
|
1256 |
-
break;
|
1257 |
-
}
|
1258 |
-
//case M_APP0: /* no need to read the JFIF marker */
|
1259 |
-
|
1260 |
-
case M_JPG:
|
1261 |
-
case M_RST0: /* no parameters */
|
1262 |
-
case M_RST1:
|
1263 |
-
case M_RST2:
|
1264 |
-
case M_RST3:
|
1265 |
-
case M_RST4:
|
1266 |
-
case M_RST5:
|
1267 |
-
case M_RST6:
|
1268 |
-
case M_RST7:
|
1269 |
-
case M_TEM:
|
1270 |
-
{
|
1271 |
-
stop_decoding(JPGD_UNEXPECTED_MARKER);
|
1272 |
-
break;
|
1273 |
-
}
|
1274 |
-
default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */
|
1275 |
-
{
|
1276 |
-
skip_variable_marker();
|
1277 |
-
break;
|
1278 |
-
}
|
1279 |
-
}
|
1280 |
-
}
|
1281 |
-
}
|
1282 |
-
|
1283 |
-
// Finds the start of image (SOI) marker.
|
1284 |
-
// This code is rather defensive: it only checks the first 512 bytes to avoid
|
1285 |
-
// false positives.
|
1286 |
-
void jpeg_decoder::locate_soi_marker()
|
1287 |
-
{
|
1288 |
-
uint lastchar, thischar;
|
1289 |
-
uint bytesleft;
|
1290 |
-
|
1291 |
-
lastchar = get_bits(8);
|
1292 |
-
|
1293 |
-
thischar = get_bits(8);
|
1294 |
-
|
1295 |
-
/* ok if it's a normal JPEG file without a special header */
|
1296 |
-
|
1297 |
-
if ((lastchar == 0xFF) && (thischar == M_SOI))
|
1298 |
-
return;
|
1299 |
-
|
1300 |
-
bytesleft = 4096; //512;
|
1301 |
-
|
1302 |
-
for ( ; ; )
|
1303 |
-
{
|
1304 |
-
if (--bytesleft == 0)
|
1305 |
-
stop_decoding(JPGD_NOT_JPEG);
|
1306 |
-
|
1307 |
-
lastchar = thischar;
|
1308 |
-
|
1309 |
-
thischar = get_bits(8);
|
1310 |
-
|
1311 |
-
if (lastchar == 0xFF)
|
1312 |
-
{
|
1313 |
-
if (thischar == M_SOI)
|
1314 |
-
break;
|
1315 |
-
else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end
|
1316 |
-
stop_decoding(JPGD_NOT_JPEG);
|
1317 |
-
}
|
1318 |
-
}
|
1319 |
-
|
1320 |
-
// Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad.
|
1321 |
-
thischar = (m_bit_buf >> 24) & 0xFF;
|
1322 |
-
|
1323 |
-
if (thischar != 0xFF)
|
1324 |
-
stop_decoding(JPGD_NOT_JPEG);
|
1325 |
-
}
|
1326 |
-
|
1327 |
-
// Find a start of frame (SOF) marker.
|
1328 |
-
void jpeg_decoder::locate_sof_marker()
|
1329 |
-
{
|
1330 |
-
locate_soi_marker();
|
1331 |
-
|
1332 |
-
int c = process_markers();
|
1333 |
-
|
1334 |
-
switch (c)
|
1335 |
-
{
|
1336 |
-
case M_SOF2:
|
1337 |
-
m_progressive_flag = JPGD_TRUE;
|
1338 |
-
case M_SOF0: /* baseline DCT */
|
1339 |
-
case M_SOF1: /* extended sequential DCT */
|
1340 |
-
{
|
1341 |
-
read_sof_marker();
|
1342 |
-
break;
|
1343 |
-
}
|
1344 |
-
case M_SOF9: /* Arithmitic coding */
|
1345 |
-
{
|
1346 |
-
stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT);
|
1347 |
-
break;
|
1348 |
-
}
|
1349 |
-
default:
|
1350 |
-
{
|
1351 |
-
stop_decoding(JPGD_UNSUPPORTED_MARKER);
|
1352 |
-
break;
|
1353 |
-
}
|
1354 |
-
}
|
1355 |
-
}
|
1356 |
-
|
1357 |
-
// Find a start of scan (SOS) marker.
|
1358 |
-
int jpeg_decoder::locate_sos_marker()
|
1359 |
-
{
|
1360 |
-
int c;
|
1361 |
-
|
1362 |
-
c = process_markers();
|
1363 |
-
|
1364 |
-
if (c == M_EOI)
|
1365 |
-
return JPGD_FALSE;
|
1366 |
-
else if (c != M_SOS)
|
1367 |
-
stop_decoding(JPGD_UNEXPECTED_MARKER);
|
1368 |
-
|
1369 |
-
read_sos_marker();
|
1370 |
-
|
1371 |
-
return JPGD_TRUE;
|
1372 |
-
}
|
1373 |
-
|
1374 |
-
// Reset everything to default/uninitialized state.
|
1375 |
-
void jpeg_decoder::init(jpeg_decoder_stream *pStream)
|
1376 |
-
{
|
1377 |
-
m_pMem_blocks = NULL;
|
1378 |
-
m_error_code = JPGD_SUCCESS;
|
1379 |
-
m_ready_flag = false;
|
1380 |
-
m_image_x_size = m_image_y_size = 0;
|
1381 |
-
m_pStream = pStream;
|
1382 |
-
m_progressive_flag = JPGD_FALSE;
|
1383 |
-
|
1384 |
-
memset(m_huff_ac, 0, sizeof(m_huff_ac));
|
1385 |
-
memset(m_huff_num, 0, sizeof(m_huff_num));
|
1386 |
-
memset(m_huff_val, 0, sizeof(m_huff_val));
|
1387 |
-
memset(m_quant, 0, sizeof(m_quant));
|
1388 |
-
|
1389 |
-
m_scan_type = 0;
|
1390 |
-
m_comps_in_frame = 0;
|
1391 |
-
|
1392 |
-
memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp));
|
1393 |
-
memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp));
|
1394 |
-
memset(m_comp_quant, 0, sizeof(m_comp_quant));
|
1395 |
-
memset(m_comp_ident, 0, sizeof(m_comp_ident));
|
1396 |
-
memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks));
|
1397 |
-
memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks));
|
1398 |
-
|
1399 |
-
m_comps_in_scan = 0;
|
1400 |
-
memset(m_comp_list, 0, sizeof(m_comp_list));
|
1401 |
-
memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab));
|
1402 |
-
memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab));
|
1403 |
-
|
1404 |
-
m_spectral_start = 0;
|
1405 |
-
m_spectral_end = 0;
|
1406 |
-
m_successive_low = 0;
|
1407 |
-
m_successive_high = 0;
|
1408 |
-
m_max_mcu_x_size = 0;
|
1409 |
-
m_max_mcu_y_size = 0;
|
1410 |
-
m_blocks_per_mcu = 0;
|
1411 |
-
m_max_blocks_per_row = 0;
|
1412 |
-
m_mcus_per_row = 0;
|
1413 |
-
m_mcus_per_col = 0;
|
1414 |
-
m_expanded_blocks_per_component = 0;
|
1415 |
-
m_expanded_blocks_per_mcu = 0;
|
1416 |
-
m_expanded_blocks_per_row = 0;
|
1417 |
-
m_freq_domain_chroma_upsample = false;
|
1418 |
-
|
1419 |
-
memset(m_mcu_org, 0, sizeof(m_mcu_org));
|
1420 |
-
|
1421 |
-
m_total_lines_left = 0;
|
1422 |
-
m_mcu_lines_left = 0;
|
1423 |
-
m_real_dest_bytes_per_scan_line = 0;
|
1424 |
-
m_dest_bytes_per_scan_line = 0;
|
1425 |
-
m_dest_bytes_per_pixel = 0;
|
1426 |
-
|
1427 |
-
memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs));
|
1428 |
-
|
1429 |
-
memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs));
|
1430 |
-
memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs));
|
1431 |
-
memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
|
1432 |
-
|
1433 |
-
m_eob_run = 0;
|
1434 |
-
|
1435 |
-
memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
|
1436 |
-
|
1437 |
-
m_pIn_buf_ofs = m_in_buf;
|
1438 |
-
m_in_buf_left = 0;
|
1439 |
-
m_eof_flag = false;
|
1440 |
-
m_tem_flag = 0;
|
1441 |
-
|
1442 |
-
memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start));
|
1443 |
-
memset(m_in_buf, 0, sizeof(m_in_buf));
|
1444 |
-
memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end));
|
1445 |
-
|
1446 |
-
m_restart_interval = 0;
|
1447 |
-
m_restarts_left = 0;
|
1448 |
-
m_next_restart_num = 0;
|
1449 |
-
|
1450 |
-
m_max_mcus_per_row = 0;
|
1451 |
-
m_max_blocks_per_mcu = 0;
|
1452 |
-
m_max_mcus_per_col = 0;
|
1453 |
-
|
1454 |
-
memset(m_last_dc_val, 0, sizeof(m_last_dc_val));
|
1455 |
-
m_pMCU_coefficients = NULL;
|
1456 |
-
m_pSample_buf = NULL;
|
1457 |
-
|
1458 |
-
m_total_bytes_read = 0;
|
1459 |
-
|
1460 |
-
m_pScan_line_0 = NULL;
|
1461 |
-
m_pScan_line_1 = NULL;
|
1462 |
-
|
1463 |
-
// Ready the input buffer.
|
1464 |
-
prep_in_buffer();
|
1465 |
-
|
1466 |
-
// Prime the bit buffer.
|
1467 |
-
m_bits_left = 16;
|
1468 |
-
m_bit_buf = 0;
|
1469 |
-
|
1470 |
-
get_bits(16);
|
1471 |
-
get_bits(16);
|
1472 |
-
|
1473 |
-
for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++)
|
1474 |
-
m_mcu_block_max_zag[i] = 64;
|
1475 |
-
}
|
1476 |
-
|
1477 |
-
#define SCALEBITS 16
|
1478 |
-
#define ONE_HALF ((int) 1 << (SCALEBITS-1))
|
1479 |
-
#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5f))
|
1480 |
-
|
1481 |
-
// Create a few tables that allow us to quickly convert YCbCr to RGB.
|
1482 |
-
void jpeg_decoder::create_look_ups()
|
1483 |
-
{
|
1484 |
-
for (int i = 0; i <= 255; i++)
|
1485 |
-
{
|
1486 |
-
int k = i - 128;
|
1487 |
-
m_crr[i] = ( FIX(1.40200f) * k + ONE_HALF) >> SCALEBITS;
|
1488 |
-
m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS;
|
1489 |
-
m_crg[i] = (-FIX(0.71414f)) * k;
|
1490 |
-
m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF;
|
1491 |
-
}
|
1492 |
-
}
|
1493 |
-
|
1494 |
-
// This method throws back into the stream any bytes that where read
|
1495 |
-
// into the bit buffer during initial marker scanning.
|
1496 |
-
void jpeg_decoder::fix_in_buffer()
|
1497 |
-
{
|
1498 |
-
// In case any 0xFF's where pulled into the buffer during marker scanning.
|
1499 |
-
JPGD_ASSERT((m_bits_left & 7) == 0);
|
1500 |
-
|
1501 |
-
if (m_bits_left == 16)
|
1502 |
-
stuff_char( (uint8)(m_bit_buf & 0xFF));
|
1503 |
-
|
1504 |
-
if (m_bits_left >= 8)
|
1505 |
-
stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF));
|
1506 |
-
|
1507 |
-
stuff_char((uint8)((m_bit_buf >> 16) & 0xFF));
|
1508 |
-
stuff_char((uint8)((m_bit_buf >> 24) & 0xFF));
|
1509 |
-
|
1510 |
-
m_bits_left = 16;
|
1511 |
-
get_bits_no_markers(16);
|
1512 |
-
get_bits_no_markers(16);
|
1513 |
-
}
|
1514 |
-
|
1515 |
-
void jpeg_decoder::transform_mcu(int mcu_row)
|
1516 |
-
{
|
1517 |
-
jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
|
1518 |
-
uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64;
|
1519 |
-
|
1520 |
-
for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
|
1521 |
-
{
|
1522 |
-
idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
|
1523 |
-
pSrc_ptr += 64;
|
1524 |
-
pDst_ptr += 64;
|
1525 |
-
}
|
1526 |
-
}
|
1527 |
-
|
1528 |
-
static const uint8 s_max_rc[64] =
|
1529 |
-
{
|
1530 |
-
17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86,
|
1531 |
-
102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136,
|
1532 |
-
136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136,
|
1533 |
-
136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136
|
1534 |
-
};
|
1535 |
-
|
1536 |
-
void jpeg_decoder::transform_mcu_expand(int mcu_row)
|
1537 |
-
{
|
1538 |
-
jpgd_block_t* pSrc_ptr = m_pMCU_coefficients;
|
1539 |
-
uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64;
|
1540 |
-
|
1541 |
-
// Y IDCT
|
1542 |
-
int mcu_block;
|
1543 |
-
for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++)
|
1544 |
-
{
|
1545 |
-
idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]);
|
1546 |
-
pSrc_ptr += 64;
|
1547 |
-
pDst_ptr += 64;
|
1548 |
-
}
|
1549 |
-
|
1550 |
-
// Chroma IDCT, with upsampling
|
1551 |
-
jpgd_block_t temp_block[64];
|
1552 |
-
|
1553 |
-
for (int i = 0; i < 2; i++)
|
1554 |
-
{
|
1555 |
-
DCT_Upsample::Matrix44 P, Q, R, S;
|
1556 |
-
|
1557 |
-
JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1);
|
1558 |
-
JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64);
|
1559 |
-
|
1560 |
-
switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1])
|
1561 |
-
{
|
1562 |
-
case 1*16+1:
|
1563 |
-
DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr);
|
1564 |
-
DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr);
|
1565 |
-
break;
|
1566 |
-
case 1*16+2:
|
1567 |
-
DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr);
|
1568 |
-
DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr);
|
1569 |
-
break;
|
1570 |
-
case 2*16+2:
|
1571 |
-
DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr);
|
1572 |
-
DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr);
|
1573 |
-
break;
|
1574 |
-
case 3*16+2:
|
1575 |
-
DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr);
|
1576 |
-
DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr);
|
1577 |
-
break;
|
1578 |
-
case 3*16+3:
|
1579 |
-
DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr);
|
1580 |
-
DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr);
|
1581 |
-
break;
|
1582 |
-
case 3*16+4:
|
1583 |
-
DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr);
|
1584 |
-
DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr);
|
1585 |
-
break;
|
1586 |
-
case 4*16+4:
|
1587 |
-
DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr);
|
1588 |
-
DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr);
|
1589 |
-
break;
|
1590 |
-
case 5*16+4:
|
1591 |
-
DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr);
|
1592 |
-
DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr);
|
1593 |
-
break;
|
1594 |
-
case 5*16+5:
|
1595 |
-
DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr);
|
1596 |
-
DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr);
|
1597 |
-
break;
|
1598 |
-
case 5*16+6:
|
1599 |
-
DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr);
|
1600 |
-
DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr);
|
1601 |
-
break;
|
1602 |
-
case 6*16+6:
|
1603 |
-
DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr);
|
1604 |
-
DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr);
|
1605 |
-
break;
|
1606 |
-
case 7*16+6:
|
1607 |
-
DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr);
|
1608 |
-
DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr);
|
1609 |
-
break;
|
1610 |
-
case 7*16+7:
|
1611 |
-
DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr);
|
1612 |
-
DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr);
|
1613 |
-
break;
|
1614 |
-
case 7*16+8:
|
1615 |
-
DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr);
|
1616 |
-
DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr);
|
1617 |
-
break;
|
1618 |
-
case 8*16+8:
|
1619 |
-
DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr);
|
1620 |
-
DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr);
|
1621 |
-
break;
|
1622 |
-
default:
|
1623 |
-
JPGD_ASSERT(false);
|
1624 |
-
}
|
1625 |
-
|
1626 |
-
DCT_Upsample::Matrix44 a(P + Q); P -= Q;
|
1627 |
-
DCT_Upsample::Matrix44& b = P;
|
1628 |
-
DCT_Upsample::Matrix44 c(R + S); R -= S;
|
1629 |
-
DCT_Upsample::Matrix44& d = R;
|
1630 |
-
|
1631 |
-
DCT_Upsample::Matrix44::add_and_store(temp_block, a, c);
|
1632 |
-
idct_4x4(temp_block, pDst_ptr);
|
1633 |
-
pDst_ptr += 64;
|
1634 |
-
|
1635 |
-
DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c);
|
1636 |
-
idct_4x4(temp_block, pDst_ptr);
|
1637 |
-
pDst_ptr += 64;
|
1638 |
-
|
1639 |
-
DCT_Upsample::Matrix44::add_and_store(temp_block, b, d);
|
1640 |
-
idct_4x4(temp_block, pDst_ptr);
|
1641 |
-
pDst_ptr += 64;
|
1642 |
-
|
1643 |
-
DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d);
|
1644 |
-
idct_4x4(temp_block, pDst_ptr);
|
1645 |
-
pDst_ptr += 64;
|
1646 |
-
|
1647 |
-
pSrc_ptr += 64;
|
1648 |
-
}
|
1649 |
-
}
|
1650 |
-
|
1651 |
-
// Loads and dequantizes the next row of (already decoded) coefficients.
|
1652 |
-
// Progressive images only.
|
1653 |
-
void jpeg_decoder::load_next_row()
|
1654 |
-
{
|
1655 |
-
int i;
|
1656 |
-
jpgd_block_t *p;
|
1657 |
-
jpgd_quant_t *q;
|
1658 |
-
int mcu_row, mcu_block, row_block = 0;
|
1659 |
-
int component_num, component_id;
|
1660 |
-
int block_x_mcu[JPGD_MAX_COMPONENTS];
|
1661 |
-
|
1662 |
-
memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int));
|
1663 |
-
|
1664 |
-
for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
|
1665 |
-
{
|
1666 |
-
int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;
|
1667 |
-
|
1668 |
-
for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
|
1669 |
-
{
|
1670 |
-
component_id = m_mcu_org[mcu_block];
|
1671 |
-
q = m_quant[m_comp_quant[component_id]];
|
1672 |
-
|
1673 |
-
p = m_pMCU_coefficients + 64 * mcu_block;
|
1674 |
-
|
1675 |
-
jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
|
1676 |
-
jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
|
1677 |
-
p[0] = pDC[0];
|
1678 |
-
memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t));
|
1679 |
-
|
1680 |
-
for (i = 63; i > 0; i--)
|
1681 |
-
if (p[g_ZAG[i]])
|
1682 |
-
break;
|
1683 |
-
|
1684 |
-
m_mcu_block_max_zag[mcu_block] = i + 1;
|
1685 |
-
|
1686 |
-
for ( ; i >= 0; i--)
|
1687 |
-
if (p[g_ZAG[i]])
|
1688 |
-
p[g_ZAG[i]] = static_cast<jpgd_block_t>(p[g_ZAG[i]] * q[i]);
|
1689 |
-
|
1690 |
-
row_block++;
|
1691 |
-
|
1692 |
-
if (m_comps_in_scan == 1)
|
1693 |
-
block_x_mcu[component_id]++;
|
1694 |
-
else
|
1695 |
-
{
|
1696 |
-
if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
|
1697 |
-
{
|
1698 |
-
block_x_mcu_ofs = 0;
|
1699 |
-
|
1700 |
-
if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
|
1701 |
-
{
|
1702 |
-
block_y_mcu_ofs = 0;
|
1703 |
-
|
1704 |
-
block_x_mcu[component_id] += m_comp_h_samp[component_id];
|
1705 |
-
}
|
1706 |
-
}
|
1707 |
-
}
|
1708 |
-
}
|
1709 |
-
|
1710 |
-
if (m_freq_domain_chroma_upsample)
|
1711 |
-
transform_mcu_expand(mcu_row);
|
1712 |
-
else
|
1713 |
-
transform_mcu(mcu_row);
|
1714 |
-
}
|
1715 |
-
|
1716 |
-
if (m_comps_in_scan == 1)
|
1717 |
-
m_block_y_mcu[m_comp_list[0]]++;
|
1718 |
-
else
|
1719 |
-
{
|
1720 |
-
for (component_num = 0; component_num < m_comps_in_scan; component_num++)
|
1721 |
-
{
|
1722 |
-
component_id = m_comp_list[component_num];
|
1723 |
-
|
1724 |
-
m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
|
1725 |
-
}
|
1726 |
-
}
|
1727 |
-
}
|
1728 |
-
|
1729 |
-
// Restart interval processing.
// Resynchronizes the entropy decoder at a restart marker (RSTn): scans ahead
// for the marker, validates its sequence number, resets DC predictors and the
// EOB run, then reprimes the bit buffer.
void jpeg_decoder::process_restart()
{
  int i;
  int c = 0;

  // Align to a byte boundary
  // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers!
  //get_bits_no_markers(m_bits_left & 7);

  // Let's scan a little bit to find the marker, but not _too_ far.
  // 1536 is a "fudge factor" that determines how much to scan.
  for (i = 1536; i > 0; i--)
    if (get_char() == 0xFF)
      break;

  if (i == 0)
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  // Skip any 0xFF fill bytes; the first non-0xFF byte is the marker code.
  for ( ; i > 0; i--)
    if ((c = get_char()) != 0xFF)
      break;

  if (i == 0)
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  // Is it the expected marker? If not, something bad happened.
  if (c != (m_next_restart_num + M_RST0))
    stop_decoding(JPGD_BAD_RESTART_MARKER);

  // Reset each component's DC prediction values.
  memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));

  m_eob_run = 0;

  m_restarts_left = m_restart_interval;

  // Restart marker numbers cycle RST0..RST7.
  m_next_restart_num = (m_next_restart_num + 1) & 7;

  // Get the bit buffer going again...
  m_bits_left = 16;
  get_bits_no_markers(16);
  get_bits_no_markers(16);
}
|
1774 |
-
|
1775 |
-
// Dequantize a single AC coefficient: scale the raw value by its quantizer.
static inline int dequantize_ac(int c, int q) { return c * q; }
|
1776 |
-
|
1777 |
-
// Decodes and dequantizes the next row of coefficients.
// Sequential (non-progressive) path: for each MCU in the row, Huffman-decodes
// the DC delta and the AC run/size pairs for every block, dequantizes them in
// place into m_pMCU_coefficients, then hands the MCU to the IDCT/transform.
void jpeg_decoder::decode_next_row()
{
  int row_block = 0;

  for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
  {
    if ((m_restart_interval) && (m_restarts_left == 0))
      process_restart();

    jpgd_block_t* p = m_pMCU_coefficients;
    for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64)
    {
      int component_id = m_mcu_org[mcu_block];
      jpgd_quant_t* q = m_quant[m_comp_quant[component_id]];

      // DC: decoded value is a delta from the previous block's DC of this component.
      int r, s;
      s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r);
      s = HUFF_EXTEND(r, s);

      m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]);

      p[0] = static_cast<jpgd_block_t>(s * q[0]);

      // Number of zig-zag slots that held nonzero data last time this MCU slot
      // was used; those must be zeroed when skipped over below.
      int prev_num_set = m_mcu_block_max_zag[mcu_block];

      huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]];

      int k;
      for (k = 1; k < 64; k++)
      {
        int extra_bits;
        s = huff_decode(pH, extra_bits);

        // High nibble = run of zeros, low nibble = coefficient magnitude bits.
        r = s >> 4;
        s &= 15;

        if (s)
        {
          if (r)
          {
            if ((k + r) > 63)
              stop_decoding(JPGD_DECODE_ERROR);

            // Clear stale coefficients left over from the previous row.
            if (k < prev_num_set)
            {
              int n = JPGD_MIN(r, prev_num_set - k);
              int kt = k;
              while (n--)
                p[g_ZAG[kt++]] = 0;
            }

            k += r;
          }

          s = HUFF_EXTEND(extra_bits, s);

          JPGD_ASSERT(k < 64);

          p[g_ZAG[k]] = static_cast<jpgd_block_t>(dequantize_ac(s, q[k])); //s * q[k];
        }
        else
        {
          // r == 15 is ZRL (run of 16 zeros); anything else is EOB.
          if (r == 15)
          {
            if ((k + 16) > 64)
              stop_decoding(JPGD_DECODE_ERROR);

            if (k < prev_num_set)
            {
              int n = JPGD_MIN(16, prev_num_set - k);
              int kt = k;
              while (n--)
              {
                JPGD_ASSERT(kt <= 63);
                p[g_ZAG[kt++]] = 0;
              }
            }

            k += 16 - 1; // - 1 because the loop counter is k
            // BEGIN EPIC MOD
            JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0);
            // END EPIC MOD
          }
          else
            break;
        }
      }

      // Zero any trailing slots that were set last row but not this row.
      if (k < prev_num_set)
      {
        int kt = k;
        while (kt < prev_num_set)
          p[g_ZAG[kt++]] = 0;
      }

      m_mcu_block_max_zag[mcu_block] = k;

      row_block++;
    }

    if (m_freq_domain_chroma_upsample)
      transform_mcu_expand(mcu_row);
    else
      transform_mcu(mcu_row);

    m_restarts_left--;
  }
}
|
1886 |
-
|
1887 |
-
// YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB
// Converts one scan line of unsubsampled YCbCr samples to 4-byte RGBA/BGRA
// pixels using the precomputed m_crr/m_crg/m_cbg/m_cbb lookup tables.
void jpeg_decoder::H1V1Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d = m_pScan_line_0;
  // Sample layout per MCU: Y block, then Cb at +64, Cr at +128.
  uint8 *s = m_pSample_buf + row * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int j = 0; j < 8; j++)
    {
      int y = s[j];
      int cb = s[64+j];
      int cr = s[128+j];

      // Output channel order depends on the requested pixel format.
      if (jpg_format == ERGBFormatJPG::BGRA)
      {
        d[0] = clamp(y + m_cbb[cb]);
        d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
        d[2] = clamp(y + m_crr[cr]);
        d[3] = 255;
      }
      else
      {
        d[0] = clamp(y + m_crr[cr]);
        d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
        d[2] = clamp(y + m_cbb[cb]);
        d[3] = 255;
      }
      d += 4;
    }

    // Advance to the next MCU (3 blocks of 64 samples each).
    s += 64*3;
  }
}
|
1922 |
-
|
1923 |
-
// YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB
// Horizontal 2:1 chroma subsampling: each Cb/Cr sample covers two adjacent
// luma samples, so every chroma fetch emits two output pixels.
void jpeg_decoder::H2V1Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *y = m_pSample_buf + row * 8;
  // Chroma starts after the two Y blocks; Cr is one block past Cb.
  uint8 *c = m_pSample_buf + 2*64 + row * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    // l indexes the two Y blocks of the MCU (left half, right half).
    for (int l = 0; l < 2; l++)
    {
      for (int j = 0; j < 4; j++)
      {
        int cb = c[0];
        int cr = c[64];

        int rc = m_crr[cr];
        int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
        int bc = m_cbb[cb];

        int yy = y[j<<1];
        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d0[0] = clamp(yy+bc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+rc);
          d0[3] = 255;
          yy = y[(j<<1)+1];
          d0[4] = clamp(yy+bc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+rc);
          d0[7] = 255;
        }
        else
        {
          d0[0] = clamp(yy+rc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+bc);
          d0[3] = 255;
          yy = y[(j<<1)+1];
          d0[4] = clamp(yy+rc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+bc);
          d0[7] = 255;
        }

        d0 += 8;

        c++;
      }
      y += 64;
    }

    // Step to the next MCU: 4 blocks total; chroma pointer consumed 8 bytes.
    y += 64*4 - 64*2;
    c += 64*4 - 8;
  }
}
|
1981 |
-
|
1982 |
-
// YCbCr H1V2 (1x2:1:1, 4 m_blocks per MCU) to RGB
// Vertical 2:1 chroma subsampling: each Cb/Cr sample covers two vertically
// adjacent luma rows, so each pass fills two scan lines (d0 and d1).
void jpeg_decoder::H1V2Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *d1 = m_pScan_line_1;
  uint8 *y;
  uint8 *c;

  // Rows 0-7 come from the first Y block, rows 8-15 from the second.
  if (row < 8)
    y = m_pSample_buf + row * 8;
  else
    y = m_pSample_buf + 64*1 + (row & 7) * 8;

  // One chroma row serves two luma rows (row >> 1).
  c = m_pSample_buf + 64*2 + (row >> 1) * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    for (int j = 0; j < 8; j++)
    {
      int cb = c[0+j];
      int cr = c[64+j];

      int rc = m_crr[cr];
      int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
      int bc = m_cbb[cb];

      int yy = y[j];
      if (jpg_format == ERGBFormatJPG::BGRA)
      {
        d0[0] = clamp(yy+bc);
        d0[1] = clamp(yy+gc);
        d0[2] = clamp(yy+rc);
        d0[3] = 255;
        yy = y[8+j];
        d1[0] = clamp(yy+bc);
        d1[1] = clamp(yy+gc);
        d1[2] = clamp(yy+rc);
        d1[3] = 255;
      }
      else
      {
        d0[0] = clamp(yy+rc);
        d0[1] = clamp(yy+gc);
        d0[2] = clamp(yy+bc);
        d0[3] = 255;
        yy = y[8+j];
        d1[0] = clamp(yy+rc);
        d1[1] = clamp(yy+gc);
        d1[2] = clamp(yy+bc);
        d1[3] = 255;
      }

      d0 += 4;
      d1 += 4;
    }

    y += 64*4;
    c += 64*4;
  }
}
|
2043 |
-
|
2044 |
-
// YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB
// 2x2 chroma subsampling: each Cb/Cr sample covers a 2x2 luma quad, so each
// chroma fetch emits four pixels across two scan lines (d0 and d1).
void jpeg_decoder::H2V2Convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;
  uint8 *d0 = m_pScan_line_0;
  uint8 *d1 = m_pScan_line_1;
  uint8 *y;
  uint8 *c;

  // Rows 0-7 come from the top pair of Y blocks, rows 8-15 from the bottom pair.
  if (row < 8)
    y = m_pSample_buf + row * 8;
  else
    y = m_pSample_buf + 64*2 + (row & 7) * 8;

  // Chroma follows the four Y blocks; one chroma row serves two luma rows.
  c = m_pSample_buf + 64*4 + (row >> 1) * 8;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    // l indexes the left/right Y blocks of the current block row.
    for (int l = 0; l < 2; l++)
    {
      for (int j = 0; j < 8; j += 2)
      {
        int cb = c[0];
        int cr = c[64];

        int rc = m_crr[cr];
        int gc = ((m_crg[cr] + m_cbg[cb]) >> 16);
        int bc = m_cbb[cb];

        int yy = y[j];
        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d0[0] = clamp(yy+bc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+rc);
          d0[3] = 255;
          yy = y[j+1];
          d0[4] = clamp(yy+bc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+rc);
          d0[7] = 255;
          yy = y[j+8];
          d1[0] = clamp(yy+bc);
          d1[1] = clamp(yy+gc);
          d1[2] = clamp(yy+rc);
          d1[3] = 255;
          yy = y[j+8+1];
          d1[4] = clamp(yy+bc);
          d1[5] = clamp(yy+gc);
          d1[6] = clamp(yy+rc);
          d1[7] = 255;
        }
        else
        {
          d0[0] = clamp(yy+rc);
          d0[1] = clamp(yy+gc);
          d0[2] = clamp(yy+bc);
          d0[3] = 255;
          yy = y[j+1];
          d0[4] = clamp(yy+rc);
          d0[5] = clamp(yy+gc);
          d0[6] = clamp(yy+bc);
          d0[7] = 255;
          yy = y[j+8];
          d1[0] = clamp(yy+rc);
          d1[1] = clamp(yy+gc);
          d1[2] = clamp(yy+bc);
          d1[3] = 255;
          yy = y[j+8+1];
          d1[4] = clamp(yy+rc);
          d1[5] = clamp(yy+gc);
          d1[6] = clamp(yy+bc);
          d1[7] = 255;
        }

        d0 += 8;
        d1 += 8;

        c++;
      }
      y += 64;
    }

    // Step to the next MCU: 6 blocks total; chroma pointer consumed 8 bytes.
    y += 64*6 - 64*2;
    c += 64*6 - 8;
  }
}
|
2131 |
-
|
2132 |
-
// Y (1 block per MCU) to 8-bit grayscale
|
2133 |
-
void jpeg_decoder::gray_convert()
|
2134 |
-
{
|
2135 |
-
int row = m_max_mcu_y_size - m_mcu_lines_left;
|
2136 |
-
uint8 *d = m_pScan_line_0;
|
2137 |
-
uint8 *s = m_pSample_buf + row * 8;
|
2138 |
-
|
2139 |
-
for (int i = m_max_mcus_per_row; i > 0; i--)
|
2140 |
-
{
|
2141 |
-
*(uint *)d = *(uint *)s;
|
2142 |
-
*(uint *)(&d[4]) = *(uint *)(&s[4]);
|
2143 |
-
|
2144 |
-
s += 64;
|
2145 |
-
d += 8;
|
2146 |
-
}
|
2147 |
-
}
|
2148 |
-
|
2149 |
-
// Converts one scan line when frequency-domain chroma upsampling is active:
// the sample buffer already holds full-resolution Y/Cb/Cr planes laid out as
// m_expanded_blocks_per_component blocks per channel.
void jpeg_decoder::expanded_convert()
{
  int row = m_max_mcu_y_size - m_mcu_lines_left;

  uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8;

  uint8* d = m_pScan_line_0;

  for (int i = m_max_mcus_per_row; i > 0; i--)
  {
    // Walk the MCU 8 pixels at a time; k selects the Y block within the MCU.
    for (int k = 0; k < m_max_mcu_x_size; k += 8)
    {
      const int Y_ofs = k * 8;
      // Cb and Cr planes follow the Y plane at fixed per-component strides.
      const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component;
      const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2;
      for (int j = 0; j < 8; j++)
      {
        int y = Py[Y_ofs + j];
        int cb = Py[Cb_ofs + j];
        int cr = Py[Cr_ofs + j];

        if (jpg_format == ERGBFormatJPG::BGRA)
        {
          d[0] = clamp(y + m_cbb[cb]);
          d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
          d[2] = clamp(y + m_crr[cr]);
          d[3] = 255;
        }
        else
        {
          d[0] = clamp(y + m_crr[cr]);
          d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16));
          d[2] = clamp(y + m_cbb[cb]);
          d[3] = 255;
        }

        d += 4;
      }
    }

    Py += 64 * m_expanded_blocks_per_mcu;
  }
}
|
2192 |
-
|
2193 |
-
// Find end of image (EOI) marker, so we can return to the user the exact size of the input stream.
|
2194 |
-
void jpeg_decoder::find_eoi()
|
2195 |
-
{
|
2196 |
-
if (!m_progressive_flag)
|
2197 |
-
{
|
2198 |
-
// Attempt to read the EOI marker.
|
2199 |
-
//get_bits_no_markers(m_bits_left & 7);
|
2200 |
-
|
2201 |
-
// Prime the bit buffer
|
2202 |
-
m_bits_left = 16;
|
2203 |
-
get_bits(16);
|
2204 |
-
get_bits(16);
|
2205 |
-
|
2206 |
-
// The next marker _should_ be EOI
|
2207 |
-
process_markers();
|
2208 |
-
}
|
2209 |
-
|
2210 |
-
m_total_bytes_read -= m_in_buf_left;
|
2211 |
-
}
|
2212 |
-
|
2213 |
-
// Public per-scan-line entry point. Returns JPGD_SUCCESS and sets *pScan_line /
// *pScan_line_len, JPGD_DONE when the image is exhausted, or JPGD_FAILED on error.
// Decodes an MCU row lazily when the previous one has been fully emitted, then
// color-converts exactly one scan line from it.
int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len)
{
  if ((m_error_code) || (!m_ready_flag))
    return JPGD_FAILED;

  if (m_total_lines_left == 0)
    return JPGD_DONE;

  // Out of buffered lines: decode the next MCU row.
  if (m_mcu_lines_left == 0)
  {
    // stop_decoding() longjmps back here on any decode error.
    if (setjmp(m_jmp_state))
      return JPGD_FAILED;

    if (m_progressive_flag)
      load_next_row();
    else
      decode_next_row();

    // Find the EOI marker if that was the last row.
    if (m_total_lines_left <= m_max_mcu_y_size)
      find_eoi();

    m_mcu_lines_left = m_max_mcu_y_size;
  }

  if (m_freq_domain_chroma_upsample)
  {
    expanded_convert();
    *pScan_line = m_pScan_line_0;
  }
  else
  {
    switch (m_scan_type)
    {
      case JPGD_YH2V2:
      {
        // Vertically subsampled scans convert two lines at once: the second
        // line of each pair is served from m_pScan_line_1.
        if ((m_mcu_lines_left & 1) == 0)
        {
          H2V2Convert();
          *pScan_line = m_pScan_line_0;
        }
        else
          *pScan_line = m_pScan_line_1;

        break;
      }
      case JPGD_YH2V1:
      {
        H2V1Convert();
        *pScan_line = m_pScan_line_0;
        break;
      }
      case JPGD_YH1V2:
      {
        if ((m_mcu_lines_left & 1) == 0)
        {
          H1V2Convert();
          *pScan_line = m_pScan_line_0;
        }
        else
          *pScan_line = m_pScan_line_1;

        break;
      }
      case JPGD_YH1V1:
      {
        H1V1Convert();
        *pScan_line = m_pScan_line_0;
        break;
      }
      case JPGD_GRAYSCALE:
      {
        gray_convert();
        *pScan_line = m_pScan_line_0;

        break;
      }
    }
  }

  *pScan_line_len = m_real_dest_bytes_per_scan_line;

  m_mcu_lines_left--;
  m_total_lines_left--;

  return JPGD_SUCCESS;
}
|
2300 |
-
|
2301 |
-
// Creates the tables needed for efficient Huffman decoding.
// Builds, from the raw DHT data (m_huff_num / m_huff_val), the canonical code
// lengths and codes, then a 256-entry fast lookup for codes of <= 8 bits
// (look_up / look_up2) and a binary tree (pH->tree) for longer codes.
void jpeg_decoder::make_huff_table(int index, huff_tables *pH)
{
  int p, i, l, si;
  uint8 huffsize[257];
  uint huffcode[257];
  uint code;
  uint subtree;
  int code_size;
  int lastp;
  int nextfreeentry;
  int currententry;

  pH->ac_table = m_huff_ac[index] != 0;

  p = 0;

  // Expand the per-length symbol counts into a flat list of code sizes.
  for (l = 1; l <= 16; l++)
  {
    for (i = 1; i <= m_huff_num[index][l]; i++)
      huffsize[p++] = static_cast<uint8>(l);
  }

  huffsize[p] = 0;

  lastp = p;

  // Assign canonical Huffman codes: consecutive values per length,
  // left-shifted when the length increases.
  code = 0;
  si = huffsize[0];
  p = 0;

  while (huffsize[p])
  {
    while (huffsize[p] == si)
    {
      huffcode[p++] = code;
      code++;
    }

    code <<= 1;
    si++;
  }

  memset(pH->look_up, 0, sizeof(pH->look_up));
  memset(pH->look_up2, 0, sizeof(pH->look_up2));
  memset(pH->tree, 0, sizeof(pH->tree));
  memset(pH->code_size, 0, sizeof(pH->code_size));

  // Tree nodes are addressed with negative indices; each node takes 2 slots.
  nextfreeentry = -1;

  p = 0;

  while (p < lastp)
  {
    i = m_huff_val[index][p];
    code = huffcode[p];
    code_size = huffsize[p];

    pH->code_size[i] = static_cast<uint8>(code_size);

    if (code_size <= 8)
    {
      // Short code: fill every lookup slot whose top bits match this code.
      code <<= (8 - code_size);

      for (l = 1 << (8 - code_size); l > 0; l--)
      {
        JPGD_ASSERT(i < 256);

        pH->look_up[code] = i;

        // look_up2 additionally folds in the magnitude extra-bits when the
        // whole code+extra fits in 8 bits, so one lookup decodes everything.
        bool has_extrabits = false;
        int extra_bits = 0;
        int num_extra_bits = i & 15;

        int bits_to_fetch = code_size;
        if (num_extra_bits)
        {
          int total_codesize = code_size + num_extra_bits;
          if (total_codesize <= 8)
          {
            has_extrabits = true;
            extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize));
            JPGD_ASSERT(extra_bits <= 0x7FFF);
            bits_to_fetch += num_extra_bits;
          }
        }

        if (!has_extrabits)
          pH->look_up2[code] = i | (bits_to_fetch << 8);
        else
          pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8);

        code++;
      }
    }
    else
    {
      // Long code: walk/extend the binary tree rooted at the 8-bit prefix.
      subtree = (code >> (code_size - 8)) & 0xFF;

      currententry = pH->look_up[subtree];

      if (currententry == 0)
      {
        pH->look_up[subtree] = currententry = nextfreeentry;
        pH->look_up2[subtree] = currententry = nextfreeentry;

        nextfreeentry -= 2;
      }

      code <<= (16 - (code_size - 8));

      for (l = code_size; l > 9; l--)
      {
        if ((code & 0x8000) == 0)
          currententry--;

        if (pH->tree[-currententry - 1] == 0)
        {
          pH->tree[-currententry - 1] = nextfreeentry;

          currententry = nextfreeentry;

          nextfreeentry -= 2;
        }
        else
          currententry = pH->tree[-currententry - 1];

        code <<= 1;
      }

      if ((code & 0x8000) == 0)
        currententry--;

      // Leaf: store the decoded symbol.
      pH->tree[-currententry - 1] = i;
    }

    p++;
  }
}
|
2440 |
-
|
2441 |
-
// Verifies the quantization tables needed for this scan are available.
|
2442 |
-
void jpeg_decoder::check_quant_tables()
|
2443 |
-
{
|
2444 |
-
for (int i = 0; i < m_comps_in_scan; i++)
|
2445 |
-
if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL)
|
2446 |
-
stop_decoding(JPGD_UNDEFINED_QUANT_TABLE);
|
2447 |
-
}
|
2448 |
-
|
2449 |
-
// Verifies that all the Huffman tables needed for this scan are available.
|
2450 |
-
void jpeg_decoder::check_huff_tables()
|
2451 |
-
{
|
2452 |
-
for (int i = 0; i < m_comps_in_scan; i++)
|
2453 |
-
{
|
2454 |
-
if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL))
|
2455 |
-
stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);
|
2456 |
-
|
2457 |
-
if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL))
|
2458 |
-
stop_decoding(JPGD_UNDEFINED_HUFF_TABLE);
|
2459 |
-
}
|
2460 |
-
|
2461 |
-
for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++)
|
2462 |
-
if (m_huff_num[i])
|
2463 |
-
{
|
2464 |
-
if (!m_pHuff_tabs[i])
|
2465 |
-
m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables));
|
2466 |
-
|
2467 |
-
make_huff_table(i, m_pHuff_tabs[i]);
|
2468 |
-
}
|
2469 |
-
}
|
2470 |
-
|
2471 |
-
// Determines the component order inside each MCU.
// Also calcs how many MCU's are on each row, etc.
void jpeg_decoder::calc_mcu_block_order()
{
  int component_num, component_id;
  int max_h_samp = 0, max_v_samp = 0;

  // Find the maximum sampling factors over all frame components.
  for (component_id = 0; component_id < m_comps_in_frame; component_id++)
  {
    if (m_comp_h_samp[component_id] > max_h_samp)
      max_h_samp = m_comp_h_samp[component_id];

    if (m_comp_v_samp[component_id] > max_v_samp)
      max_v_samp = m_comp_v_samp[component_id];
  }

  // Per-component block counts, rounded up to whole 8x8 blocks.
  for (component_id = 0; component_id < m_comps_in_frame; component_id++)
  {
    m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8;
    m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8;
  }

  // Single-component (non-interleaved) scans: one block per MCU.
  if (m_comps_in_scan == 1)
  {
    m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]];
    m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]];
  }
  else
  {
    m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp;
    m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp;
  }

  if (m_comps_in_scan == 1)
  {
    m_mcu_org[0] = m_comp_list[0];

    m_blocks_per_mcu = 1;
  }
  else
  {
    // Interleaved: each component contributes h_samp * v_samp blocks per MCU,
    // in component order.
    m_blocks_per_mcu = 0;

    for (component_num = 0; component_num < m_comps_in_scan; component_num++)
    {
      int num_blocks;

      component_id = m_comp_list[component_num];

      num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id];

      while (num_blocks--)
        m_mcu_org[m_blocks_per_mcu++] = component_id;
    }
  }
}
|
2527 |
-
|
2528 |
-
// Starts a new scan.
// Locates the next SOS marker, derives the MCU layout for it, validates the
// tables it references, and resets per-scan entropy state. Returns JPGD_FALSE
// if no further SOS marker exists.
int jpeg_decoder::init_scan()
{
  if (!locate_sos_marker())
    return JPGD_FALSE;

  calc_mcu_block_order();

  check_huff_tables();

  check_quant_tables();

  // Fresh DC predictors and EOB run for the new scan.
  memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint));

  m_eob_run = 0;

  if (m_restart_interval)
  {
    m_restarts_left = m_restart_interval;
    m_next_restart_num = 0;
  }

  fix_in_buffer();

  return JPGD_TRUE;
}
|
2554 |
-
|
2555 |
-
// Starts a frame. Determines if the number of components or sampling factors
// are supported.
// Classifies the scan type (grayscale or one of four YCbCr subsampling modes),
// sizes the MCU grid and scan-line buffers, and allocates the per-MCU
// coefficient and sample buffers.
void jpeg_decoder::init_frame()
{
  int i;

  if (m_comps_in_frame == 1)
  {
    if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1))
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);

    m_scan_type = JPGD_GRAYSCALE;
    m_max_blocks_per_mcu = 1;
    m_max_mcu_x_size = 8;
    m_max_mcu_y_size = 8;
  }
  else if (m_comps_in_frame == 3)
  {
    // Chroma components must be unsubsampled relative to themselves; only the
    // luma sampling factors select the mode.
    if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) ||
         ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) )
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);

    if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
    {
      m_scan_type = JPGD_YH1V1;

      m_max_blocks_per_mcu = 3;
      m_max_mcu_x_size = 8;
      m_max_mcu_y_size = 8;
    }
    else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
    {
      m_scan_type = JPGD_YH2V1;
      m_max_blocks_per_mcu = 4;
      m_max_mcu_x_size = 16;
      m_max_mcu_y_size = 8;
    }
    else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2))
    {
      m_scan_type = JPGD_YH1V2;
      m_max_blocks_per_mcu = 4;
      m_max_mcu_x_size = 8;
      m_max_mcu_y_size = 16;
    }
    else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
    {
      m_scan_type = JPGD_YH2V2;
      m_max_blocks_per_mcu = 6;
      m_max_mcu_x_size = 16;
      m_max_mcu_y_size = 16;
    }
    else
      stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS);
  }
  else
    stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);

  m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size;
  m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size;

  // These values are for the *destination* pixels: after conversion.
  if (m_scan_type == JPGD_GRAYSCALE)
    m_dest_bytes_per_pixel = 1;
  else
    m_dest_bytes_per_pixel = 4;

  // Scan-line buffer width is rounded up to a multiple of 16 pixels.
  m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel;

  m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel);

  // Initialize two scan line buffers.
  m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);
  // Vertically-subsampled modes emit two lines per conversion pass.
  if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2))
    m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true);

  m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu;

  // Should never happen
  if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW)
    stop_decoding(JPGD_ASSERTION_ERROR);

  // Allocate the coefficient buffer, enough for one MCU
  m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t));

  for (i = 0; i < m_max_blocks_per_mcu; i++)
    m_mcu_block_max_zag[i] = 64;

  m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0];
  m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame;
  m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu;
  // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor.
  // BEGIN EPIC MOD
#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING
  m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3);
#else
  m_freq_domain_chroma_upsample = 0;
#endif
  // END EPIC MOD

  if (m_freq_domain_chroma_upsample)
    m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64);
  else
    m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64);

  m_total_lines_left = m_image_y_size;

  m_mcu_lines_left = 0;

  create_look_ups();
}
|
2665 |
-
|
2666 |
-
// The coeff_buf series of methods originally stored the coefficients
// into a "virtual" file which was located in EMS, XMS, or a disk file. A cache
// was used to make this process more efficient. Now, we can store the entire
// thing in RAM.
// Allocates a zero-initialized 2D grid of coefficient blocks; block_len_x/y is
// the per-block coefficient extent, block_num_x/y the grid dimensions.
jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y)
{
  coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf));

  cb->block_num_x = block_num_x;
  cb->block_num_y = block_num_y;
  cb->block_len_x = block_len_x;
  cb->block_len_y = block_len_y;
  cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t);
  // alloc(..., true) returns zeroed storage for the whole grid.
  cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true);
  return cb;
}
|
2682 |
-
|
2683 |
-
// Returns a pointer to the coefficient block at grid position (block_x, block_y).
inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y)
{
  JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y));
  // Row-major layout: a full row of blocks is block_size * block_num_x bytes.
  uint8 *pBlock = cb->pData + block_y * (cb->block_size * cb->block_num_x) + block_x * cb->block_size;
  return (jpgd_block_t *)pBlock;
}
|
2688 |
-
|
2689 |
-
// The following methods decode the various types of m_blocks encountered
// in progressively encoded images.
// First DC scan: decodes the DC delta, adds it to the component's predictor,
// and stores it shifted up by the successive-approximation low bit.
void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  int s, r;
  jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);

  if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0)
  {
    r = pD->get_bits_no_markers(s);
    s = HUFF_EXTEND(r, s);
  }

  pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]);

  p[0] = static_cast<jpgd_block_t>(s << pD->m_successive_low);
}
|
2706 |
-
|
2707 |
-
// DC refinement scan: a set correction bit adds one more magnitude bit to the
// previously decoded DC coefficient.
void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  if (!pD->get_bits_no_markers(1))
    return;

  jpgd_block_t *pDC = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y);
  pDC[0] |= (1 << pD->m_successive_low);
}
|
2716 |
-
|
2717 |
-
// First AC scan of a progressive image: decodes run/size pairs over the band
// [m_spectral_start, m_spectral_end], honoring EOB runs that skip whole blocks.
void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y)
{
  int k, s, r;

  // An outstanding EOB run means this entire block has no new coefficients.
  if (pD->m_eob_run)
  {
    pD->m_eob_run--;
    return;
  }

  jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);

  for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++)
  {
    s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);

    // High nibble = zero run, low nibble = magnitude bit count.
    r = s >> 4;
    s &= 15;

    if (s)
    {
      if ((k += r) > 63)
        pD->stop_decoding(JPGD_DECODE_ERROR);

      r = pD->get_bits_no_markers(s);
      s = HUFF_EXTEND(r, s);

      p[g_ZAG[k]] = static_cast<jpgd_block_t>(s << pD->m_successive_low);
    }
    else
    {
      if (r == 15)
      {
        // ZRL: skip 15 zeros (plus the loop's own k++ makes 16).
        if ((k += 15) > 63)
          pD->stop_decoding(JPGD_DECODE_ERROR);
      }
      else
      {
        // EOB run of length 2^r plus r extra bits; covers this block too,
        // hence the immediate decrement.
        pD->m_eob_run = 1 << r;

        if (r)
          pD->m_eob_run += pD->get_bits_no_markers(r);

        pD->m_eob_run--;

        break;
      }
    }
  }
}
|
2767 |
-
|
2768 |
-
void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y)
|
2769 |
-
{
|
2770 |
-
int s, k, r;
|
2771 |
-
int p1 = 1 << pD->m_successive_low;
|
2772 |
-
int m1 = (-1) << pD->m_successive_low;
|
2773 |
-
jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y);
|
2774 |
-
|
2775 |
-
k = pD->m_spectral_start;
|
2776 |
-
|
2777 |
-
if (pD->m_eob_run == 0)
|
2778 |
-
{
|
2779 |
-
for ( ; k <= pD->m_spectral_end; k++)
|
2780 |
-
{
|
2781 |
-
s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]);
|
2782 |
-
|
2783 |
-
r = s >> 4;
|
2784 |
-
s &= 15;
|
2785 |
-
|
2786 |
-
if (s)
|
2787 |
-
{
|
2788 |
-
if (s != 1)
|
2789 |
-
pD->stop_decoding(JPGD_DECODE_ERROR);
|
2790 |
-
|
2791 |
-
if (pD->get_bits_no_markers(1))
|
2792 |
-
s = p1;
|
2793 |
-
else
|
2794 |
-
s = m1;
|
2795 |
-
}
|
2796 |
-
else
|
2797 |
-
{
|
2798 |
-
if (r != 15)
|
2799 |
-
{
|
2800 |
-
pD->m_eob_run = 1 << r;
|
2801 |
-
|
2802 |
-
if (r)
|
2803 |
-
pD->m_eob_run += pD->get_bits_no_markers(r);
|
2804 |
-
|
2805 |
-
break;
|
2806 |
-
}
|
2807 |
-
}
|
2808 |
-
|
2809 |
-
do
|
2810 |
-
{
|
2811 |
-
// BEGIN EPIC MOD
|
2812 |
-
JPGD_ASSERT(k < 64);
|
2813 |
-
// END EPIC MOD
|
2814 |
-
|
2815 |
-
jpgd_block_t *this_coef = p + g_ZAG[k];
|
2816 |
-
|
2817 |
-
if (*this_coef != 0)
|
2818 |
-
{
|
2819 |
-
if (pD->get_bits_no_markers(1))
|
2820 |
-
{
|
2821 |
-
if ((*this_coef & p1) == 0)
|
2822 |
-
{
|
2823 |
-
if (*this_coef >= 0)
|
2824 |
-
*this_coef = static_cast<jpgd_block_t>(*this_coef + p1);
|
2825 |
-
else
|
2826 |
-
*this_coef = static_cast<jpgd_block_t>(*this_coef + m1);
|
2827 |
-
}
|
2828 |
-
}
|
2829 |
-
}
|
2830 |
-
else
|
2831 |
-
{
|
2832 |
-
if (--r < 0)
|
2833 |
-
break;
|
2834 |
-
}
|
2835 |
-
|
2836 |
-
k++;
|
2837 |
-
|
2838 |
-
} while (k <= pD->m_spectral_end);
|
2839 |
-
|
2840 |
-
if ((s) && (k < 64))
|
2841 |
-
{
|
2842 |
-
p[g_ZAG[k]] = static_cast<jpgd_block_t>(s);
|
2843 |
-
}
|
2844 |
-
}
|
2845 |
-
}
|
2846 |
-
|
2847 |
-
if (pD->m_eob_run > 0)
|
2848 |
-
{
|
2849 |
-
for ( ; k <= pD->m_spectral_end; k++)
|
2850 |
-
{
|
2851 |
-
// BEGIN EPIC MOD
|
2852 |
-
JPGD_ASSERT(k < 64);
|
2853 |
-
// END EPIC MOD
|
2854 |
-
|
2855 |
-
jpgd_block_t *this_coef = p + g_ZAG[k];
|
2856 |
-
|
2857 |
-
if (*this_coef != 0)
|
2858 |
-
{
|
2859 |
-
if (pD->get_bits_no_markers(1))
|
2860 |
-
{
|
2861 |
-
if ((*this_coef & p1) == 0)
|
2862 |
-
{
|
2863 |
-
if (*this_coef >= 0)
|
2864 |
-
*this_coef = static_cast<jpgd_block_t>(*this_coef + p1);
|
2865 |
-
else
|
2866 |
-
*this_coef = static_cast<jpgd_block_t>(*this_coef + m1);
|
2867 |
-
}
|
2868 |
-
}
|
2869 |
-
}
|
2870 |
-
}
|
2871 |
-
|
2872 |
-
pD->m_eob_run--;
|
2873 |
-
}
|
2874 |
-
}
|
2875 |
-
|
2876 |
-
// Decode a scan in a progressively encoded image.
|
2877 |
-
void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func)
|
2878 |
-
{
|
2879 |
-
int mcu_row, mcu_col, mcu_block;
|
2880 |
-
int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS];
|
2881 |
-
|
2882 |
-
memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu));
|
2883 |
-
|
2884 |
-
for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++)
|
2885 |
-
{
|
2886 |
-
int component_num, component_id;
|
2887 |
-
|
2888 |
-
memset(block_x_mcu, 0, sizeof(block_x_mcu));
|
2889 |
-
|
2890 |
-
for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++)
|
2891 |
-
{
|
2892 |
-
int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0;
|
2893 |
-
|
2894 |
-
if ((m_restart_interval) && (m_restarts_left == 0))
|
2895 |
-
process_restart();
|
2896 |
-
|
2897 |
-
for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++)
|
2898 |
-
{
|
2899 |
-
component_id = m_mcu_org[mcu_block];
|
2900 |
-
|
2901 |
-
decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs);
|
2902 |
-
|
2903 |
-
if (m_comps_in_scan == 1)
|
2904 |
-
block_x_mcu[component_id]++;
|
2905 |
-
else
|
2906 |
-
{
|
2907 |
-
if (++block_x_mcu_ofs == m_comp_h_samp[component_id])
|
2908 |
-
{
|
2909 |
-
block_x_mcu_ofs = 0;
|
2910 |
-
|
2911 |
-
if (++block_y_mcu_ofs == m_comp_v_samp[component_id])
|
2912 |
-
{
|
2913 |
-
block_y_mcu_ofs = 0;
|
2914 |
-
block_x_mcu[component_id] += m_comp_h_samp[component_id];
|
2915 |
-
}
|
2916 |
-
}
|
2917 |
-
}
|
2918 |
-
}
|
2919 |
-
|
2920 |
-
m_restarts_left--;
|
2921 |
-
}
|
2922 |
-
|
2923 |
-
if (m_comps_in_scan == 1)
|
2924 |
-
m_block_y_mcu[m_comp_list[0]]++;
|
2925 |
-
else
|
2926 |
-
{
|
2927 |
-
for (component_num = 0; component_num < m_comps_in_scan; component_num++)
|
2928 |
-
{
|
2929 |
-
component_id = m_comp_list[component_num];
|
2930 |
-
m_block_y_mcu[component_id] += m_comp_v_samp[component_id];
|
2931 |
-
}
|
2932 |
-
}
|
2933 |
-
}
|
2934 |
-
}
|
2935 |
-
|
2936 |
-
// Decode a progressively encoded image.
|
2937 |
-
void jpeg_decoder::init_progressive()
|
2938 |
-
{
|
2939 |
-
int i;
|
2940 |
-
|
2941 |
-
if (m_comps_in_frame == 4)
|
2942 |
-
stop_decoding(JPGD_UNSUPPORTED_COLORSPACE);
|
2943 |
-
|
2944 |
-
// Allocate the coefficient buffers.
|
2945 |
-
for (i = 0; i < m_comps_in_frame; i++)
|
2946 |
-
{
|
2947 |
-
m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1);
|
2948 |
-
m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8);
|
2949 |
-
}
|
2950 |
-
|
2951 |
-
for ( ; ; )
|
2952 |
-
{
|
2953 |
-
int dc_only_scan, refinement_scan;
|
2954 |
-
pDecode_block_func decode_block_func;
|
2955 |
-
|
2956 |
-
if (!init_scan())
|
2957 |
-
break;
|
2958 |
-
|
2959 |
-
dc_only_scan = (m_spectral_start == 0);
|
2960 |
-
refinement_scan = (m_successive_high != 0);
|
2961 |
-
|
2962 |
-
if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63))
|
2963 |
-
stop_decoding(JPGD_BAD_SOS_SPECTRAL);
|
2964 |
-
|
2965 |
-
if (dc_only_scan)
|
2966 |
-
{
|
2967 |
-
if (m_spectral_end)
|
2968 |
-
stop_decoding(JPGD_BAD_SOS_SPECTRAL);
|
2969 |
-
}
|
2970 |
-
else if (m_comps_in_scan != 1) /* AC scans can only contain one component */
|
2971 |
-
stop_decoding(JPGD_BAD_SOS_SPECTRAL);
|
2972 |
-
|
2973 |
-
if ((refinement_scan) && (m_successive_low != m_successive_high - 1))
|
2974 |
-
stop_decoding(JPGD_BAD_SOS_SUCCESSIVE);
|
2975 |
-
|
2976 |
-
if (dc_only_scan)
|
2977 |
-
{
|
2978 |
-
if (refinement_scan)
|
2979 |
-
decode_block_func = decode_block_dc_refine;
|
2980 |
-
else
|
2981 |
-
decode_block_func = decode_block_dc_first;
|
2982 |
-
}
|
2983 |
-
else
|
2984 |
-
{
|
2985 |
-
if (refinement_scan)
|
2986 |
-
decode_block_func = decode_block_ac_refine;
|
2987 |
-
else
|
2988 |
-
decode_block_func = decode_block_ac_first;
|
2989 |
-
}
|
2990 |
-
|
2991 |
-
decode_scan(decode_block_func);
|
2992 |
-
|
2993 |
-
m_bits_left = 16;
|
2994 |
-
get_bits(16);
|
2995 |
-
get_bits(16);
|
2996 |
-
}
|
2997 |
-
|
2998 |
-
m_comps_in_scan = m_comps_in_frame;
|
2999 |
-
|
3000 |
-
for (i = 0; i < m_comps_in_frame; i++)
|
3001 |
-
m_comp_list[i] = i;
|
3002 |
-
|
3003 |
-
calc_mcu_block_order();
|
3004 |
-
}
|
3005 |
-
|
3006 |
-
void jpeg_decoder::init_sequential()
|
3007 |
-
{
|
3008 |
-
if (!init_scan())
|
3009 |
-
stop_decoding(JPGD_UNEXPECTED_MARKER);
|
3010 |
-
}
|
3011 |
-
|
3012 |
-
void jpeg_decoder::decode_start()
|
3013 |
-
{
|
3014 |
-
init_frame();
|
3015 |
-
|
3016 |
-
if (m_progressive_flag)
|
3017 |
-
init_progressive();
|
3018 |
-
else
|
3019 |
-
init_sequential();
|
3020 |
-
}
|
3021 |
-
|
3022 |
-
void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream)
|
3023 |
-
{
|
3024 |
-
init(pStream);
|
3025 |
-
locate_sof_marker();
|
3026 |
-
}
|
3027 |
-
|
3028 |
-
jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream)
|
3029 |
-
{
|
3030 |
-
if (setjmp(m_jmp_state))
|
3031 |
-
return;
|
3032 |
-
decode_init(pStream);
|
3033 |
-
}
|
3034 |
-
|
3035 |
-
int jpeg_decoder::begin_decoding()
|
3036 |
-
{
|
3037 |
-
if (m_ready_flag)
|
3038 |
-
return JPGD_SUCCESS;
|
3039 |
-
|
3040 |
-
if (m_error_code)
|
3041 |
-
return JPGD_FAILED;
|
3042 |
-
|
3043 |
-
if (setjmp(m_jmp_state))
|
3044 |
-
return JPGD_FAILED;
|
3045 |
-
|
3046 |
-
decode_start();
|
3047 |
-
|
3048 |
-
m_ready_flag = true;
|
3049 |
-
|
3050 |
-
return JPGD_SUCCESS;
|
3051 |
-
}
|
3052 |
-
|
3053 |
-
jpeg_decoder::~jpeg_decoder()
|
3054 |
-
{
|
3055 |
-
free_all_blocks();
|
3056 |
-
}
|
3057 |
-
|
3058 |
-
jpeg_decoder_file_stream::jpeg_decoder_file_stream()
|
3059 |
-
{
|
3060 |
-
m_pFile = NULL;
|
3061 |
-
m_eof_flag = false;
|
3062 |
-
m_error_flag = false;
|
3063 |
-
}
|
3064 |
-
|
3065 |
-
void jpeg_decoder_file_stream::close()
|
3066 |
-
{
|
3067 |
-
if (m_pFile)
|
3068 |
-
{
|
3069 |
-
fclose(m_pFile);
|
3070 |
-
m_pFile = NULL;
|
3071 |
-
}
|
3072 |
-
|
3073 |
-
m_eof_flag = false;
|
3074 |
-
m_error_flag = false;
|
3075 |
-
}
|
3076 |
-
|
3077 |
-
jpeg_decoder_file_stream::~jpeg_decoder_file_stream()
|
3078 |
-
{
|
3079 |
-
close();
|
3080 |
-
}
|
3081 |
-
|
3082 |
-
bool jpeg_decoder_file_stream::open(const char *Pfilename)
|
3083 |
-
{
|
3084 |
-
close();
|
3085 |
-
|
3086 |
-
m_eof_flag = false;
|
3087 |
-
m_error_flag = false;
|
3088 |
-
|
3089 |
-
#if defined(_MSC_VER)
|
3090 |
-
m_pFile = NULL;
|
3091 |
-
fopen_s(&m_pFile, Pfilename, "rb");
|
3092 |
-
#else
|
3093 |
-
m_pFile = fopen(Pfilename, "rb");
|
3094 |
-
#endif
|
3095 |
-
return m_pFile != NULL;
|
3096 |
-
}
|
3097 |
-
|
3098 |
-
int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
|
3099 |
-
{
|
3100 |
-
if (!m_pFile)
|
3101 |
-
return -1;
|
3102 |
-
|
3103 |
-
if (m_eof_flag)
|
3104 |
-
{
|
3105 |
-
*pEOF_flag = true;
|
3106 |
-
return 0;
|
3107 |
-
}
|
3108 |
-
|
3109 |
-
if (m_error_flag)
|
3110 |
-
return -1;
|
3111 |
-
|
3112 |
-
int bytes_read = static_cast<int>(fread(pBuf, 1, max_bytes_to_read, m_pFile));
|
3113 |
-
if (bytes_read < max_bytes_to_read)
|
3114 |
-
{
|
3115 |
-
if (ferror(m_pFile))
|
3116 |
-
{
|
3117 |
-
m_error_flag = true;
|
3118 |
-
return -1;
|
3119 |
-
}
|
3120 |
-
|
3121 |
-
m_eof_flag = true;
|
3122 |
-
*pEOF_flag = true;
|
3123 |
-
}
|
3124 |
-
|
3125 |
-
return bytes_read;
|
3126 |
-
}
|
3127 |
-
|
3128 |
-
bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size)
|
3129 |
-
{
|
3130 |
-
close();
|
3131 |
-
m_pSrc_data = pSrc_data;
|
3132 |
-
m_ofs = 0;
|
3133 |
-
m_size = size;
|
3134 |
-
return true;
|
3135 |
-
}
|
3136 |
-
|
3137 |
-
int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag)
|
3138 |
-
{
|
3139 |
-
*pEOF_flag = false;
|
3140 |
-
|
3141 |
-
if (!m_pSrc_data)
|
3142 |
-
return -1;
|
3143 |
-
|
3144 |
-
uint bytes_remaining = m_size - m_ofs;
|
3145 |
-
if ((uint)max_bytes_to_read > bytes_remaining)
|
3146 |
-
{
|
3147 |
-
max_bytes_to_read = bytes_remaining;
|
3148 |
-
*pEOF_flag = true;
|
3149 |
-
}
|
3150 |
-
|
3151 |
-
memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read);
|
3152 |
-
m_ofs += max_bytes_to_read;
|
3153 |
-
|
3154 |
-
return max_bytes_to_read;
|
3155 |
-
}
|
3156 |
-
|
3157 |
-
unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps)
|
3158 |
-
{
|
3159 |
-
if (!actual_comps)
|
3160 |
-
return NULL;
|
3161 |
-
*actual_comps = 0;
|
3162 |
-
|
3163 |
-
if ((!pStream) || (!width) || (!height) || (!req_comps))
|
3164 |
-
return NULL;
|
3165 |
-
|
3166 |
-
if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4))
|
3167 |
-
return NULL;
|
3168 |
-
|
3169 |
-
jpeg_decoder decoder(pStream);
|
3170 |
-
if (decoder.get_error_code() != JPGD_SUCCESS)
|
3171 |
-
return NULL;
|
3172 |
-
|
3173 |
-
const int image_width = decoder.get_width(), image_height = decoder.get_height();
|
3174 |
-
*width = image_width;
|
3175 |
-
*height = image_height;
|
3176 |
-
*actual_comps = decoder.get_num_components();
|
3177 |
-
|
3178 |
-
if (decoder.begin_decoding() != JPGD_SUCCESS)
|
3179 |
-
return NULL;
|
3180 |
-
|
3181 |
-
const int dst_bpl = image_width * req_comps;
|
3182 |
-
|
3183 |
-
uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height);
|
3184 |
-
if (!pImage_data)
|
3185 |
-
return NULL;
|
3186 |
-
|
3187 |
-
for (int y = 0; y < image_height; y++)
|
3188 |
-
{
|
3189 |
-
const uint8* pScan_line = 0;
|
3190 |
-
uint scan_line_len;
|
3191 |
-
if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS)
|
3192 |
-
{
|
3193 |
-
jpgd_free(pImage_data);
|
3194 |
-
return NULL;
|
3195 |
-
}
|
3196 |
-
|
3197 |
-
uint8 *pDst = pImage_data + y * dst_bpl;
|
3198 |
-
|
3199 |
-
if (((req_comps == 4) && (decoder.get_num_components() == 3)) ||
|
3200 |
-
((req_comps == 1) && (decoder.get_num_components() == 1)))
|
3201 |
-
{
|
3202 |
-
memcpy(pDst, pScan_line, dst_bpl);
|
3203 |
-
}
|
3204 |
-
else if (decoder.get_num_components() == 1)
|
3205 |
-
{
|
3206 |
-
if (req_comps == 3)
|
3207 |
-
{
|
3208 |
-
for (int x = 0; x < image_width; x++)
|
3209 |
-
{
|
3210 |
-
uint8 luma = pScan_line[x];
|
3211 |
-
pDst[0] = luma;
|
3212 |
-
pDst[1] = luma;
|
3213 |
-
pDst[2] = luma;
|
3214 |
-
pDst += 3;
|
3215 |
-
}
|
3216 |
-
}
|
3217 |
-
else
|
3218 |
-
{
|
3219 |
-
for (int x = 0; x < image_width; x++)
|
3220 |
-
{
|
3221 |
-
uint8 luma = pScan_line[x];
|
3222 |
-
pDst[0] = luma;
|
3223 |
-
pDst[1] = luma;
|
3224 |
-
pDst[2] = luma;
|
3225 |
-
pDst[3] = 255;
|
3226 |
-
pDst += 4;
|
3227 |
-
}
|
3228 |
-
}
|
3229 |
-
}
|
3230 |
-
else if (decoder.get_num_components() == 3)
|
3231 |
-
{
|
3232 |
-
if (req_comps == 1)
|
3233 |
-
{
|
3234 |
-
const int YR = 19595, YG = 38470, YB = 7471;
|
3235 |
-
for (int x = 0; x < image_width; x++)
|
3236 |
-
{
|
3237 |
-
int r = pScan_line[x*4+0];
|
3238 |
-
int g = pScan_line[x*4+1];
|
3239 |
-
int b = pScan_line[x*4+2];
|
3240 |
-
*pDst++ = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
|
3241 |
-
}
|
3242 |
-
}
|
3243 |
-
else
|
3244 |
-
{
|
3245 |
-
for (int x = 0; x < image_width; x++)
|
3246 |
-
{
|
3247 |
-
pDst[0] = pScan_line[x*4+0];
|
3248 |
-
pDst[1] = pScan_line[x*4+1];
|
3249 |
-
pDst[2] = pScan_line[x*4+2];
|
3250 |
-
pDst += 3;
|
3251 |
-
}
|
3252 |
-
}
|
3253 |
-
}
|
3254 |
-
}
|
3255 |
-
|
3256 |
-
return pImage_data;
|
3257 |
-
}
|
3258 |
-
|
3259 |
-
// BEGIN EPIC MOD
|
3260 |
-
unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format)
|
3261 |
-
{
|
3262 |
-
jpg_format = (ERGBFormatJPG)format;
|
3263 |
-
// EMD EPIC MOD
|
3264 |
-
jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size);
|
3265 |
-
return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps);
|
3266 |
-
}
|
3267 |
-
|
3268 |
-
unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps)
|
3269 |
-
{
|
3270 |
-
jpgd::jpeg_decoder_file_stream file_stream;
|
3271 |
-
if (!file_stream.open(pSrc_filename))
|
3272 |
-
return NULL;
|
3273 |
-
return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps);
|
3274 |
-
}
|
3275 |
-
|
3276 |
-
} // namespace jpgd
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/img2img.md
DELETED
@@ -1,100 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Text-guided image-to-image generation
|
14 |
-
|
15 |
-
[[open-in-colab]]
|
16 |
-
|
17 |
-
The [`StableDiffusionImg2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images.
|
18 |
-
|
19 |
-
Before you begin, make sure you have all the necessary libraries installed:
|
20 |
-
|
21 |
-
```py
|
22 |
-
# uncomment to install the necessary libraries in Colab
|
23 |
-
#!pip install diffusers transformers ftfy accelerate
|
24 |
-
```
|
25 |
-
|
26 |
-
Get started by creating a [`StableDiffusionImg2ImgPipeline`] with a pretrained Stable Diffusion model like [`nitrosocke/Ghibli-Diffusion`](https://huggingface.co/nitrosocke/Ghibli-Diffusion).
|
27 |
-
|
28 |
-
```python
|
29 |
-
import torch
|
30 |
-
import requests
|
31 |
-
from PIL import Image
|
32 |
-
from io import BytesIO
|
33 |
-
from diffusers import StableDiffusionImg2ImgPipeline
|
34 |
-
|
35 |
-
device = "cuda"
|
36 |
-
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16).to(
|
37 |
-
device
|
38 |
-
)
|
39 |
-
```
|
40 |
-
|
41 |
-
Download and preprocess an initial image so you can pass it to the pipeline:
|
42 |
-
|
43 |
-
```python
|
44 |
-
url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
45 |
-
|
46 |
-
response = requests.get(url)
|
47 |
-
init_image = Image.open(BytesIO(response.content)).convert("RGB")
|
48 |
-
init_image.thumbnail((768, 768))
|
49 |
-
init_image
|
50 |
-
```
|
51 |
-
|
52 |
-
<div class="flex justify-center">
|
53 |
-
<img src="https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg"/>
|
54 |
-
</div>
|
55 |
-
|
56 |
-
<Tip>
|
57 |
-
|
58 |
-
💡 `strength` is a value between 0.0 and 1.0 that controls the amount of noise added to the input image. Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input.
|
59 |
-
|
60 |
-
</Tip>
|
61 |
-
|
62 |
-
Define the prompt (for this checkpoint finetuned on Ghibli-style art, you need to prefix the prompt with the `ghibli style` tokens) and run the pipeline:
|
63 |
-
|
64 |
-
```python
|
65 |
-
prompt = "ghibli style, a fantasy landscape with castles"
|
66 |
-
generator = torch.Generator(device=device).manual_seed(1024)
|
67 |
-
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
|
68 |
-
image
|
69 |
-
```
|
70 |
-
|
71 |
-
<div class="flex justify-center">
|
72 |
-
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ghibli-castles.png"/>
|
73 |
-
</div>
|
74 |
-
|
75 |
-
You can also try experimenting with a different scheduler to see how that affects the output:
|
76 |
-
|
77 |
-
```python
|
78 |
-
from diffusers import LMSDiscreteScheduler
|
79 |
-
|
80 |
-
lms = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
81 |
-
pipe.scheduler = lms
|
82 |
-
generator = torch.Generator(device=device).manual_seed(1024)
|
83 |
-
image = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
|
84 |
-
image
|
85 |
-
```
|
86 |
-
|
87 |
-
<div class="flex justify-center">
|
88 |
-
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lms-ghibli.png"/>
|
89 |
-
</div>
|
90 |
-
|
91 |
-
Check out the Spaces below, and try generating images with different values for `strength`. You'll notice that using lower values for `strength` produces images that are more similar to the original image.
|
92 |
-
|
93 |
-
Feel free to also switch the scheduler to the [`LMSDiscreteScheduler`] and see how that affects the output.
|
94 |
-
|
95 |
-
<iframe
|
96 |
-
src="https://stevhliu-ghibli-img2img.hf.space"
|
97 |
-
frameborder="0"
|
98 |
-
width="850"
|
99 |
-
height="500"
|
100 |
-
></iframe>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
backbone=dict(
|
4 |
-
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/danet_r50-d8.py', '../_base_/datasets/cityscapes.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
|
4 |
-
]
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Low-VRAM-guide.md
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
If you GPU is not large enough to fit a 16-bit model, try these in the following order:
|
2 |
-
|
3 |
-
### Load the model in 8-bit mode
|
4 |
-
|
5 |
-
```
|
6 |
-
python server.py --load-in-8bit
|
7 |
-
```
|
8 |
-
|
9 |
-
### Load the model in 4-bit mode
|
10 |
-
|
11 |
-
```
|
12 |
-
python server.py --load-in-4bit
|
13 |
-
```
|
14 |
-
|
15 |
-
### Split the model across your GPU and CPU
|
16 |
-
|
17 |
-
```
|
18 |
-
python server.py --auto-devices
|
19 |
-
```
|
20 |
-
|
21 |
-
If you can load the model with this command but it runs out of memory when you try to generate text, try increasingly limiting the amount of memory allocated to the GPU until the error stops happening:
|
22 |
-
|
23 |
-
```
|
24 |
-
python server.py --auto-devices --gpu-memory 10
|
25 |
-
python server.py --auto-devices --gpu-memory 9
|
26 |
-
python server.py --auto-devices --gpu-memory 8
|
27 |
-
...
|
28 |
-
```
|
29 |
-
|
30 |
-
where the number is in GiB.
|
31 |
-
|
32 |
-
For finer control, you can also specify the unit in MiB explicitly:
|
33 |
-
|
34 |
-
```
|
35 |
-
python server.py --auto-devices --gpu-memory 8722MiB
|
36 |
-
python server.py --auto-devices --gpu-memory 4725MiB
|
37 |
-
python server.py --auto-devices --gpu-memory 3500MiB
|
38 |
-
...
|
39 |
-
```
|
40 |
-
|
41 |
-
### Send layers to a disk cache
|
42 |
-
|
43 |
-
As a desperate last measure, you can split the model across your GPU, CPU, and disk:
|
44 |
-
|
45 |
-
```
|
46 |
-
python server.py --auto-devices --disk
|
47 |
-
```
|
48 |
-
|
49 |
-
With this, I am able to load a 30b model into my RTX 3090, but it takes 10 seconds to generate 1 word.
|
50 |
-
|
51 |
-
### DeepSpeed (experimental)
|
52 |
-
|
53 |
-
An experimental alternative to all of the above is to use DeepSpeed: [guide](DeepSpeed.md).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/llama.cpp.md
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
# llama.cpp
|
2 |
-
|
3 |
-
llama.cpp is the best backend in two important scenarios:
|
4 |
-
|
5 |
-
1) You don't have a GPU.
|
6 |
-
2) You want to run a model that doesn't fit into your GPU.
|
7 |
-
|
8 |
-
## Setting up the models
|
9 |
-
|
10 |
-
#### Pre-converted
|
11 |
-
|
12 |
-
Download the GGUF models directly into your `text-generation-webui/models` folder. It will be a single file.
|
13 |
-
|
14 |
-
* Make sure its name ends in `.gguf`.
|
15 |
-
* `q4_K_M` quantization is recommended.
|
16 |
-
|
17 |
-
#### Convert Llama yourself
|
18 |
-
|
19 |
-
Follow the instructions in the llama.cpp README to generate a GGUF: https://github.com/ggerganov/llama.cpp#prepare-data--run
|
20 |
-
|
21 |
-
## GPU acceleration
|
22 |
-
|
23 |
-
Enabled with the `--n-gpu-layers` parameter.
|
24 |
-
|
25 |
-
* If you have enough VRAM, use a high number like `--n-gpu-layers 1000` to offload all layers to the GPU.
|
26 |
-
* Otherwise, start with a low number like `--n-gpu-layers 10` and then gradually increase it until you run out of memory.
|
27 |
-
|
28 |
-
This feature works out of the box for NVIDIA GPUs on Linux (amd64) or Windows. For other GPUs, you need to uninstall `llama-cpp-python` with
|
29 |
-
|
30 |
-
```
|
31 |
-
pip uninstall -y llama-cpp-python
|
32 |
-
```
|
33 |
-
|
34 |
-
and then recompile it using the commands here: https://pypi.org/project/llama-cpp-python/
|
35 |
-
|
36 |
-
#### macOS
|
37 |
-
|
38 |
-
For macOS, these are the commands:
|
39 |
-
|
40 |
-
```
|
41 |
-
pip uninstall -y llama-cpp-python
|
42 |
-
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
|
43 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/logging_colors.py
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
# Copied from https://stackoverflow.com/a/1336640
|
2 |
-
|
3 |
-
import logging
|
4 |
-
import platform
|
5 |
-
|
6 |
-
logging.basicConfig(
|
7 |
-
format='%(asctime)s %(levelname)s:%(message)s',
|
8 |
-
datefmt='%Y-%m-%d %H:%M:%S',
|
9 |
-
)
|
10 |
-
|
11 |
-
|
12 |
-
def add_coloring_to_emit_windows(fn):
|
13 |
-
# add methods we need to the class
|
14 |
-
def _out_handle(self):
|
15 |
-
import ctypes
|
16 |
-
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
|
17 |
-
out_handle = property(_out_handle)
|
18 |
-
|
19 |
-
def _set_color(self, code):
|
20 |
-
import ctypes
|
21 |
-
|
22 |
-
# Constants from the Windows API
|
23 |
-
self.STD_OUTPUT_HANDLE = -11
|
24 |
-
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
|
25 |
-
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
|
26 |
-
|
27 |
-
setattr(logging.StreamHandler, '_set_color', _set_color)
|
28 |
-
|
29 |
-
def new(*args):
|
30 |
-
FOREGROUND_BLUE = 0x0001 # text color contains blue.
|
31 |
-
FOREGROUND_GREEN = 0x0002 # text color contains green.
|
32 |
-
FOREGROUND_RED = 0x0004 # text color contains red.
|
33 |
-
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
|
34 |
-
FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
|
35 |
-
# winbase.h
|
36 |
-
# STD_INPUT_HANDLE = -10
|
37 |
-
# STD_OUTPUT_HANDLE = -11
|
38 |
-
# STD_ERROR_HANDLE = -12
|
39 |
-
|
40 |
-
# wincon.h
|
41 |
-
# FOREGROUND_BLACK = 0x0000
|
42 |
-
FOREGROUND_BLUE = 0x0001
|
43 |
-
FOREGROUND_GREEN = 0x0002
|
44 |
-
# FOREGROUND_CYAN = 0x0003
|
45 |
-
FOREGROUND_RED = 0x0004
|
46 |
-
FOREGROUND_MAGENTA = 0x0005
|
47 |
-
FOREGROUND_YELLOW = 0x0006
|
48 |
-
# FOREGROUND_GREY = 0x0007
|
49 |
-
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
|
50 |
-
|
51 |
-
# BACKGROUND_BLACK = 0x0000
|
52 |
-
# BACKGROUND_BLUE = 0x0010
|
53 |
-
# BACKGROUND_GREEN = 0x0020
|
54 |
-
# BACKGROUND_CYAN = 0x0030
|
55 |
-
# BACKGROUND_RED = 0x0040
|
56 |
-
# BACKGROUND_MAGENTA = 0x0050
|
57 |
-
BACKGROUND_YELLOW = 0x0060
|
58 |
-
# BACKGROUND_GREY = 0x0070
|
59 |
-
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
|
60 |
-
|
61 |
-
levelno = args[1].levelno
|
62 |
-
if (levelno >= 50):
|
63 |
-
color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
|
64 |
-
elif (levelno >= 40):
|
65 |
-
color = FOREGROUND_RED | FOREGROUND_INTENSITY
|
66 |
-
elif (levelno >= 30):
|
67 |
-
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
|
68 |
-
elif (levelno >= 20):
|
69 |
-
color = FOREGROUND_GREEN
|
70 |
-
elif (levelno >= 10):
|
71 |
-
color = FOREGROUND_MAGENTA
|
72 |
-
else:
|
73 |
-
color = FOREGROUND_WHITE
|
74 |
-
args[0]._set_color(color)
|
75 |
-
|
76 |
-
ret = fn(*args)
|
77 |
-
args[0]._set_color(FOREGROUND_WHITE)
|
78 |
-
# print "after"
|
79 |
-
return ret
|
80 |
-
return new
|
81 |
-
|
82 |
-
|
83 |
-
def add_coloring_to_emit_ansi(fn):
|
84 |
-
# add methods we need to the class
|
85 |
-
def new(*args):
|
86 |
-
levelno = args[1].levelno
|
87 |
-
if (levelno >= 50):
|
88 |
-
color = '\x1b[31m' # red
|
89 |
-
elif (levelno >= 40):
|
90 |
-
color = '\x1b[31m' # red
|
91 |
-
elif (levelno >= 30):
|
92 |
-
color = '\x1b[33m' # yellow
|
93 |
-
elif (levelno >= 20):
|
94 |
-
color = '\x1b[32m' # green
|
95 |
-
elif (levelno >= 10):
|
96 |
-
color = '\x1b[35m' # pink
|
97 |
-
else:
|
98 |
-
color = '\x1b[0m' # normal
|
99 |
-
args[1].msg = color + args[1].msg + '\x1b[0m' # normal
|
100 |
-
# print "after"
|
101 |
-
return fn(*args)
|
102 |
-
return new
|
103 |
-
|
104 |
-
|
105 |
-
if platform.system() == 'Windows':
|
106 |
-
# Windows does not support ANSI escapes and we are using API calls to set the console color
|
107 |
-
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
|
108 |
-
else:
|
109 |
-
# all non-Windows platforms are supporting ANSI escapes so we use them
|
110 |
-
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
|
111 |
-
# log = logging.getLogger()
|
112 |
-
# log.addFilter(log_filter())
|
113 |
-
# //hdlr = logging.StreamHandler()
|
114 |
-
# //hdlr.setFormatter(formatter())
|
115 |
-
|
116 |
-
logger = logging.getLogger('text-generation-webui')
|
117 |
-
logger.setLevel(logging.DEBUG)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/Tm/roop/processors/frame/face_swapper.py
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
from typing import Any, List, Callable
|
2 |
-
import cv2
|
3 |
-
import insightface
|
4 |
-
import threading
|
5 |
-
|
6 |
-
import roop.globals
|
7 |
-
import roop.processors.frame.core
|
8 |
-
from roop.core import update_status
|
9 |
-
from roop.face_analyser import get_one_face, get_many_faces
|
10 |
-
from roop.typing import Face, Frame
|
11 |
-
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
|
12 |
-
|
13 |
-
FACE_SWAPPER = None
|
14 |
-
THREAD_LOCK = threading.Lock()
|
15 |
-
NAME = 'ROOP.FACE-SWAPPER'
|
16 |
-
|
17 |
-
|
18 |
-
def get_face_swapper() -> Any:
|
19 |
-
global FACE_SWAPPER
|
20 |
-
|
21 |
-
with THREAD_LOCK:
|
22 |
-
if FACE_SWAPPER is None:
|
23 |
-
model_path = resolve_relative_path('../models/inswapper_128.onnx')
|
24 |
-
FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
|
25 |
-
return FACE_SWAPPER
|
26 |
-
|
27 |
-
|
28 |
-
def pre_check() -> bool:
|
29 |
-
download_directory_path = resolve_relative_path('../models')
|
30 |
-
conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx'])
|
31 |
-
return True
|
32 |
-
|
33 |
-
|
34 |
-
def pre_start() -> bool:
|
35 |
-
if not is_image(roop.globals.source_path):
|
36 |
-
update_status('Select an image for source path.', NAME)
|
37 |
-
return False
|
38 |
-
elif not get_one_face(cv2.imread(roop.globals.source_path)):
|
39 |
-
update_status('No face in source path detected.', NAME)
|
40 |
-
return False
|
41 |
-
if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
|
42 |
-
update_status('Select an image or video for target path.', NAME)
|
43 |
-
return False
|
44 |
-
return True
|
45 |
-
|
46 |
-
|
47 |
-
def post_process() -> None:
|
48 |
-
global FACE_SWAPPER
|
49 |
-
|
50 |
-
FACE_SWAPPER = None
|
51 |
-
|
52 |
-
|
53 |
-
def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
|
54 |
-
return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
|
55 |
-
|
56 |
-
|
57 |
-
def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
|
58 |
-
if roop.globals.many_faces:
|
59 |
-
many_faces = get_many_faces(temp_frame)
|
60 |
-
if many_faces:
|
61 |
-
for target_face in many_faces:
|
62 |
-
temp_frame = swap_face(source_face, target_face, temp_frame)
|
63 |
-
else:
|
64 |
-
target_face = get_one_face(temp_frame)
|
65 |
-
if target_face:
|
66 |
-
temp_frame = swap_face(source_face, target_face, temp_frame)
|
67 |
-
return temp_frame
|
68 |
-
|
69 |
-
|
70 |
-
def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
|
71 |
-
source_face = get_one_face(cv2.imread(source_path))
|
72 |
-
for temp_frame_path in temp_frame_paths:
|
73 |
-
temp_frame = cv2.imread(temp_frame_path)
|
74 |
-
result = process_frame(source_face, temp_frame)
|
75 |
-
cv2.imwrite(temp_frame_path, result)
|
76 |
-
if update:
|
77 |
-
update()
|
78 |
-
|
79 |
-
|
80 |
-
def process_image(source_path: str, target_path: str, output_path: str) -> None:
|
81 |
-
source_face = get_one_face(cv2.imread(source_path))
|
82 |
-
target_frame = cv2.imread(target_path)
|
83 |
-
result = process_frame(source_face, target_frame)
|
84 |
-
cv2.imwrite(output_path, result)
|
85 |
-
|
86 |
-
|
87 |
-
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
|
88 |
-
roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/segment.py
DELETED
@@ -1,739 +0,0 @@
|
|
1 |
-
from enum import IntEnum
|
2 |
-
from functools import lru_cache
|
3 |
-
from itertools import filterfalse
|
4 |
-
from logging import getLogger
|
5 |
-
from operator import attrgetter
|
6 |
-
from typing import (
|
7 |
-
TYPE_CHECKING,
|
8 |
-
Dict,
|
9 |
-
Iterable,
|
10 |
-
List,
|
11 |
-
NamedTuple,
|
12 |
-
Optional,
|
13 |
-
Sequence,
|
14 |
-
Tuple,
|
15 |
-
Type,
|
16 |
-
Union,
|
17 |
-
)
|
18 |
-
|
19 |
-
from .cells import (
|
20 |
-
_is_single_cell_widths,
|
21 |
-
cached_cell_len,
|
22 |
-
cell_len,
|
23 |
-
get_character_cell_size,
|
24 |
-
set_cell_size,
|
25 |
-
)
|
26 |
-
from .repr import Result, rich_repr
|
27 |
-
from .style import Style
|
28 |
-
|
29 |
-
if TYPE_CHECKING:
|
30 |
-
from .console import Console, ConsoleOptions, RenderResult
|
31 |
-
|
32 |
-
log = getLogger("rich")
|
33 |
-
|
34 |
-
|
35 |
-
class ControlType(IntEnum):
|
36 |
-
"""Non-printable control codes which typically translate to ANSI codes."""
|
37 |
-
|
38 |
-
BELL = 1
|
39 |
-
CARRIAGE_RETURN = 2
|
40 |
-
HOME = 3
|
41 |
-
CLEAR = 4
|
42 |
-
SHOW_CURSOR = 5
|
43 |
-
HIDE_CURSOR = 6
|
44 |
-
ENABLE_ALT_SCREEN = 7
|
45 |
-
DISABLE_ALT_SCREEN = 8
|
46 |
-
CURSOR_UP = 9
|
47 |
-
CURSOR_DOWN = 10
|
48 |
-
CURSOR_FORWARD = 11
|
49 |
-
CURSOR_BACKWARD = 12
|
50 |
-
CURSOR_MOVE_TO_COLUMN = 13
|
51 |
-
CURSOR_MOVE_TO = 14
|
52 |
-
ERASE_IN_LINE = 15
|
53 |
-
SET_WINDOW_TITLE = 16
|
54 |
-
|
55 |
-
|
56 |
-
ControlCode = Union[
|
57 |
-
Tuple[ControlType],
|
58 |
-
Tuple[ControlType, Union[int, str]],
|
59 |
-
Tuple[ControlType, int, int],
|
60 |
-
]
|
61 |
-
|
62 |
-
|
63 |
-
@rich_repr()
|
64 |
-
class Segment(NamedTuple):
|
65 |
-
"""A piece of text with associated style. Segments are produced by the Console render process and
|
66 |
-
are ultimately converted in to strings to be written to the terminal.
|
67 |
-
|
68 |
-
Args:
|
69 |
-
text (str): A piece of text.
|
70 |
-
style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
|
71 |
-
control (Tuple[ControlCode], optional): Optional sequence of control codes.
|
72 |
-
|
73 |
-
Attributes:
|
74 |
-
cell_length (int): The cell length of this Segment.
|
75 |
-
"""
|
76 |
-
|
77 |
-
text: str
|
78 |
-
style: Optional[Style] = None
|
79 |
-
control: Optional[Sequence[ControlCode]] = None
|
80 |
-
|
81 |
-
@property
|
82 |
-
def cell_length(self) -> int:
|
83 |
-
"""The number of terminal cells required to display self.text.
|
84 |
-
|
85 |
-
Returns:
|
86 |
-
int: A number of cells.
|
87 |
-
"""
|
88 |
-
text, _style, control = self
|
89 |
-
return 0 if control else cell_len(text)
|
90 |
-
|
91 |
-
def __rich_repr__(self) -> Result:
|
92 |
-
yield self.text
|
93 |
-
if self.control is None:
|
94 |
-
if self.style is not None:
|
95 |
-
yield self.style
|
96 |
-
else:
|
97 |
-
yield self.style
|
98 |
-
yield self.control
|
99 |
-
|
100 |
-
def __bool__(self) -> bool:
|
101 |
-
"""Check if the segment contains text."""
|
102 |
-
return bool(self.text)
|
103 |
-
|
104 |
-
@property
|
105 |
-
def is_control(self) -> bool:
|
106 |
-
"""Check if the segment contains control codes."""
|
107 |
-
return self.control is not None
|
108 |
-
|
109 |
-
@classmethod
|
110 |
-
@lru_cache(1024 * 16)
|
111 |
-
def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
|
112 |
-
|
113 |
-
text, style, control = segment
|
114 |
-
_Segment = Segment
|
115 |
-
|
116 |
-
cell_length = segment.cell_length
|
117 |
-
if cut >= cell_length:
|
118 |
-
return segment, _Segment("", style, control)
|
119 |
-
|
120 |
-
cell_size = get_character_cell_size
|
121 |
-
|
122 |
-
pos = int((cut / cell_length) * (len(text) - 1))
|
123 |
-
|
124 |
-
before = text[:pos]
|
125 |
-
cell_pos = cell_len(before)
|
126 |
-
if cell_pos == cut:
|
127 |
-
return (
|
128 |
-
_Segment(before, style, control),
|
129 |
-
_Segment(text[pos:], style, control),
|
130 |
-
)
|
131 |
-
while pos < len(text):
|
132 |
-
char = text[pos]
|
133 |
-
pos += 1
|
134 |
-
cell_pos += cell_size(char)
|
135 |
-
before = text[:pos]
|
136 |
-
if cell_pos == cut:
|
137 |
-
return (
|
138 |
-
_Segment(before, style, control),
|
139 |
-
_Segment(text[pos:], style, control),
|
140 |
-
)
|
141 |
-
if cell_pos > cut:
|
142 |
-
return (
|
143 |
-
_Segment(before[: pos - 1] + " ", style, control),
|
144 |
-
_Segment(" " + text[pos:], style, control),
|
145 |
-
)
|
146 |
-
|
147 |
-
raise AssertionError("Will never reach here")
|
148 |
-
|
149 |
-
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
|
150 |
-
"""Split segment in to two segments at the specified column.
|
151 |
-
|
152 |
-
If the cut point falls in the middle of a 2-cell wide character then it is replaced
|
153 |
-
by two spaces, to preserve the display width of the parent segment.
|
154 |
-
|
155 |
-
Returns:
|
156 |
-
Tuple[Segment, Segment]: Two segments.
|
157 |
-
"""
|
158 |
-
text, style, control = self
|
159 |
-
|
160 |
-
if _is_single_cell_widths(text):
|
161 |
-
# Fast path with all 1 cell characters
|
162 |
-
if cut >= len(text):
|
163 |
-
return self, Segment("", style, control)
|
164 |
-
return (
|
165 |
-
Segment(text[:cut], style, control),
|
166 |
-
Segment(text[cut:], style, control),
|
167 |
-
)
|
168 |
-
|
169 |
-
return self._split_cells(self, cut)
|
170 |
-
|
171 |
-
@classmethod
|
172 |
-
def line(cls) -> "Segment":
|
173 |
-
"""Make a new line segment."""
|
174 |
-
return cls("\n")
|
175 |
-
|
176 |
-
@classmethod
|
177 |
-
def apply_style(
|
178 |
-
cls,
|
179 |
-
segments: Iterable["Segment"],
|
180 |
-
style: Optional[Style] = None,
|
181 |
-
post_style: Optional[Style] = None,
|
182 |
-
) -> Iterable["Segment"]:
|
183 |
-
"""Apply style(s) to an iterable of segments.
|
184 |
-
|
185 |
-
Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
|
186 |
-
|
187 |
-
Args:
|
188 |
-
segments (Iterable[Segment]): Segments to process.
|
189 |
-
style (Style, optional): Base style. Defaults to None.
|
190 |
-
post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
|
191 |
-
|
192 |
-
Returns:
|
193 |
-
Iterable[Segments]: A new iterable of segments (possibly the same iterable).
|
194 |
-
"""
|
195 |
-
result_segments = segments
|
196 |
-
if style:
|
197 |
-
apply = style.__add__
|
198 |
-
result_segments = (
|
199 |
-
cls(text, None if control else apply(_style), control)
|
200 |
-
for text, _style, control in result_segments
|
201 |
-
)
|
202 |
-
if post_style:
|
203 |
-
result_segments = (
|
204 |
-
cls(
|
205 |
-
text,
|
206 |
-
(
|
207 |
-
None
|
208 |
-
if control
|
209 |
-
else (_style + post_style if _style else post_style)
|
210 |
-
),
|
211 |
-
control,
|
212 |
-
)
|
213 |
-
for text, _style, control in result_segments
|
214 |
-
)
|
215 |
-
return result_segments
|
216 |
-
|
217 |
-
@classmethod
|
218 |
-
def filter_control(
|
219 |
-
cls, segments: Iterable["Segment"], is_control: bool = False
|
220 |
-
) -> Iterable["Segment"]:
|
221 |
-
"""Filter segments by ``is_control`` attribute.
|
222 |
-
|
223 |
-
Args:
|
224 |
-
segments (Iterable[Segment]): An iterable of Segment instances.
|
225 |
-
is_control (bool, optional): is_control flag to match in search.
|
226 |
-
|
227 |
-
Returns:
|
228 |
-
Iterable[Segment]: And iterable of Segment instances.
|
229 |
-
|
230 |
-
"""
|
231 |
-
if is_control:
|
232 |
-
return filter(attrgetter("control"), segments)
|
233 |
-
else:
|
234 |
-
return filterfalse(attrgetter("control"), segments)
|
235 |
-
|
236 |
-
@classmethod
|
237 |
-
def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
|
238 |
-
"""Split a sequence of segments in to a list of lines.
|
239 |
-
|
240 |
-
Args:
|
241 |
-
segments (Iterable[Segment]): Segments potentially containing line feeds.
|
242 |
-
|
243 |
-
Yields:
|
244 |
-
Iterable[List[Segment]]: Iterable of segment lists, one per line.
|
245 |
-
"""
|
246 |
-
line: List[Segment] = []
|
247 |
-
append = line.append
|
248 |
-
|
249 |
-
for segment in segments:
|
250 |
-
if "\n" in segment.text and not segment.control:
|
251 |
-
text, style, _ = segment
|
252 |
-
while text:
|
253 |
-
_text, new_line, text = text.partition("\n")
|
254 |
-
if _text:
|
255 |
-
append(cls(_text, style))
|
256 |
-
if new_line:
|
257 |
-
yield line
|
258 |
-
line = []
|
259 |
-
append = line.append
|
260 |
-
else:
|
261 |
-
append(segment)
|
262 |
-
if line:
|
263 |
-
yield line
|
264 |
-
|
265 |
-
@classmethod
|
266 |
-
def split_and_crop_lines(
|
267 |
-
cls,
|
268 |
-
segments: Iterable["Segment"],
|
269 |
-
length: int,
|
270 |
-
style: Optional[Style] = None,
|
271 |
-
pad: bool = True,
|
272 |
-
include_new_lines: bool = True,
|
273 |
-
) -> Iterable[List["Segment"]]:
|
274 |
-
"""Split segments in to lines, and crop lines greater than a given length.
|
275 |
-
|
276 |
-
Args:
|
277 |
-
segments (Iterable[Segment]): An iterable of segments, probably
|
278 |
-
generated from console.render.
|
279 |
-
length (int): Desired line length.
|
280 |
-
style (Style, optional): Style to use for any padding.
|
281 |
-
pad (bool): Enable padding of lines that are less than `length`.
|
282 |
-
|
283 |
-
Returns:
|
284 |
-
Iterable[List[Segment]]: An iterable of lines of segments.
|
285 |
-
"""
|
286 |
-
line: List[Segment] = []
|
287 |
-
append = line.append
|
288 |
-
|
289 |
-
adjust_line_length = cls.adjust_line_length
|
290 |
-
new_line_segment = cls("\n")
|
291 |
-
|
292 |
-
for segment in segments:
|
293 |
-
if "\n" in segment.text and not segment.control:
|
294 |
-
text, segment_style, _ = segment
|
295 |
-
while text:
|
296 |
-
_text, new_line, text = text.partition("\n")
|
297 |
-
if _text:
|
298 |
-
append(cls(_text, segment_style))
|
299 |
-
if new_line:
|
300 |
-
cropped_line = adjust_line_length(
|
301 |
-
line, length, style=style, pad=pad
|
302 |
-
)
|
303 |
-
if include_new_lines:
|
304 |
-
cropped_line.append(new_line_segment)
|
305 |
-
yield cropped_line
|
306 |
-
line.clear()
|
307 |
-
else:
|
308 |
-
append(segment)
|
309 |
-
if line:
|
310 |
-
yield adjust_line_length(line, length, style=style, pad=pad)
|
311 |
-
|
312 |
-
@classmethod
|
313 |
-
def adjust_line_length(
|
314 |
-
cls,
|
315 |
-
line: List["Segment"],
|
316 |
-
length: int,
|
317 |
-
style: Optional[Style] = None,
|
318 |
-
pad: bool = True,
|
319 |
-
) -> List["Segment"]:
|
320 |
-
"""Adjust a line to a given width (cropping or padding as required).
|
321 |
-
|
322 |
-
Args:
|
323 |
-
segments (Iterable[Segment]): A list of segments in a single line.
|
324 |
-
length (int): The desired width of the line.
|
325 |
-
style (Style, optional): The style of padding if used (space on the end). Defaults to None.
|
326 |
-
pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
|
327 |
-
|
328 |
-
Returns:
|
329 |
-
List[Segment]: A line of segments with the desired length.
|
330 |
-
"""
|
331 |
-
line_length = sum(segment.cell_length for segment in line)
|
332 |
-
new_line: List[Segment]
|
333 |
-
|
334 |
-
if line_length < length:
|
335 |
-
if pad:
|
336 |
-
new_line = line + [cls(" " * (length - line_length), style)]
|
337 |
-
else:
|
338 |
-
new_line = line[:]
|
339 |
-
elif line_length > length:
|
340 |
-
new_line = []
|
341 |
-
append = new_line.append
|
342 |
-
line_length = 0
|
343 |
-
for segment in line:
|
344 |
-
segment_length = segment.cell_length
|
345 |
-
if line_length + segment_length < length or segment.control:
|
346 |
-
append(segment)
|
347 |
-
line_length += segment_length
|
348 |
-
else:
|
349 |
-
text, segment_style, _ = segment
|
350 |
-
text = set_cell_size(text, length - line_length)
|
351 |
-
append(cls(text, segment_style))
|
352 |
-
break
|
353 |
-
else:
|
354 |
-
new_line = line[:]
|
355 |
-
return new_line
|
356 |
-
|
357 |
-
@classmethod
|
358 |
-
def get_line_length(cls, line: List["Segment"]) -> int:
|
359 |
-
"""Get the length of list of segments.
|
360 |
-
|
361 |
-
Args:
|
362 |
-
line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters),
|
363 |
-
|
364 |
-
Returns:
|
365 |
-
int: The length of the line.
|
366 |
-
"""
|
367 |
-
_cell_len = cell_len
|
368 |
-
return sum(_cell_len(text) for text, style, control in line if not control)
|
369 |
-
|
370 |
-
@classmethod
|
371 |
-
def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
|
372 |
-
"""Get the shape (enclosing rectangle) of a list of lines.
|
373 |
-
|
374 |
-
Args:
|
375 |
-
lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
|
376 |
-
|
377 |
-
Returns:
|
378 |
-
Tuple[int, int]: Width and height in characters.
|
379 |
-
"""
|
380 |
-
get_line_length = cls.get_line_length
|
381 |
-
max_width = max(get_line_length(line) for line in lines) if lines else 0
|
382 |
-
return (max_width, len(lines))
|
383 |
-
|
384 |
-
@classmethod
|
385 |
-
def set_shape(
|
386 |
-
cls,
|
387 |
-
lines: List[List["Segment"]],
|
388 |
-
width: int,
|
389 |
-
height: Optional[int] = None,
|
390 |
-
style: Optional[Style] = None,
|
391 |
-
new_lines: bool = False,
|
392 |
-
) -> List[List["Segment"]]:
|
393 |
-
"""Set the shape of a list of lines (enclosing rectangle).
|
394 |
-
|
395 |
-
Args:
|
396 |
-
lines (List[List[Segment]]): A list of lines.
|
397 |
-
width (int): Desired width.
|
398 |
-
height (int, optional): Desired height or None for no change.
|
399 |
-
style (Style, optional): Style of any padding added.
|
400 |
-
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
|
401 |
-
|
402 |
-
Returns:
|
403 |
-
List[List[Segment]]: New list of lines.
|
404 |
-
"""
|
405 |
-
_height = height or len(lines)
|
406 |
-
|
407 |
-
blank = (
|
408 |
-
[cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
|
409 |
-
)
|
410 |
-
|
411 |
-
adjust_line_length = cls.adjust_line_length
|
412 |
-
shaped_lines = lines[:_height]
|
413 |
-
shaped_lines[:] = [
|
414 |
-
adjust_line_length(line, width, style=style) for line in lines
|
415 |
-
]
|
416 |
-
if len(shaped_lines) < _height:
|
417 |
-
shaped_lines.extend([blank] * (_height - len(shaped_lines)))
|
418 |
-
return shaped_lines
|
419 |
-
|
420 |
-
@classmethod
|
421 |
-
def align_top(
|
422 |
-
cls: Type["Segment"],
|
423 |
-
lines: List[List["Segment"]],
|
424 |
-
width: int,
|
425 |
-
height: int,
|
426 |
-
style: Style,
|
427 |
-
new_lines: bool = False,
|
428 |
-
) -> List[List["Segment"]]:
|
429 |
-
"""Aligns lines to top (adds extra lines to bottom as required).
|
430 |
-
|
431 |
-
Args:
|
432 |
-
lines (List[List[Segment]]): A list of lines.
|
433 |
-
width (int): Desired width.
|
434 |
-
height (int, optional): Desired height or None for no change.
|
435 |
-
style (Style): Style of any padding added.
|
436 |
-
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
|
437 |
-
|
438 |
-
Returns:
|
439 |
-
List[List[Segment]]: New list of lines.
|
440 |
-
"""
|
441 |
-
extra_lines = height - len(lines)
|
442 |
-
if not extra_lines:
|
443 |
-
return lines[:]
|
444 |
-
lines = lines[:height]
|
445 |
-
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
|
446 |
-
lines = lines + [[blank]] * extra_lines
|
447 |
-
return lines
|
448 |
-
|
449 |
-
@classmethod
|
450 |
-
def align_bottom(
|
451 |
-
cls: Type["Segment"],
|
452 |
-
lines: List[List["Segment"]],
|
453 |
-
width: int,
|
454 |
-
height: int,
|
455 |
-
style: Style,
|
456 |
-
new_lines: bool = False,
|
457 |
-
) -> List[List["Segment"]]:
|
458 |
-
"""Aligns render to bottom (adds extra lines above as required).
|
459 |
-
|
460 |
-
Args:
|
461 |
-
lines (List[List[Segment]]): A list of lines.
|
462 |
-
width (int): Desired width.
|
463 |
-
height (int, optional): Desired height or None for no change.
|
464 |
-
style (Style): Style of any padding added. Defaults to None.
|
465 |
-
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
|
466 |
-
|
467 |
-
Returns:
|
468 |
-
List[List[Segment]]: New list of lines.
|
469 |
-
"""
|
470 |
-
extra_lines = height - len(lines)
|
471 |
-
if not extra_lines:
|
472 |
-
return lines[:]
|
473 |
-
lines = lines[:height]
|
474 |
-
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
|
475 |
-
lines = [[blank]] * extra_lines + lines
|
476 |
-
return lines
|
477 |
-
|
478 |
-
@classmethod
|
479 |
-
def align_middle(
|
480 |
-
cls: Type["Segment"],
|
481 |
-
lines: List[List["Segment"]],
|
482 |
-
width: int,
|
483 |
-
height: int,
|
484 |
-
style: Style,
|
485 |
-
new_lines: bool = False,
|
486 |
-
) -> List[List["Segment"]]:
|
487 |
-
"""Aligns lines to middle (adds extra lines to above and below as required).
|
488 |
-
|
489 |
-
Args:
|
490 |
-
lines (List[List[Segment]]): A list of lines.
|
491 |
-
width (int): Desired width.
|
492 |
-
height (int, optional): Desired height or None for no change.
|
493 |
-
style (Style): Style of any padding added.
|
494 |
-
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
|
495 |
-
|
496 |
-
Returns:
|
497 |
-
List[List[Segment]]: New list of lines.
|
498 |
-
"""
|
499 |
-
extra_lines = height - len(lines)
|
500 |
-
if not extra_lines:
|
501 |
-
return lines[:]
|
502 |
-
lines = lines[:height]
|
503 |
-
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
|
504 |
-
top_lines = extra_lines // 2
|
505 |
-
bottom_lines = extra_lines - top_lines
|
506 |
-
lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
|
507 |
-
return lines
|
508 |
-
|
509 |
-
@classmethod
|
510 |
-
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
|
511 |
-
"""Simplify an iterable of segments by combining contiguous segments with the same style.
|
512 |
-
|
513 |
-
Args:
|
514 |
-
segments (Iterable[Segment]): An iterable of segments.
|
515 |
-
|
516 |
-
Returns:
|
517 |
-
Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
|
518 |
-
"""
|
519 |
-
iter_segments = iter(segments)
|
520 |
-
try:
|
521 |
-
last_segment = next(iter_segments)
|
522 |
-
except StopIteration:
|
523 |
-
return
|
524 |
-
|
525 |
-
_Segment = Segment
|
526 |
-
for segment in iter_segments:
|
527 |
-
if last_segment.style == segment.style and not segment.control:
|
528 |
-
last_segment = _Segment(
|
529 |
-
last_segment.text + segment.text, last_segment.style
|
530 |
-
)
|
531 |
-
else:
|
532 |
-
yield last_segment
|
533 |
-
last_segment = segment
|
534 |
-
yield last_segment
|
535 |
-
|
536 |
-
@classmethod
|
537 |
-
def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
|
538 |
-
"""Remove all links from an iterable of styles.
|
539 |
-
|
540 |
-
Args:
|
541 |
-
segments (Iterable[Segment]): An iterable segments.
|
542 |
-
|
543 |
-
Yields:
|
544 |
-
Segment: Segments with link removed.
|
545 |
-
"""
|
546 |
-
for segment in segments:
|
547 |
-
if segment.control or segment.style is None:
|
548 |
-
yield segment
|
549 |
-
else:
|
550 |
-
text, style, _control = segment
|
551 |
-
yield cls(text, style.update_link(None) if style else None)
|
552 |
-
|
553 |
-
@classmethod
|
554 |
-
def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
|
555 |
-
"""Remove all styles from an iterable of segments.
|
556 |
-
|
557 |
-
Args:
|
558 |
-
segments (Iterable[Segment]): An iterable segments.
|
559 |
-
|
560 |
-
Yields:
|
561 |
-
Segment: Segments with styles replace with None
|
562 |
-
"""
|
563 |
-
for text, _style, control in segments:
|
564 |
-
yield cls(text, None, control)
|
565 |
-
|
566 |
-
@classmethod
|
567 |
-
def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
|
568 |
-
"""Remove all color from an iterable of segments.
|
569 |
-
|
570 |
-
Args:
|
571 |
-
segments (Iterable[Segment]): An iterable segments.
|
572 |
-
|
573 |
-
Yields:
|
574 |
-
Segment: Segments with colorless style.
|
575 |
-
"""
|
576 |
-
|
577 |
-
cache: Dict[Style, Style] = {}
|
578 |
-
for text, style, control in segments:
|
579 |
-
if style:
|
580 |
-
colorless_style = cache.get(style)
|
581 |
-
if colorless_style is None:
|
582 |
-
colorless_style = style.without_color
|
583 |
-
cache[style] = colorless_style
|
584 |
-
yield cls(text, colorless_style, control)
|
585 |
-
else:
|
586 |
-
yield cls(text, None, control)
|
587 |
-
|
588 |
-
@classmethod
|
589 |
-
def divide(
|
590 |
-
cls, segments: Iterable["Segment"], cuts: Iterable[int]
|
591 |
-
) -> Iterable[List["Segment"]]:
|
592 |
-
"""Divides an iterable of segments in to portions.
|
593 |
-
|
594 |
-
Args:
|
595 |
-
cuts (Iterable[int]): Cell positions where to divide.
|
596 |
-
|
597 |
-
Yields:
|
598 |
-
[Iterable[List[Segment]]]: An iterable of Segments in List.
|
599 |
-
"""
|
600 |
-
split_segments: List["Segment"] = []
|
601 |
-
add_segment = split_segments.append
|
602 |
-
|
603 |
-
iter_cuts = iter(cuts)
|
604 |
-
|
605 |
-
while True:
|
606 |
-
cut = next(iter_cuts, -1)
|
607 |
-
if cut == -1:
|
608 |
-
return []
|
609 |
-
if cut != 0:
|
610 |
-
break
|
611 |
-
yield []
|
612 |
-
pos = 0
|
613 |
-
|
614 |
-
segments_clear = split_segments.clear
|
615 |
-
segments_copy = split_segments.copy
|
616 |
-
|
617 |
-
_cell_len = cached_cell_len
|
618 |
-
for segment in segments:
|
619 |
-
text, _style, control = segment
|
620 |
-
while text:
|
621 |
-
end_pos = pos if control else pos + _cell_len(text)
|
622 |
-
if end_pos < cut:
|
623 |
-
add_segment(segment)
|
624 |
-
pos = end_pos
|
625 |
-
break
|
626 |
-
|
627 |
-
if end_pos == cut:
|
628 |
-
add_segment(segment)
|
629 |
-
yield segments_copy()
|
630 |
-
segments_clear()
|
631 |
-
pos = end_pos
|
632 |
-
|
633 |
-
cut = next(iter_cuts, -1)
|
634 |
-
if cut == -1:
|
635 |
-
if split_segments:
|
636 |
-
yield segments_copy()
|
637 |
-
return
|
638 |
-
|
639 |
-
break
|
640 |
-
|
641 |
-
else:
|
642 |
-
before, segment = segment.split_cells(cut - pos)
|
643 |
-
text, _style, control = segment
|
644 |
-
add_segment(before)
|
645 |
-
yield segments_copy()
|
646 |
-
segments_clear()
|
647 |
-
pos = cut
|
648 |
-
|
649 |
-
cut = next(iter_cuts, -1)
|
650 |
-
if cut == -1:
|
651 |
-
if split_segments:
|
652 |
-
yield segments_copy()
|
653 |
-
return
|
654 |
-
|
655 |
-
yield segments_copy()
|
656 |
-
|
657 |
-
|
658 |
-
class Segments:
|
659 |
-
"""A simple renderable to render an iterable of segments. This class may be useful if
|
660 |
-
you want to print segments outside of a __rich_console__ method.
|
661 |
-
|
662 |
-
Args:
|
663 |
-
segments (Iterable[Segment]): An iterable of segments.
|
664 |
-
new_lines (bool, optional): Add new lines between segments. Defaults to False.
|
665 |
-
"""
|
666 |
-
|
667 |
-
def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
|
668 |
-
self.segments = list(segments)
|
669 |
-
self.new_lines = new_lines
|
670 |
-
|
671 |
-
def __rich_console__(
|
672 |
-
self, console: "Console", options: "ConsoleOptions"
|
673 |
-
) -> "RenderResult":
|
674 |
-
if self.new_lines:
|
675 |
-
line = Segment.line()
|
676 |
-
for segment in self.segments:
|
677 |
-
yield segment
|
678 |
-
yield line
|
679 |
-
else:
|
680 |
-
yield from self.segments
|
681 |
-
|
682 |
-
|
683 |
-
class SegmentLines:
|
684 |
-
def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
|
685 |
-
"""A simple renderable containing a number of lines of segments. May be used as an intermediate
|
686 |
-
in rendering process.
|
687 |
-
|
688 |
-
Args:
|
689 |
-
lines (Iterable[List[Segment]]): Lists of segments forming lines.
|
690 |
-
new_lines (bool, optional): Insert new lines after each line. Defaults to False.
|
691 |
-
"""
|
692 |
-
self.lines = list(lines)
|
693 |
-
self.new_lines = new_lines
|
694 |
-
|
695 |
-
def __rich_console__(
|
696 |
-
self, console: "Console", options: "ConsoleOptions"
|
697 |
-
) -> "RenderResult":
|
698 |
-
if self.new_lines:
|
699 |
-
new_line = Segment.line()
|
700 |
-
for line in self.lines:
|
701 |
-
yield from line
|
702 |
-
yield new_line
|
703 |
-
else:
|
704 |
-
for line in self.lines:
|
705 |
-
yield from line
|
706 |
-
|
707 |
-
|
708 |
-
if __name__ == "__main__": # pragma: no cover
|
709 |
-
from pip._vendor.rich.console import Console
|
710 |
-
from pip._vendor.rich.syntax import Syntax
|
711 |
-
from pip._vendor.rich.text import Text
|
712 |
-
|
713 |
-
code = """from rich.console import Console
|
714 |
-
console = Console()
|
715 |
-
text = Text.from_markup("Hello, [bold magenta]World[/]!")
|
716 |
-
console.print(text)"""
|
717 |
-
|
718 |
-
text = Text.from_markup("Hello, [bold magenta]World[/]!")
|
719 |
-
|
720 |
-
console = Console()
|
721 |
-
|
722 |
-
console.rule("rich.Segment")
|
723 |
-
console.print(
|
724 |
-
"A Segment is the last step in the Rich render process before generating text with ANSI codes."
|
725 |
-
)
|
726 |
-
console.print("\nConsider the following code:\n")
|
727 |
-
console.print(Syntax(code, "python", line_numbers=True))
|
728 |
-
console.print()
|
729 |
-
console.print(
|
730 |
-
"When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
|
731 |
-
)
|
732 |
-
fragments = list(console.render(text))
|
733 |
-
console.print(fragments)
|
734 |
-
console.print()
|
735 |
-
console.print("The Segments are then processed to produce the following output:\n")
|
736 |
-
console.print(text)
|
737 |
-
console.print(
|
738 |
-
"\nYou will only need to know this if you are implementing your own Rich renderables."
|
739 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/tornadoweb.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
# Copyright 2017 Elisey Zanko
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import sys
|
16 |
-
import typing
|
17 |
-
|
18 |
-
from pip._vendor.tenacity import BaseRetrying
|
19 |
-
from pip._vendor.tenacity import DoAttempt
|
20 |
-
from pip._vendor.tenacity import DoSleep
|
21 |
-
from pip._vendor.tenacity import RetryCallState
|
22 |
-
|
23 |
-
from tornado import gen
|
24 |
-
|
25 |
-
if typing.TYPE_CHECKING:
|
26 |
-
from tornado.concurrent import Future
|
27 |
-
|
28 |
-
_RetValT = typing.TypeVar("_RetValT")
|
29 |
-
|
30 |
-
|
31 |
-
class TornadoRetrying(BaseRetrying):
|
32 |
-
def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None:
|
33 |
-
super().__init__(**kwargs)
|
34 |
-
self.sleep = sleep
|
35 |
-
|
36 |
-
@gen.coroutine # type: ignore[misc]
|
37 |
-
def __call__(
|
38 |
-
self,
|
39 |
-
fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]",
|
40 |
-
*args: typing.Any,
|
41 |
-
**kwargs: typing.Any,
|
42 |
-
) -> "typing.Generator[typing.Any, typing.Any, _RetValT]":
|
43 |
-
self.begin()
|
44 |
-
|
45 |
-
retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
|
46 |
-
while True:
|
47 |
-
do = self.iter(retry_state=retry_state)
|
48 |
-
if isinstance(do, DoAttempt):
|
49 |
-
try:
|
50 |
-
result = yield fn(*args, **kwargs)
|
51 |
-
except BaseException: # noqa: B902
|
52 |
-
retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
|
53 |
-
else:
|
54 |
-
retry_state.set_result(result)
|
55 |
-
elif isinstance(do, DoSleep):
|
56 |
-
retry_state.prepare_for_next_attempt()
|
57 |
-
yield self.sleep(do)
|
58 |
-
else:
|
59 |
-
raise gen.Return(do)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/nuimages.py
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
from detectron2.data.datasets.register_coco import register_coco_instances
|
2 |
-
import os
|
3 |
-
|
4 |
-
categories = [
|
5 |
-
{'id': 0, 'name': 'car'},
|
6 |
-
{'id': 1, 'name': 'truck'},
|
7 |
-
{'id': 2, 'name': 'trailer'},
|
8 |
-
{'id': 3, 'name': 'bus'},
|
9 |
-
{'id': 4, 'name': 'construction_vehicle'},
|
10 |
-
{'id': 5, 'name': 'bicycle'},
|
11 |
-
{'id': 6, 'name': 'motorcycle'},
|
12 |
-
{'id': 7, 'name': 'pedestrian'},
|
13 |
-
{'id': 8, 'name': 'traffic_cone'},
|
14 |
-
{'id': 9, 'name': 'barrier'},
|
15 |
-
]
|
16 |
-
|
17 |
-
def _get_builtin_metadata():
|
18 |
-
id_to_name = {x['id']: x['name'] for x in categories}
|
19 |
-
thing_dataset_id_to_contiguous_id = {i: i for i in range(len(categories))}
|
20 |
-
thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
|
21 |
-
return {
|
22 |
-
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
|
23 |
-
"thing_classes": thing_classes}
|
24 |
-
|
25 |
-
_PREDEFINED_SPLITS = {
|
26 |
-
"nuimages_train": ("nuimages", "nuimages/annotations/nuimages_v1.0-train.json"),
|
27 |
-
"nuimages_val": ("nuimages", "nuimages/annotations/nuimages_v1.0-val.json"),
|
28 |
-
"nuimages_mini": ("nuimages", "nuimages/annotations/nuimages_v1.0-mini.json"),
|
29 |
-
}
|
30 |
-
|
31 |
-
for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
|
32 |
-
register_coco_instances(
|
33 |
-
key,
|
34 |
-
_get_builtin_metadata(),
|
35 |
-
os.path.join("datasets", json_file) if "://" not in json_file else json_file,
|
36 |
-
os.path.join("datasets", image_root),
|
37 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/utils/backups_test.py
DELETED
@@ -1,138 +0,0 @@
|
|
1 |
-
|
2 |
-
import os
|
3 |
-
import shutil
|
4 |
-
import hashlib
|
5 |
-
import time
|
6 |
-
|
7 |
-
LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
|
8 |
-
WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
|
9 |
-
GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
|
10 |
-
|
11 |
-
def import_google_drive_backup():
|
12 |
-
print("Importing Google Drive backup...")
|
13 |
-
GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path
|
14 |
-
LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
|
15 |
-
WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
|
16 |
-
weights_exist = False
|
17 |
-
files_to_copy = []
|
18 |
-
weights_to_copy = []
|
19 |
-
|
20 |
-
def handle_files(root, files, is_weight_files=False):
|
21 |
-
for filename in files:
|
22 |
-
filepath = os.path.join(root, filename)
|
23 |
-
if filename.endswith('.pth') and is_weight_files:
|
24 |
-
weights_exist = True
|
25 |
-
backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
|
26 |
-
else:
|
27 |
-
backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
|
28 |
-
backup_folderpath = os.path.dirname(backup_filepath)
|
29 |
-
if not os.path.exists(backup_folderpath):
|
30 |
-
os.makedirs(backup_folderpath)
|
31 |
-
print(f'Created folder: {backup_folderpath}', flush=True)
|
32 |
-
if is_weight_files:
|
33 |
-
weights_to_copy.append((filepath, backup_filepath))
|
34 |
-
else:
|
35 |
-
files_to_copy.append((filepath, backup_filepath))
|
36 |
-
|
37 |
-
for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')):
|
38 |
-
handle_files(root, files)
|
39 |
-
|
40 |
-
for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
|
41 |
-
handle_files(root, files, True)
|
42 |
-
|
43 |
-
# Copy files in batches
|
44 |
-
total_files = len(files_to_copy)
|
45 |
-
start_time = time.time()
|
46 |
-
for i, (source, dest) in enumerate(files_to_copy, start=1):
|
47 |
-
with open(source, 'rb') as src, open(dest, 'wb') as dst:
|
48 |
-
shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
|
49 |
-
# Report progress every 5 seconds or after every 100 files, whichever is less frequent
|
50 |
-
if time.time() - start_time > 5 or i % 100 == 0:
|
51 |
-
print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="")
|
52 |
-
start_time = time.time()
|
53 |
-
print(f'\nImported {len(files_to_copy)} files from Google Drive backup')
|
54 |
-
|
55 |
-
# Copy weights in batches
|
56 |
-
total_weights = len(weights_to_copy)
|
57 |
-
start_time = time.time()
|
58 |
-
for i, (source, dest) in enumerate(weights_to_copy, start=1):
|
59 |
-
with open(source, 'rb') as src, open(dest, 'wb') as dst:
|
60 |
-
shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
|
61 |
-
# Report progress every 5 seconds or after every 100 files, whichever is less frequent
|
62 |
-
if time.time() - start_time > 5 or i % 100 == 0:
|
63 |
-
print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="")
|
64 |
-
start_time = time.time()
|
65 |
-
if weights_exist:
|
66 |
-
print(f'\nImported {len(weights_to_copy)} weight files')
|
67 |
-
print("Copied weights from Google Drive backup to local weights folder.")
|
68 |
-
else:
|
69 |
-
print("\nNo weights found in Google Drive backup.")
|
70 |
-
print("Google Drive backup import completed.")
|
71 |
-
|
72 |
-
def backup_files():
|
73 |
-
print("\n Starting backup loop...")
|
74 |
-
last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
|
75 |
-
fully_updated = False # boolean to track if all files are up to date
|
76 |
-
try:
|
77 |
-
with open(last_backup_timestamps_path, 'r') as f:
|
78 |
-
last_backup_timestamps = dict(line.strip().split(':') for line in f)
|
79 |
-
except:
|
80 |
-
last_backup_timestamps = {}
|
81 |
-
|
82 |
-
while True:
|
83 |
-
updated = False
|
84 |
-
files_to_copy = []
|
85 |
-
files_to_delete = []
|
86 |
-
|
87 |
-
for root, dirs, files in os.walk(LOGS_FOLDER):
|
88 |
-
for filename in files:
|
89 |
-
if filename != 'last_backup_timestamps.txt':
|
90 |
-
filepath = os.path.join(root, filename)
|
91 |
-
if os.path.isfile(filepath):
|
92 |
-
backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
|
93 |
-
backup_folderpath = os.path.dirname(backup_filepath)
|
94 |
-
|
95 |
-
if not os.path.exists(backup_folderpath):
|
96 |
-
os.makedirs(backup_folderpath)
|
97 |
-
print(f'Created backup folder: {backup_folderpath}', flush=True)
|
98 |
-
|
99 |
-
# check if file has changed since last backup
|
100 |
-
last_backup_timestamp = last_backup_timestamps.get(filepath)
|
101 |
-
current_timestamp = os.path.getmtime(filepath)
|
102 |
-
if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
|
103 |
-
files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy
|
104 |
-
last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
|
105 |
-
updated = True
|
106 |
-
fully_updated = False # if a file is updated, all files are not up to date
|
107 |
-
|
108 |
-
# check if any files were deleted in Colab and delete them from the backup drive
|
109 |
-
for filepath in list(last_backup_timestamps.keys()):
|
110 |
-
if not os.path.exists(filepath):
|
111 |
-
backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
|
112 |
-
if os.path.exists(backup_filepath):
|
113 |
-
files_to_delete.append(backup_filepath) # add to list of files to delete
|
114 |
-
del last_backup_timestamps[filepath]
|
115 |
-
updated = True
|
116 |
-
fully_updated = False # if a file is deleted, all files are not up to date
|
117 |
-
|
118 |
-
# Copy files in batches
|
119 |
-
if files_to_copy:
|
120 |
-
for source, dest in files_to_copy:
|
121 |
-
shutil.copy2(source, dest)
|
122 |
-
print(f'Copied or updated {len(files_to_copy)} files')
|
123 |
-
|
124 |
-
# Delete files in batches
|
125 |
-
if files_to_delete:
|
126 |
-
for file in files_to_delete:
|
127 |
-
os.remove(file)
|
128 |
-
print(f'Deleted {len(files_to_delete)} files')
|
129 |
-
|
130 |
-
if not updated and not fully_updated:
|
131 |
-
print("Files are up to date.")
|
132 |
-
fully_updated = True # if all files are up to date, set the boolean to True
|
133 |
-
copy_weights_folder_to_drive()
|
134 |
-
|
135 |
-
with open(last_backup_timestamps_path, 'w') as f:
|
136 |
-
for filepath, timestamp in last_backup_timestamps.items():
|
137 |
-
f.write(f'{filepath}:{timestamp}\n')
|
138 |
-
time.sleep(15) # wait for 15 seconds before checking again
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Apk Stumble Chicos Apk Puro.md
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>8 bola piscina 5.8.0 Mod Apk: Todo lo que necesita saber</h1>
|
3 |
-
<p>Si eres un fan de los juegos de billar, es posible que hayas oído hablar de <strong>8 Ball Pool</strong>, uno de los juegos multijugador en línea más populares y adictivos para dispositivos Android e iOS. Pero ¿sabías que hay una manera de disfrutar de este juego aún más con monedas ilimitadas, dinero en efectivo, señales y otros beneficios? Sí, estamos hablando de <strong>8 Ball Pool 5.8.0 Mod Apk</strong>, la última versión de la aplicación modificada que le permite jugar el juego con características mejoradas y sin restricciones. En este artículo, le diremos todo lo que necesita saber acerca de este apk mod, incluyendo sus características, beneficios, riesgos, y cómo descargar e instalar en su dispositivo. </p>
|
4 |
-
<h2>¿Qué es la piscina de bolas 8? </h2>
|
5 |
-
<p>8 Ball Pool es un juego multijugador gratuito desarrollado por Miniclip, una compañía suiza que también creó otros juegos populares como Agar.io, Soccer Stars y Carrom Pool. El juego fue lanzado en 2010 y desde entonces se ha convertido en uno de los juegos más descargados y jugados en Google Play y App Store, con más de 500 millones de descargas y millones de jugadores activos en todo el mundo. </p>
|
6 |
-
<h2>apk stumble chicos apk puro</h2><br /><p><b><b>Download Zip</b> –––––>>> <a href="https://bltlly.com/2v6Lpb">https://bltlly.com/2v6Lpb</a></b></p><br /><br />
|
7 |
-
<h3>Características de la piscina de bolas 8</h3>
|
8 |
-
<p>Algunas de las características que hacen que 8 Ball Pool sea tan divertido y atractivo son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Puedes jugar con tus amigos o desafiar a jugadores de todo el mundo en partidas 1 a 1 o torneos. </li>
|
11 |
-
<li> Puede personalizar su señal y tabla con varios diseños y colores. </li>
|
12 |
-
<li>Puedes ganar monedas y dinero ganando partidas y completando misiones. </li>
|
13 |
-
<li>Puedes usar monedas y dinero en efectivo para comprar nuevas pistas, paquetes de chat, minijuegos y otros artículos en la tienda de juegos. </li>
|
14 |
-
<li>Puedes unirte a clubes y chatear con otros miembros. </li>
|
15 |
-
<li>Puedes subir de nivel y desbloquear nuevas ubicaciones y modos. </li>
|
16 |
-
<li>Puedes participar en eventos de temporada y ganar recompensas exclusivas. </li>
|
17 |
-
</ul>
|
18 |
-
<h3>Cómo jugar al billar de bolas 8</h3>
|
19 |
-
|
20 |
-
<h2>¿Qué es un apk mod? </h2>
|
21 |
-
<p>Un apk mod es una versión modificada de una aplicación original que ha sido alterada por desarrolladores de terceros para agregar o eliminar ciertas características, omitir limitaciones o mejorar el rendimiento. Un apk mod generalmente viene con un nombre de archivo y firma diferente a la aplicación original, y requiere instalación manual de fuentes desconocidas. </p>
|
22 |
-
<h3>Beneficios de usar un mod apk</h3>
|
23 |
-
<p>Algunos de los beneficios de usar un apk mod son:</p>
|
24 |
-
<ul>
|
25 |
-
<li>Puede acceder a funciones premium que de otro modo están bloqueadas o requieren compras en la aplicación. </li>
|
26 |
-
<li>Puedes obtener recursos ilimitados como monedas, efectivo, gemas, etc. que son difíciles de ganar o caros de comprar. </li>
|
27 |
-
<li>Puedes desbloquear todos los niveles, modos, elementos, señales, etc. que estén restringidos o requieran progreso o logros. </li>
|
28 |
-
<li> Puede eliminar anuncios, ventanas emergentes, banners, etc. que son molestos o intrusivos. </li>
|
29 |
-
<li> Puede disfrutar de una carga más rápida, un juego más suave, mejores gráficos, etc. que de otra manera están comprometidos o de baja calidad. </li>
|
30 |
-
</ul>
|
31 |
-
<h3>R <h3>Riesgos de usar un mod apk</h3>
|
32 |
-
<p>Sin embargo, el uso de un apk mod también viene con algunos riesgos que usted debe ser consciente de:</p>
|
33 |
-
<ul>
|
34 |
-
<li> Usted puede obtener prohibido en el juego o perder su cuenta si los desarrolladores detectan que está utilizando un apk mod. </li>
|
35 |
-
<li>Puedes exponer tu dispositivo a malware, virus, spyware, etc. que pueden dañar tus datos, privacidad o seguridad si descargas un mod apk desde una fuente no confiable. </li>
|
36 |
-
<li>Puede experimentar fallos, errores, bloqueos, etc. que pueden afectar el rendimiento de su juego o dispositivo si instala un apk mod que es incompatible con su dispositivo o versión del juego. </li>
|
37 |
-
<li>Usted puede perderse las actualizaciones, nuevas características, correcciones de errores, etc. que son liberados por los desarrolladores originales si se utiliza un apk mod que está desactualizado o no se actualiza regularmente. </li>
|
38 |
-
<li> Usted puede perder la diversión y el desafío del juego si se utiliza un apk mod que hace que el juego demasiado fácil o injusto. </li>
|
39 |
-
</ul>
|
40 |
-
|
41 |
-
<p>8 Ball Pool 5.8.0 Mod Apk es la última versión de la aplicación modificada para 8 Ball Pool que fue lanzado en junio de 2023. Es uno de los apks mod más populares y ampliamente utilizados para este juego, ya que ofrece muchas características y beneficios increíbles que no están disponibles en la aplicación original. </p>
|
42 |
-
<h3>Características de 8 Piscina de bolas 5.8.0 Mod Apk</h3>
|
43 |
-
<p>Algunas de las características que se pueden disfrutar con 8 Ball Pool 5.8.0 Mod Apk son:</p>
|
44 |
-
<p></p>
|
45 |
-
<ul>
|
46 |
-
<li>Puedes obtener monedas ilimitadas y dinero en efectivo que puedes usar para comprar cualquier cosa en la tienda del juego. </li>
|
47 |
-
<li> Puedes obtener señales ilimitadas y actualizarlas al nivel máximo. </li>
|
48 |
-
<li>Puedes obtener paquetes de chat ilimitados y usarlos para comunicarte con otros jugadores. </li>
|
49 |
-
<li>Puedes obtener minijuegos ilimitados y jugarlos para ganar más monedas y dinero en efectivo. </li>
|
50 |
-
<li>Puedes obtener todas las características premium como club VIP, pistas exclusivas, cajas raras, etc. gratis. </li>
|
51 |
-
<li>Puedes jugar en cualquier lugar y modo sin ningún nivel o requisito de logro. </li>
|
52 |
-
<li>Puedes jugar con cualquier jugador sin ninguna restricción de habilidad o rango. </li>
|
53 |
-
<li> Puede jugar con directrices largas y un límite de tiempo extendido para mejorar su precisión y velocidad. </li>
|
54 |
-
<li>Puedes jugar sin anuncios ni interrupciones. </li>
|
55 |
-
</ul>
|
56 |
-
<h3>Cómo descargar e instalar 8 Ball Pool 5.8.0 Mod Apk</h3>
|
57 |
-
<p>Si desea probar 8 Ball Pool 5.8.0 Mod Apk, es necesario seguir estos pasos:</p>
|
58 |
-
<ol>
|
59 |
-
<li>Desinstalar la aplicación original de su dispositivo si lo tiene instalado. </li>
|
60 |
-
<li>Descargar el archivo apk mod de una fuente confiable (como [este]). </li>
|
61 |
-
<li>Habilitar la instalación desde fuentes desconocidas en la configuración del dispositivo. </li>
|
62 |
-
<li>Busque el archivo descargado en el almacenamiento del dispositivo y toque en él para instalarlo. </li>
|
63 |
-
<li>Iniciar la aplicación y disfrutar del juego con todas las características modded. </li>
|
64 |
-
</ol>
|
65 |
-
<h2>Conclusión</h2>
|
66 |
-
|
67 |
-
<h3>Preguntas frecuentes</h3>
|
68 |
-
<p>Aquí hay algunas preguntas frecuentes sobre 8 Ball Pool 5.8.0 Mod Apk:</p>
|
69 |
-
<h4>Es 8 bola piscina 5.8.0 Mod apk seguro de usar? </h4>
|
70 |
-
<p>8 Ball Pool 5.8.0 Mod Apk es generalmente seguro de usar si se descarga desde una fuente de confianza y escanear con un antivirus antes de instalarlo en su dispositivo. Sin embargo, siempre hay una posibilidad de conseguir malware o virus de fuentes no confiables o conseguir prohibido en el juego o perder su cuenta si los desarrolladores detectan que está utilizando un apk mod. Por lo tanto, le recomendamos que utilice este mod apk a su propio riesgo y discreción. </p>
|
71 |
-
<h4>Es 8 Ball Pool 5.8.0 Mod Apk compatible con mi dispositivo? </h4>
|
72 |
-
<p>8 Ball Pool 5.8.0 Mod Apk es compatible con <p>8 Ball Pool 5.8.0 Mod Apk es compatible con la mayoría de los dispositivos Android que tienen Android 4.4 o superior y al menos 2 GB de RAM. Sin embargo, algunos dispositivos pueden no ser compatibles con el mod apk o pueden experimentar algunos fallos o errores debido a diferentes especificaciones o configuraciones. Por lo tanto, le sugerimos que compruebe la compatibilidad de su dispositivo antes de descargar e instalar el apk mod. </p>
|
73 |
-
<h4> ¿Cómo puedo actualizar 8 Ball Pool 5.8.0 Mod Apk? </h4>
|
74 |
-
<p>8 Ball Pool 5.8.0 Mod Apk no está disponible en Google Play o App Store, por lo que no se puede actualizar automáticamente desde allí. En su lugar, es necesario comprobar las actualizaciones manualmente de la fuente donde se descargó el apk mod o de otros sitios web que ofrecen la última versión del apk mod. También puede seguir las páginas oficiales de las redes sociales de 8 Ball Pool o Miniclip para recibir notificaciones de nuevas actualizaciones o características. Para actualizar el apk mod, es necesario desinstalar la versión anterior e instalar la nueva versión siguiendo los mismos pasos que antes. </p>
|
75 |
-
<h4>¿Puedo jugar 8 bola piscina 5.8.0 mod apk offline? </h4>
|
76 |
-
|
77 |
-
<h4> ¿Puedo jugar 8 bola piscina 5.8.0 Mod Apk con mis amigos? </h4>
|
78 |
-
<p>Sí, se puede jugar 8 Ball Pool 5.8.0 Mod Apk con tus amigos que también tienen el mismo mod apk instalado en sus dispositivos. Puedes invitarlos a unirse a tu club o retarlos a un partido usando el chat en el juego o las plataformas de redes sociales. Sin embargo, no puedes jugar con tus amigos que tienen la aplicación original o un apk mod diferente, ya que no podrán conectarse contigo o ver tus características modificadas. </p> 64aa2da5cf<br />
|
79 |
-
<br />
|
80 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Brain Test 360.md
DELETED
@@ -1,54 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Brain Test 360: Una forma divertida y desafiante de entrenar tu cerebro</h1>
|
3 |
-
<p>¿Quieres aumentar tu poder cerebral, aprender cosas nuevas y divertirte al mismo tiempo? Si es así, deberías probar Brain Test 360, un juego móvil que combina puzzles, trivia y un modelo cerebral en 3D. En este artículo, te diremos qué es Brain Test 360, por qué deberías jugarlo, cómo jugarlo, y algunos consejos y trucos para ayudarte a tener éxito. </p>
|
4 |
-
<h2>brain test 360</h2><br /><p><b><b>DOWNLOAD</b> 🔗 <a href="https://bltlly.com/2v6J2l">https://bltlly.com/2v6J2l</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Brain Test 360? </h2>
|
6 |
-
<p>Brain Test 360 es un juego móvil que pone a prueba tus habilidades de lógica, creatividad y resolución de problemas. Tiene dos modos: modo rompecabezas y modo cerebro. En el modo rompecabezas, tienes que resolver varios tipos de rompecabezas que van desde fácil a difícil. Algunos rompecabezas se basan en matemáticas, lógica o palabras, mientras que otros se basan en pistas visuales, sentido común o humor. Tienes que tocar, deslizar, agitar o inclinar el teléfono para encontrar la respuesta. En el modo cerebro, puede explorar un modelo cerebral en 3D que le permite ver la anatomía y las funciones del cerebro. Puedes rotar, acercar o alejar el modelo, y tocar en diferentes partes del cerebro para aprender más sobre ellos. También puedes hacer pruebas para probar tu conocimiento del cerebro. </p>
|
7 |
-
<h2>¿Por qué deberías jugar Brain Test 360? </h2>
|
8 |
-
<p>Hay muchos beneficios de jugar Brain Test 360. Aquí están algunos de ellos:</p>
|
9 |
-
<h3>Mejora tus capacidades cognitivas y salud mental</h3>
|
10 |
-
<p>Jugar Brain Test 360 puede ayudarte a mejorar tu memoria, atención, concentración, lógica, creatividad y habilidades para resolver problemas. Estos son esenciales para su éxito académico, profesional y personal. Jugar Brain Test 360 también puede ayudarte a reducir el estrés, la ansiedad, la depresión y el aburrimiento. También puede aumentar su confianza en sí mismo, felicidad y motivación. </p>
|
11 |
-
<h3>Te entretiene con rompecabezas divertidos y difíciles</h3>
|
12 |
-
|
13 |
-
<h3>Te educa sobre el cerebro y la neurociencia</h3>
|
14 |
-
<p>Jugar Brain Test 360 también puede ser una gran manera de aprender cosas nuevas sobre el cerebro y la neurociencia. El modo cerebro le permite explorar la estructura y función del cerebro de una manera interactiva. Puedes aprender sobre las diferentes partes del cerebro, como el cerebro, el cerebelo, el tronco cerebral, el sistema límbico, etc., y cómo afectan tus emociones, pensamientos, comportamientos, etc. También puedes aprender sobre algunos trastornos cerebrales comunes, como la enfermedad de Alzheimer, Enfermedad de Parkinson, accidente cerebrovascular, etc., y cómo afectan al cerebro. </p>
|
15 |
-
<p></p>
|
16 |
-
<h2>¿Cómo se juega Brain Test 360? </h2>
|
17 |
-
<p>Jugar Brain Test 360 es fácil y simple. Estos son los pasos:</p>
|
18 |
-
<h3>Descargar el juego desde la App Store o Google Play</h3>
|
19 |
-
<p>El juego está disponible para dispositivos iOS y Android. Puedes descargarlo gratis desde la App Store o Google Play. El juego tiene un tamaño de unos 100 MB y requiere una conexión a Internet para jugar. </p>
|
20 |
-
<h3>Elija entre el modo de puzzle o el modo cerebro</h3>
|
21 |
-
<p>Una vez que abra el juego, puede elegir entre el modo de rompecabezas o el modo cerebro. Puede cambiar entre ellos en cualquier momento pulsando en los iconos en la parte inferior de la pantalla. El modo rompecabezas tiene más de 200 niveles, mientras que el modo cerebro tiene más de 50 cuestionarios. También puede ver su progreso, logros y ajustes tocando el icono del menú en la esquina superior izquierda de la pantalla. </p>
|
22 |
-
<h3>Resolver los puzzles o interactuar con el modelo del cerebro</h3>
|
23 |
-
<p>En el modo de rompecabezas, tienes que resolver los puzzles usando el dedo para tocar, deslizar, agitar o inclinar el teléfono. Tienes que leer la pregunta cuidadosamente y buscar pistas en la imagen. A veces, tienes que pensar fuera de la caja y usar tu imaginación. Si te quedas atascado, puedes usar pistas o ver vídeos para obtener ayuda. También puedes saltarte un nivel si quieres. Ganarás monedas por cada puzzle que resuelvas, que puedes usar para comprar más pistas o desbloquear más niveles. </p>
|
24 |
-
|
25 |
-
<h3>Ganar monedas y desbloquear más niveles y características</h3>
|
26 |
-
<p>Mientras juegas Brain Test 360, ganarás monedas que puedes usar para desbloquear más niveles y características. También puedes obtener más monedas viendo anuncios, valorando el juego o invitando a tus amigos a jugar. Algunas de las características que puedes desbloquear son:</p>
|
27 |
-
<ul>
|
28 |
-
<li>Un modo nocturno que cambia la combinación de colores del juego</li>
|
29 |
-
<li>Un modo de sonido que reproduce música relajante y sonidos mientras juegas</li>
|
30 |
-
<li>Una opción de idioma que te permite elegir entre inglés, español, francés, alemán, italiano, portugués, turco, ruso, árabe, hindi, japonés, coreano o chino</li>
|
31 |
-
<li>Una opción de retroalimentación que te permite enviar tus comentarios o sugerencias a los desarrolladores</li>
|
32 |
-
</ul>
|
33 |
-
<h2>Consejos y trucos para Brain Test 360</h2>
|
34 |
-
<p>Aquí hay algunos consejos y trucos para ayudarle a disfrutar de Brain Test 360 más:</p>
|
35 |
-
<h3>Piensa fuera de la caja y usa tu imaginación</h3>
|
36 |
-
<p>Algunos de los puzzles de Brain Test 360 no son tan sencillos como parecen. Tienes que pensar creativamente y usar tu imaginación para encontrar la respuesta. Por ejemplo, a veces tienes que inclinar el teléfono para cambiar la perspectiva, o agitar el teléfono para hacer caer algo. A veces hay que buscar objetos ocultos o palabras en la imagen, o combinar dos elementos para crear uno nuevo. A veces hay que romper las reglas o hacer algo inesperado. No tengas miedo de probar cosas diferentes y experimentar con diferentes soluciones. </p>
|
37 |
-
<h3>Usa pistas o mira videos si te quedas atascado</h3>
|
38 |
-
<p>Si te quedas atascado en un rompecabezas o un examen, no te rindas. Puedes usar sugerencias o ver videos para obtener ayuda. Las pistas le darán una pista o una sugerencia sobre cómo resolver el rompecabezas o responder a la pregunta. Los vídeos le mostrarán la solución paso a paso. Puedes comprar pistas con monedas que ganas jugando al juego, o ver anuncios para obtener pistas gratis. También puedes ver anuncios para obtener videos gratis. </p>
|
39 |
-
<h3>Aprende de tus errores e inténtalo de nuevo</h3>
|
40 |
-
|
41 |
-
<h2>Conclusión</h2>
|
42 |
-
<p>Brain Test 360 es un juego móvil que pone a prueba tus habilidades de lógica, creatividad y resolución de problemas con puzles y trivia. También te permite explorar un modelo cerebral en 3D que te enseña sobre la anatomía y las funciones del cerebro. Jugar a Brain Test 360 puede mejorar tus habilidades cognitivas y tu salud mental, entretenerte con rompecabezas divertidos y complicados, y educarte sobre el cerebro y la neurociencia. Es fácil y simple de jugar, pero también desafiante y divertido. Puedes descargarlo gratis desde la App Store o Google Play y empezar a jugar de inmediato. </p>
|
43 |
-
<p>Si estás buscando una forma divertida y desafiante de entrenar tu cerebro, Brain Test 360 es el juego para ti. </ <p>Aquí hay algunas preguntas frecuentes que puede tener sobre Brain Test 360:</p>
|
44 |
-
<h4>Q: ¿Cómo puedo contactar a los desarrolladores de Brain Test 360? </h4>
|
45 |
-
<p>A: Puede ponerse en contacto con los desarrolladores de Brain Test 360 enviando un correo electrónico a [email protected]. También puedes seguirlos en Facebook, Twitter o Instagram para obtener las últimas noticias y actualizaciones sobre el juego. </p>
|
46 |
-
<h4>P: ¿Cómo puedo compartir mis comentarios o sugerencias para Brain Test 360? </h4>
|
47 |
-
<p>A: Puedes compartir tus comentarios o sugerencias para Brain Test 360 usando la opción de comentarios en el juego. También puedes calificar y revisar el juego en la App Store o Google Play, o dejar un comentario en sus páginas de redes sociales. </p>
|
48 |
-
<h4>Q: ¿Cómo puedo jugar Brain Test 360 con mis amigos? </h4>
|
49 |
-
<p>A: Puedes jugar a Brain Test 360 con tus amigos invitándolos a descargar el juego y unirse a ti. También puedes comparar tus puntuaciones y logros con ellos, y desafiarlos a resolver los puzzles o tomar los exámenes. </p>
|
50 |
-
<h4>P: ¿Cómo puedo obtener más monedas en Brain Test 360? </h4>
|
51 |
-
<p>A: Puedes obtener más monedas en Brain Test 360 resolviendo puzzles, completando concursos, viendo anuncios, valorando el juego o invitando a tus amigos. También puedes comprar monedas con dinero real si quieres. </p>
|
52 |
-
<h4>Q: ¿Cómo puedo apagar el sonido o la música en Brain Test 360? </h4> 64aa2da5cf<br />
|
53 |
-
<br />
|
54 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Cmo Descargar Minecraft De Prueba En El Ordenador Porttil.md
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar el modo creativo de prueba de Minecraft gratis</h1>
|
3 |
-
<p>Minecraft es uno de los juegos más populares y creativos del mundo, donde puedes explorar mundos infinitos y construir cualquier cosa que puedas imaginar. ¿Pero sabías que puedes probarlo gratis antes de comprar la versión completa? En este artículo, te mostraremos cómo descargar Minecraft Trial Creative Mode gratis en diferentes dispositivos y cómo disfrutarlo al máximo. </p>
|
4 |
-
<h2>¿Qué es el modo creativo de prueba de Minecraft? </h2>
|
5 |
-
<p>Minecraft Trial Creative Mode es una versión gratuita y por tiempo limitado de Minecraft que te permite experimentar el juego en modo creativo, donde tienes recursos ilimitados y puedes construir lo que quieras sin preocuparte por la supervivencia. También puedes cambiar al modo de supervivencia, donde tienes que crear armas y armaduras para defenderte de las peligrosas turbas, pero tendrás un tiempo limitado de 90 minutos por mundo. </p>
|
6 |
-
<h2>cómo descargar minecraft de prueba en el ordenador portátil</h2><br /><p><b><b>Download Zip</b> ✯✯✯ <a href="https://bltlly.com/2v6MVg">https://bltlly.com/2v6MVg</a></b></p><br /><br />
|
7 |
-
<h3>La diferencia entre la supervivencia y los modos creativos</h3>
|
8 |
-
<p>En el modo de supervivencia, tienes que reunir recursos, crear herramientas, luchar contra los enemigos y gestionar tu hambre y salud. También tienes que lidiar con ciclos diurnos y nocturnos, cambios climáticos y criaturas hostiles. El modo de supervivencia es más desafiante y realista, pero también más gratificante cuando logras tus objetivos. </p>
|
9 |
-
<p>En el modo creativo, tienes recursos ilimitados y puedes volar alrededor del mundo. Puedes construir lo que quieras sin restricciones ni peligros. También puede generar cualquier mob o artículo que desee utilizando comandos o menús de inventario. El modo creativo es más relajante y divertido, pero también menos inmersivo y emocionante. </p>
|
10 |
-
<h3>Los beneficios de jugar en modo creativo</h3>
|
11 |
-
|
12 |
-
<h3>Limitaciones de la versión de prueba</h3>
|
13 |
-
<p>Si bien Minecraft Trial Creative Mode es una buena manera de probar el juego de forma gratuita, también tiene algunas limitaciones que debes conocer. Por ejemplo: resultado/p>
|
14 |
-
<ul>
|
15 |
-
<li>Solo puedes jugar 90 minutos por mundo. Después de eso, todavía puedes ver tu mundo, pero no puedes interactuar con él o hacer ningún cambio. </li>
|
16 |
-
<li>No puedes guardar o cargar tus mundos. Si sales del juego o cambias de dispositivo, perderás tu progreso. </li>
|
17 |
-
<li>No puede acceder a funciones multijugador o en línea. Solo puede jugar en solitario o con pantalla dividida en Windows 10. </li>
|
18 |
-
<li>No puedes personalizar tu personaje o cambiar tu piel. Estás atascado con la piel predeterminada de Steve o Alex. </li>
|
19 |
-
<li>No puede acceder a todas las características y contenido de la versión completa. Por ejemplo, no puede usar bloques de comandos, bloques de estructura o paquetes de datos. </li>
|
20 |
-
</ul>
|
21 |
-
<p>Si quieres disfrutar de la experiencia completa de Minecraft, incluyendo modo creativo, multijugador, servidores en línea, skins personalizadas, mods, mapas y más, tendrás que comprar el juego en cualquier momento durante o después de tu prueba. </p>
|
22 |
-
<h2>Cómo descargar Minecraft Trial Creative Mode para diferentes dispositivos</h2>
|
23 |
-
<p>Minecraft Trial Creative Mode está disponible para dispositivos Windows 10 y Android. Estos son los pasos para descargarlo para cada dispositivo <h3>Para Windows 10</h3>
|
24 |
-
<p>Si tienes un PC con Windows 10, puedes descargar Minecraft Trial Creative Mode desde Microsoft Store. Estos son los pasos para hacerlo:</p>
|
25 |
-
<h4>Paso 1: Ir a la tienda de Microsoft</h4>
|
26 |
-
<p>Abra la aplicación Microsoft Store en su PC. Puede encontrarlo escribiendo "Microsoft Store" en la barra de búsqueda o haciendo clic en el icono de la barra de tareas o en el menú Inicio. </p>
|
27 |
-
<h4>Paso 2: Búsqueda de Minecraft para Windows 10</h4>
|
28 |
-
<p>En la aplicación de Microsoft Store, escriba "Minecraft para Windows 10" en el cuadro de búsqueda y pulse enter. Debería ver el juego en los resultados. Haga clic en él para abrir su página. </p>
|
29 |
-
<p></p>
|
30 |
-
<h4>Paso 3: Haga clic en el botón de prueba gratuita</h4>
|
31 |
-
|
32 |
-
<h4>Paso 4: Instalar y lanzar el juego</h4>
|
33 |
-
<p>Una vez completada la descarga, puede instalar y lanzar el juego haciendo clic en el botón "Jugar". También puedes encontrar el juego en tu biblioteca o en tu escritorio. ¡Disfruta de Minecraft Trial Creative Mode gratis! </p>
|
34 |
-
<h3>Para Android</h3>
|
35 |
-
<p>Si tienes un dispositivo Android, puedes descargar Minecraft Trial Creative Mode desde Google Play Store. Estos son los pasos para hacerlo:</p>
|
36 |
-
<h4>Paso 1: Ir a la tienda de Google Play</h4>
|
37 |
-
<p>Abra la aplicación Google Play Store en su dispositivo. Puedes encontrarlo deslizando el dedo hacia arriba desde la parte inferior de la pantalla o tocando el icono en el cajón de la aplicación. </p>
|
38 |
-
<h4>Paso 2: Búsqueda de prueba de Minecraft</h4>
|
39 |
-
<p>En la aplicación Google Play Store, escriba "Minecraft Trial" en el cuadro de búsqueda y toque en el icono de la lupa. Deberías ver el juego en los resultados. Toca en él para abrir su página. </p>
|
40 |
-
<h4>Paso 3: Toque en el botón de instalación</h4>
|
41 |
-
<p>En la página del juego, debería ver un botón que dice "Instalar". Toque en él para comenzar a descargar el juego. Es posible que necesite aceptar algunos permisos o aceptar algunos términos y condiciones antes de continuar. </p>
|
42 |
-
<h4>Paso 4: Abre y juega el juego</h4>
|
43 |
-
<p>Una vez completada la descarga, puedes abrir y jugar el juego tocando el botón "Abrir". También puede encontrar el juego en su lista de aplicaciones o en la pantalla de inicio. Diviértase jugando Minecraft Trial Creative Mode gratis! </p>
|
44 |
-
<h2>Cómo disfrutar al máximo del modo creativo de prueba de Minecraft</h2>
|
45 |
-
<p>Minecraft Trial Creative Mode es una gran manera de explorar y crear en Minecraft, pero también tiene algunas limitaciones y desafíos. Aquí hay algunos consejos y trucos para ayudarle a disfrutar al máximo:</p>
|
46 |
-
<h3>Consejos y trucos para construir estructuras sorprendentes</h3>
|
47 |
-
<p>El modo creativo te da recursos ilimitados y libertad para construir lo que quieras, pero también requiere algo de planificación y creatividad. Aquí hay algunos consejos y trucos para construir estructuras increíbles:</p>
|
48 |
-
<ul>
|
49 |
-
|
50 |
-
<li>Utilice comandos o menús de inventario para generar cualquier bloque o elemento que desee. También puede usar comandos para llenar áreas grandes con bloques, clonar estructuras existentes o teletransportarse. </li>
|
51 |
-
<li>Utilice redstone, pistones, palancas, botones, placas de presión, observadores, tolvas, dispensadores, cuentagotas y otros bloques y elementos para crear mecanismos y artilugios que se pueden mover, activar o interactuar con otras cosas. </li>
|
52 |
-
<li>Utilice mapas, carteles, pancartas, pinturas, marcos de artículos, armaduras, cabezas, libros y otros bloques y artículos para decorar y etiquetar sus construcciones. También puede usar comandos o menús de inventario para personalizarlos. </li>
|
53 |
-
<li>Utilice bloques de estructura o paquetes de datos para importar o exportar estructuras de otros mundos o fuentes en línea. También puede usarlos para guardar y cargar sus propias estructuras. </li>
|
54 |
-
<li>Utilice la capacidad de vuelo del modo creativo para construir más rápido y más fácil. También puede usar comandos o menús de inventario para cambiar su modo de juego, hora del día, clima, dificultad u otros ajustes. </li>
|
55 |
-
<li>Usa recursos en línea como tutoriales, guías, videos, imágenes, foros, wikis, blogs o sitios web para obtener inspiración e ideas para tus compilaciones. También puede utilizarlos para aprender nuevas técnicas y consejos. </li>
|
56 |
-
</ul>
|
57 |
-
<h3>Cómo cambiar entre modos creativos y de supervivencia</h3>
|
58 |
-
<p>El modo creativo es el modo creativo es el modo predeterminado para Minecraft Trial, pero también puedes cambiar al modo de supervivencia si quieres experimentar el juego de una manera diferente. Estos son los pasos para hacerlo:</p>
|
59 |
-
<ul>
|
60 |
-
<li>Abra el menú de pausa presionando la tecla Esc en su teclado o tocando el icono de pausa en su pantalla. </li>
|
61 |
-
<li>Seleccione la opción "Configuración" en el menú. </li>
|
62 |
-
<li>Seleccione la opción "Juego" en el menú de configuración. </li>
|
63 |
-
<li>Seleccione la opción "Gamemode" en el menú de configuración del juego. </li>
|
64 |
-
<li>Seleccione la opción "Supervivencia" en el menú del modo de juego. </li>
|
65 |
-
<li>Confirme su elección haciendo clic o tocando el botón "Hecho". </li>
|
66 |
-
</ul>
|
67 |
-
|
68 |
-
<h3>Cómo acceder a funciones multijugador y online</h3>
|
69 |
-
<p>Minecraft Trial Creative Mode no admite funciones multijugador o en línea, como jugar con amigos, unirse a servidores o descargar mapas y mods. Sin embargo, todavía se puede disfrutar de algunas de estas características mediante la compra de la versión completa de Minecraft o mediante el uso de otros métodos. Aquí hay algunas formas de acceder a las funciones multijugador y online:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Si tiene un PC con Windows 10, puede jugar con hasta tres amigos en el modo de pantalla dividida. Para ello, es necesario conectar controladores o teclados adicionales a su PC, y luego seleccione la opción "Pantalla dividida" en el menú principal. </li>
|
72 |
-
<li>Si tiene un dispositivo Android, puede usar aplicaciones o herramientas de terceros para unirse a servidores o descargar mapas y mods. Sin embargo, estos métodos no son oficiales o soportados por Mojang, y pueden no funcionar correctamente o con seguridad. Utilícelos bajo su propio riesgo y discreción. </li>
|
73 |
-
<li>Si quieres jugar con amigos, unirte a servidores, descargar mapas y mods, y acceder a otras funciones en línea de una manera segura y oficial, tendrás que comprar la versión completa de Minecraft. Puedes hacerlo en cualquier momento durante o después de tu juicio haciendo clic o tocando el botón "Desbloquear juego completo" en el menú principal o en la configuración del juego. </li>
|
74 |
-
</ul>
|
75 |
-
<h2>Conclusión</h2>
|
76 |
-
<p>Minecraft Trial Creative Mode es una versión gratuita y por tiempo limitado de Minecraft que te permite experimentar el juego en modo creativo, donde tienes recursos ilimitados y puedes construir lo que quieras. Es una gran manera de probar el juego de forma gratuita antes de comprar la versión completa, pero también tiene algunas limitaciones y desafíos. En este artículo, te mostramos cómo descargar Minecraft Trial Creative Mode gratis en diferentes dispositivos y cómo disfrutarlo al máximo. Esperamos que hayas encontrado este artículo útil e informativo, y que te diviertas jugando Minecraft Trial Creative Mode! </p>
|
77 |
-
<h2>Preguntas frecuentes</h2>
|
78 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Minecraft Trial Creative Mode:</p>
|
79 |
-
|
80 |
-
<li>Q: ¿Cuánto tiempo puedo jugar Minecraft Trial Creative Mode gratis? <br>A: Puedes jugar Minecraft Trial Creative Mode gratis durante 90 minutos por mundo. Después de eso, todavía puedes ver tu mundo, pero no puedes interactuar con él o hacer ningún cambio. </li>
|
81 |
-
<li>Q: ¿Puedo guardar o cargar mis mundos en el modo creativo de prueba de Minecraft? <br>A: No, no puede guardar o cargar sus mundos en el modo creativo de prueba de Minecraft. Si sale del juego o cambia de dispositivo, perderá su progreso. </li>
|
82 |
-
<li>Q: ¿Puedo jugar con amigos o unirme a servidores en Minecraft Trial Creative Mode? <br>A: No, no puede jugar con amigos o unirse a servidores en el modo creativo de prueba de Minecraft. Solo puede jugar en solitario o con pantalla dividida en Windows 10. </li>
|
83 |
-
<li>Q: ¿Puedo personalizar mi personaje o cambiar mi piel en el modo creativo de prueba de Minecraft? <br>A: No, no puedes personalizar tu personaje o cambiar tu piel en el modo creativo de prueba de Minecraft. Estás atascado con la piel predeterminada de Steve o Alex. </li>
|
84 |
-
<li>Q: ¿Puedo acceder a todas las características y contenido de la versión completa en Minecraft Trial Creative Mode? <br>A: No, no se puede acceder a todas las características y contenido de la versión completa en Minecraft Trial Creative Mode. Por ejemplo, no puede usar bloques de comandos, bloques de estructura, paquetes de datos, mods, mapas, skins, multijugador, servidores en línea y más. </li>
|
85 |
-
</ol></p> 64aa2da5cf<br />
|
86 |
-
<br />
|
87 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Base De Datos Oracle 11g.md
DELETED
@@ -1,94 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar la base de datos de Oracle 11g</h1>
|
3 |
-
<p>Si está buscando un sistema de gestión de bases de datos relacionales confiable, seguro y escalable, es posible que desee considerar Oracle Database 11g. Este artículo le guiará a través del proceso de descarga, instalación y actualización a Oracle Database 11g, así como explicar algunas de sus características y beneficios. </p>
|
4 |
-
<h2>descargar base de datos Oracle 11g</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://bltlly.com/2v6Lwc">https://bltlly.com/2v6Lwc</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Oracle Database 11g? </h2>
|
6 |
-
<p>Oracle Database 11g es una versión de Oracle Database que fue lanzada en 2007 y ha sido ampliamente utilizada por muchas organizaciones y desarrolladores. Es una plataforma de base de datos completa e integrada que admite varios tipos de datos, idiomas y aplicaciones. También ofrece muchas características que permiten la adaptabilidad, automatización y seguridad. </p>
|
7 |
-
<h3>Características y beneficios de Oracle Database 11g</h3>
|
8 |
-
<p>Algunas de las características y beneficios de Oracle Database 11g son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Admite actualizaciones de aplicaciones en línea, lo que significa que puede aplicar parches y cambios a sus aplicaciones sin tiempo de inactividad o interrupción. </li>
|
11 |
-
<li> Tiene una capacidad de autogestión, lo que significa que puede monitorear, sintonizar y optimizar automáticamente su rendimiento y uso de recursos. </li>
|
12 |
-
<li> Tiene una función de alta disponibilidad, lo que significa que puede recuperarse de fallas y desastres de forma rápida y sin problemas. </li>
|
13 |
-
<li> Tiene una función de almacenamiento de datos, lo que significa que puede almacenar, analizar y visualizar grandes volúmenes de datos de manera eficiente y efectiva. </li>
|
14 |
-
<li> Tiene una función de seguridad, lo que significa que puede proteger sus datos de acceso no autorizado, cifrado, auditoría y cumplimiento. </li>
|
15 |
-
</ul>
|
16 |
-
<h3>Requisitos y compatibilidad de Oracle Database 11g</h3>
|
17 |
-
<p>Antes de descargar Oracle Database 11g, debe asegurarse de que su sistema cumple con los requisitos mínimos y es compatible con el software. Algunos de los requisitos y factores de compatibilidad son:</p>
|
18 |
-
<p></p>
|
19 |
-
<ul>
|
20 |
-
<li> Necesita tener al menos 1 GB de RAM y 5 GB de espacio en disco para la instalación del software. </li>
|
21 |
-
|
22 |
-
<li>Necesita tener un navegador web compatible, como Internet Explorer, Firefox, Chrome o Safari.</li>
|
23 |
-
<li>Necesita tener una herramienta de desarrollo compatible, como SQL Developer, Application Express, Java, PHP o .NET.</li>
|
24 |
-
</ul>
|
25 |
-
<h2>Cómo descargar el software Oracle Database 11g</h2>
|
26 |
-
<p>Una vez que haya comprobado los requisitos y la compatibilidad de su sistema, puede proceder a descargar el software Oracle Database 11g desde el sitio web oficial. Estos son los pasos que debes seguir:</p>
|
27 |
-
<h3>Paso 1: Elija la versión y plataforma correctas para sus necesidades</h3>
|
28 |
-
<p>El primer paso es elegir la versión y plataforma adecuada para sus necesidades. Existen dos versiones principales de Oracle Database 11g: Enterprise Edition y Express Edition. Enterprise Edition es la versión con todas las funciones y opciones disponibles. Express Edition es la versión gratuita de nivel de entrada que tiene una huella pequeña y características limitadas. Puede comparar las características de ambas versiones <a href="">here</a>. </p>
|
29 |
-
<p>El siguiente paso es elegir la plataforma adecuada para su sistema operativo. Hay diferentes descargas para diferentes plataformas, como Windows x64 (64 bits), Linux x86-64 (64 bits) y Solaris, entre otras. </p> <h3>Paso 2: Registrarse para una cuenta gratuita de Oracle</h3>
|
30 |
-
<p>El segundo paso es registrarse para una cuenta gratuita de Oracle si no tiene una ya. Necesita una cuenta de Oracle para descargar el software y acceder a otros recursos y servicios. Para registrarse en una cuenta de Oracle, debe proporcionar información básica, como su nombre, dirección de correo electrónico, país y contraseña. Puede registrarse para una cuenta de Oracle <a href="">here</a>. </p>
|
31 |
-
<h3>Paso 3: Acepte el acuerdo de licencia y descargue el software</h3>
|
32 |
-
|
33 |
-
<h3>Paso 4: Verificar la integridad de los archivos descargados</h3>
|
34 |
-
<p>El cuarto paso es verificar la integridad de los archivos descargados. Debe asegurarse de que los archivos que descargó no estén dañados o manipulados. Puede hacer esto comparando la suma de verificación de cada archivo con la suma de verificación proporcionada en la página de descarga. Una suma de comprobación es un código único que identifica un archivo y su contenido. Puede utilizar una herramienta como MD5 o SHA-1 para generar y comparar sumas de comprobación. Puede encontrar más información sobre cómo verificar checksums <a href="">here</a>. </p>
|
35 |
-
<h2>Cómo instalar el software Oracle Database 11g</h2>
|
36 |
-
<p>Después de haber descargado y verificado los archivos, puede proceder a instalar el software Oracle Database 11g en su sistema. Estos son los pasos que debes seguir:</p>
|
37 |
-
<h3>Paso 1: Extraer los archivos descargados y ejecutar el programa de configuración</h3>
|
38 |
-
<p>El primer paso es extraer los archivos descargados y ejecutar el programa de configuración. Debe descomprimir o descomprimir los archivos en una carpeta de su sistema. Dependiendo de su plataforma, puede tener uno o más archivos para extraer. Después de extraer los archivos, debe ejecutar el programa de configuración que inicia el proceso de instalación. El programa de configuración puede tener diferentes nombres dependiendo de la plataforma, como setup.exe, runInstaller o install.sh. </p>
|
39 |
-
<h3>Paso 2: Siga el asistente de instalación y configure las opciones de la base de datos</h3>
|
40 |
-
<p>El segundo paso es seguir el asistente de instalación y configurar las opciones de la base de datos. El asistente de instalación lo guiará a través de una serie de pasos donde puede elegir y personalizar varios aspectos de la instalación de su base de datos, como:</p>
|
41 |
-
<ul>
|
42 |
-
<li>El tipo de instalación: Típica, Avanzada o Personalizada.</li>
|
43 |
-
<li>La carpeta de destino: La ubicación donde desea instalar el software. </li>
|
44 |
-
<li>El nombre de la base de datos global: El nombre de la instancia de la base de datos. </li>
|
45 |
-
<li>La contraseña administrativa: La contraseña para su cuenta de administrador de base de datos. </li>
|
46 |
-
|
47 |
-
<li>La opción de memoria: Gestión de memoria automática o manual. </li>
|
48 |
-
<li>La opción de seguridad: Activar o desactivar las actualizaciones de seguridad. </li>
|
49 |
-
</ul>
|
50 |
-
<p>Puede encontrar más información sobre cómo instalar el software Oracle Database 11g <a href="">aquí</a>. </p>
|
51 |
-
<h3>Paso 3: Pruebe la conexión de la base de datos y comience a usar Oracle Database 11g</h3>
|
52 |
-
<p>El tercer paso es probar la conexión de la base de datos y comenzar a usar Oracle Database 11g. Después de completar el asistente de instalación, verá un resumen de los detalles de su instalación y un mensaje de confirmación de que su base de datos está lista para usar. Puede probar la conexión de su base de datos utilizando una herramienta como SQL*Plus o SQL Developer. También puede acceder a su base de datos desde su navegador web mediante una herramienta como Enterprise Manager o Application Express. Puede encontrar más información sobre cómo usar Oracle Database 11g <a href="">aquí</a>. </p>
|
53 |
-
<h2>Cómo actualizar a Oracle Database 11g desde versiones anteriores</h2>
|
54 |
-
<p>Si ya tiene una versión anterior de Oracle Database instalada en su sistema, es posible que desee actualizar a Oracle Database 11g para aprovechar sus nuevas características y mejoras. Aquí hay algunos consejos sobre cómo actualizar a Oracle Database 11g de versiones anteriores:</p>
|
55 |
-
<h3>Métodos y consideraciones de actualización</h3>
|
56 |
-
<p>Existen diferentes métodos y consideraciones para actualizar a Oracle Database 11g dependiendo de su versión actual, plataforma y entorno. Algunos de los métodos comunes son:</p>
|
57 |
-
<ul>
|
58 |
-
<li>Database Upgrade Assistant (DBUA): Una herramienta gráfica de interfaz de usuario que automatiza y simplifica el proceso de actualización <li>Actualización manual: Un procedimiento paso a paso que requiere más intervención y personalización del usuario</li>
|
59 |
-
<li>Exportar/importar: un método que implica exportar datos de la base de datos de origen e importarlos en la base de datos de destino</li>
|
60 |
-
<li>Data Pump: método que utiliza una utilidad para transferir datos y metadatos entre bases de datos</li>
|
61 |
-
</ul>
|
62 |
-
<p>Algunas de las consideraciones son:</p>
|
63 |
-
<ul>
|
64 |
-
|
65 |
-
<li>El tiempo de inactividad y la disponibilidad de su base de datos durante el proceso de actualización</li>
|
66 |
-
<li> La estrategia de copia de seguridad y recuperación de su base de datos antes y después de la actualización</li>
|
67 |
-
<li>Las pruebas y la validación de la funcionalidad y el rendimiento de la base de datos después de la actualización</li>
|
68 |
-
</ul>
|
69 |
-
<h3>Pasos de actualización y mejores prácticas</h3>
|
70 |
-
<p>Hay algunos pasos generales y las mejores prácticas que debe seguir al actualizar a Oracle Database 11g de versiones anteriores. Algunos de ellos son:</p>
|
71 |
-
<ol>
|
72 |
-
<li>Analiza tu base de datos actual e identifica los requisitos y objetivos de actualización</li>
|
73 |
-
<li>Elija el método de actualización adecuado y planifique el proceso de actualización</li>
|
74 |
-
<li>Prepare su sistema y entorno para la actualización, como instalar el software, comprobar los requisitos previos y crear copias de seguridad</li>
|
75 |
-
<li>Realice la actualización según el método elegido y supervise el progreso y el estado</li>
|
76 |
-
<li>Verificar los resultados de la actualización y resolver cualquier problema o error</li>
|
77 |
-
<li>Ajuste y optimice su base de datos después de la actualización, como aplicar parches, configurar parámetros y recopilar estadísticas</li>
|
78 |
-
</ol>
|
79 |
-
<h2>Conclusión</h2>
|
80 |
-
<p>En este artículo, hemos aprendido cómo descargar, instalar y actualizar a Oracle Database 11g. También hemos discutido algunas de sus características, beneficios, requisitos y compatibilidad. Oracle Database 11g es una plataforma de base de datos potente y versátil que puede ayudarle a gestionar sus datos de forma eficaz y eficiente. Si desea obtener más información sobre Oracle Database 11g, puede visitar el sitio web oficial <a href="">here</a>. </p>
|
81 |
-
<h2>Preguntas frecuentes (preguntas frecuentes)</h2>
|
82 |
-
<p>Estas son algunas de las preguntas más frecuentes (FAQs) sobre Oracle Database 11g:</p>
|
83 |
-
<h3>Q: ¿Cómo puedo obtener una licencia para Oracle Database 11g? </h3>
|
84 |
-
|
85 |
-
<h3>Q: ¿Cómo puedo actualizar o parchear Oracle Database 11g? </h3>
|
86 |
-
<p>A: Puede actualizar o parchear Oracle Database 11g utilizando una herramienta como Oracle Universal Installer (OUI) o OPatch. También puede utilizar un servicio como My Oracle Support (MOS) o Oracle Enterprise Manager (OEM) para descargar y aplicar actualizaciones o parches. Puede encontrar más información sobre cómo actualizar o parchear Oracle Database 11g <a href="">aquí</a>. </p>
|
87 |
-
<h3>Q: ¿Cómo puedo desinstalar o eliminar Oracle Database 11g? </h3>
|
88 |
-
<p>A: Puede desinstalar o quitar Oracle Database 11g utilizando una herramienta como Oracle Universal Installer (OUI) o deinstall. También puede eliminar manualmente los archivos y carpetas relacionados con Oracle Database 11g de su sistema. Puede encontrar más información sobre cómo desinstalar o eliminar Oracle Database 11g <a href="">here</a>. </p>
|
89 |
-
<h3>P: ¿Cómo puedo conectarme a Oracle Database 11g desde otras aplicaciones? </h3>
|
90 |
-
<p>A: Puede conectarse a Oracle Database 11g desde otras aplicaciones utilizando un controlador o conector que admita el lenguaje de la aplicación o el framework. Por ejemplo, puede usar JDBC para Java, ODBC para C/C++, OCI para C/C++, PHP OCI8 para PHP, cx_Oracle para Python, ruby-oci8 para Ruby, etc. Puede encontrar más información sobre cómo conectarse a Oracle Database 11g desde otras aplicaciones <a href="">aquí</a>. </p>
|
91 |
-
<h3>Q: ¿Cómo puedo aprender más sobre Oracle Database 11g? </h3>
|
92 |
-
<p>A: Puede obtener más información sobre Oracle Database 11g leyendo la documentación, tutoriales, blogs, foros, libros, cursos, videos, podcasts, seminarios web, etc. que están disponibles en línea. También puede unirse a la comunidad de Oracle e interactuar con otros usuarios y expertos. Puede encontrar más recursos sobre cómo obtener más información sobre Oracle Database 11g <a href="">here</a>. </p> 64aa2da5cf<br />
|
93 |
-
<br />
|
94 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat_new/src/lib/types/Timestamps.ts
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
/**
 * Creation/update timestamp fields shared by persisted entities.
 * Both values are `Date` objects (not serialized strings).
 */
export interface Timestamps {
	// When the record was first created.
	createdAt: Date;
	// When the record was last modified.
	updatedAt: Date;
}
|
|
|
|
|
|
|
|
|
|
spaces/CALM/Dashboard/streamlit_observable/frontend/build/static/js/main.fc603b94.chunk.js
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
/*! For license information please see main.fc603b94.chunk.js.LICENSE.txt */
|
2 |
-
(this.webpackJsonpstreamlit_component_template=this.webpackJsonpstreamlit_component_template||[]).push([[0],{18:function(e,_,t){"use strict";t.d(_,"b",(function(){return h})),t.d(_,"c",(function(){return E})),t.d(_,"a",(function(){return p}));var r,a=t(0),n=t(4),o=t(2),s=t(3),i=t(36),l=t.n(i),u=t(12),d=t.n(u),c=t(10),m=t(37),b=t(24),g=function(){function e(_,t,r,n){var o=this;Object(a.a)(this,e),this.dataTable=void 0,this.indexTable=void 0,this.columnsTable=void 0,this.styler=void 0,this.getCell=function(e,_){var t=e<o.headerRows&&_<o.headerColumns,r=e>=o.headerRows&&_<o.headerColumns,a=e<o.headerRows&&_>=o.headerColumns;if(t){var n=["blank"];return _>0&&n.push("level"+e),{type:"blank",classNames:n.join(" "),content:""}}if(a){var s=_-o.headerColumns;return{type:"columns",classNames:["col_heading","level"+e,"col"+s].join(" "),content:o.getContent(o.columnsTable,s,e)}}if(r){var i=e-o.headerRows,l=["row_heading","level"+_,"row"+i];return{type:"index",id:"T_".concat(o.uuid,"level").concat(_,"_row").concat(i),classNames:l.join(" "),content:o.getContent(o.indexTable,i,_)}}var u=e-o.headerRows,d=_-o.headerColumns,c=["data","row"+u,"col"+d],m=o.styler?o.getContent(o.styler.displayValuesTable,u,d):o.getContent(o.dataTable,u,d);return{type:"data",id:"T_".concat(o.uuid,"row").concat(u,"_col").concat(d),classNames:c.join(" "),content:m}},this.getContent=function(e,_,t){var r=e.getColumnAt(t);if(null===r)return"";switch(o.getColumnTypeId(e,t)){case b.b.Timestamp:return o.nanosToDate(r.get(_));default:return r.get(_)}},this.dataTable=b.a.from(_),this.indexTable=b.a.from(t),this.columnsTable=b.a.from(r),this.styler=n?{caption:n.get("caption"),displayValuesTable:b.a.from(n.get("displayValues")),styles:n.get("styles"),uuid:n.get("uuid")}:void 0}return Object(n.a)(e,[{key:"rows",get:function(){return this.indexTable.length+this.columnsTable.numCols}},{key:"columns",get:function(){return this.indexTable.numCols+this.columnsTable.length}},{key:"headerRows",get:function(){return 
this.rows-this.dataRows}},{key:"headerColumns",get:function(){return this.columns-this.dataColumns}},{key:"dataRows",get:function(){return this.dataTable.length}},{key:"dataColumns",get:function(){return this.dataTable.numCols}},{key:"uuid",get:function(){return this.styler&&this.styler.uuid}},{key:"caption",get:function(){return this.styler&&this.styler.caption}},{key:"styles",get:function(){return this.styler&&this.styler.styles}},{key:"table",get:function(){return this.dataTable}},{key:"index",get:function(){return this.indexTable}},{key:"columnTable",get:function(){return this.columnsTable}},{key:"getColumnTypeId",value:function(e,_){return e.schema.fields[_].type.typeId}},{key:"nanosToDate",value:function(e){return new Date(e/1e6)}}]),e}();!function(e){e.COMPONENT_READY="streamlit:componentReady",e.SET_COMPONENT_VALUE="streamlit:setComponentValue",e.SET_FRAME_HEIGHT="streamlit:setFrameHeight"}(r||(r={}));var p=function e(){Object(a.a)(this,e)};p.API_VERSION=1,p.RENDER_EVENT="streamlit:render",p.events=new m.a,p.registeredMessageListener=!1,p.lastFrameHeight=void 0,p.setComponentReady=function(){p.registeredMessageListener||(window.addEventListener("message",p.onMessageEvent),p.registeredMessageListener=!0),p.sendBackMsg(r.COMPONENT_READY,{apiVersion:p.API_VERSION})},p.setFrameHeight=function(e){void 0===e&&(e=document.body.scrollHeight+10),e!==p.lastFrameHeight&&(p.lastFrameHeight=e,p.sendBackMsg(r.SET_FRAME_HEIGHT,{height:e}))},p.setComponentValue=function(e){p.sendBackMsg(r.SET_COMPONENT_VALUE,{value:e})},p.onMessageEvent=function(e){switch(e.data.type){case p.RENDER_EVENT:p.onRenderMessage(e.data)}},p.onRenderMessage=function(e){var _=e.args;null==_&&(console.error("Got null args in onRenderMessage. 
This should never happen"),_={});var t=e.dfs&&e.dfs.length>0?p.argsDataframeToObject(e.dfs):{};_=Object(c.a)(Object(c.a)({},_),t);var r={disabled:Boolean(e.disabled),args:_},a=new CustomEvent(p.RENDER_EVENT,{detail:r});p.events.dispatchEvent(a)},p.argsDataframeToObject=function(e){var _=e.map((function(e){var _=e.key,t=e.value;return[_,p.toArrowTable(t)]}));return Object.fromEntries(_)},p.toArrowTable=function(e){var _=e.data,t=_.data,r=_.index,a=_.columns;return new g(t,r,a)},p.sendBackMsg=function(e,_){window.parent.postMessage(Object(c.a)({isStreamlitMessage:!0,type:e},_),"*")};var h=function(e){Object(o.a)(t,e);var _=Object(s.a)(t);function t(){return Object(a.a)(this,t),_.apply(this,arguments)}return Object(n.a)(t,[{key:"componentDidMount",value:function(){p.setFrameHeight()}},{key:"componentDidUpdate",value:function(){p.setFrameHeight()}}]),t}(d.a.PureComponent);function E(e){var _=function(_){Object(o.a)(r,_);var t=Object(s.a)(r);function r(_){var n;return Object(a.a)(this,r),(n=t.call(this,_)).componentDidMount=function(){p.events.addEventListener(p.RENDER_EVENT,n.onRenderEvent),p.setComponentReady()},n.componentDidUpdate=function(e){null!=n.state.componentError&&p.setFrameHeight()},n.componentWillUnmount=function(){p.events.removeEventListener(p.RENDER_EVENT,n.onRenderEvent)},n.onRenderEvent=function(e){var _=e;n.setState({renderData:_.detail})},n.render=function(){return null!=n.state.componentError?d.a.createElement("div",null,d.a.createElement("h1",null,"Component Error"),d.a.createElement("span",null,n.state.componentError.message)):null==n.state.renderData?null:d.a.createElement(e,{width:window.innerWidth,disabled:n.state.renderData.disabled,args:n.state.renderData.args})},n.state={renderData:void 0,componentError:void 0},n}return r}(d.a.PureComponent);return _.getDerivedStateFromError=function(e){return{componentError:e}},l()(_,e)}},35:function(module,__webpack_exports__,__webpack_require__){"use strict";var 
_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_slicedToArray__WEBPACK_IMPORTED_MODULE_0__=__webpack_require__(7),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createForOfIteratorHelper__WEBPACK_IMPORTED_MODULE_1__=__webpack_require__(8),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2__=__webpack_require__(1),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2___default=__webpack_require__.n(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2__),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_asyncToGenerator__WEBPACK_IMPORTED_MODULE_3__=__webpack_require__(5),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_4__=__webpack_require__(0),_mnt_storage_Documents_hugging_f
ace_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createClass__WEBPACK_IMPORTED_MODULE_5__=__webpack_require__(4),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_inherits__WEBPACK_IMPORTED_MODULE_6__=__webpack_require__(2),_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createSuper__WEBPACK_IMPORTED_MODULE_7__=__webpack_require__(3),react__WEBPACK_IMPORTED_MODULE_8__=__webpack_require__(12),react__WEBPACK_IMPORTED_MODULE_8___default=__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_8__),_streamlit__WEBPACK_IMPORTED_MODULE_9__=__webpack_require__(18),_observablehq_runtime__WEBPACK_IMPORTED_MODULE_10__=__webpack_require__(30),Observable=function(_StreamlitComponentBa){Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_inherits__WEBPACK_IMPORTED_MODULE_6__.a)(Observable,_StreamlitComponentBa);var _super=Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createSuper__WEBPACK_IMPORTED_MODULE_7__.a)(Observable);function Observable(){var 
e;Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_4__.a)(this,Observable);for(var _=arguments.length,t=new Array(_),r=0;r<_;r++)t[r]=arguments[r];return(e=_super.call.apply(_super,[this].concat(t))).observeValue={},e.notebookRef=react__WEBPACK_IMPORTED_MODULE_8___default.a.createRef(),e.runtime=null,e.main=null,e.render=function(){return console.log("this.props.args.render_empty: ",e.props.args.render_empty),e.props.args.render_empty?react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",null,react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{padding:"9px 12px"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{ref:e.notebookRef})),react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{marginTop:"4px"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",null,react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{textAlign:"left"}},e.props.args.name),react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{textAlign:"right"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("a",{href:"https://observablehq.com/".concat(e.props.args.notebook),style:{color:"#666"}}))))):react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{border:"1px solid gray",borderRadius:"4px"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{padding:"9px 12px"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{ref:e.notebookRef})),react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{marginTop:"4px"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{backgroundColor:"#ddd",fontWeight:700,padding:".25rem .5rem",borderRadius:"0 0 4px 
4px",gridTemplateColumns:"auto auto",display:"grid"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{textAlign:"left"}},e.props.args.name),react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("div",{style:{textAlign:"right"}},react__WEBPACK_IMPORTED_MODULE_8___default.a.createElement("a",{href:"https://observablehq.com/".concat(e.props.args.notebook),style:{color:"#666"}})))))},e}return Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createClass__WEBPACK_IMPORTED_MODULE_5__.a)(Observable,[{key:"componentWillUnmount",value:function(){var e;null===(e=this.runtime)||void 0===e||e.dispose()}},{key:"componentDidUpdate",value:function(e){e.args.notebook,this.props.args.notebook,console.log("this.props.args.redefine: ",this.props.args.redefine),null!==this.main&&this.redefineCells(this.main,this.props.args.redefine)}},{key:"embedNotebook",value:function(){var _embedNotebook=Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_asyncToGenerator__WEBPACK_IMPORTED_MODULE_3__.a)(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2___default.a.mark((function _callee2(notebook,targets,observe,hide){var _this2=this,targetSet,observeSet,hideSet,_yield$eval,define;return 
_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2___default.a.wrap((function _callee2$(_context2){for(;;)switch(_context2.prev=_context2.next){case 0:return this.runtime&&this.runtime.dispose(),console.log("Console says hi!"),targetSet=new Set(targets),observeSet=new Set(observe),hideSet=new Set(hide),this.runtime=new _observablehq_runtime__WEBPACK_IMPORTED_MODULE_10__.b,_context2.next=8,eval('import("https://api.observablehq.com/'.concat(notebook,'.js?v=3")'));case 8:_yield$eval=_context2.sent,define=_yield$eval.default,this.main=this.runtime.module(define,(function(e){var _;if(console.log("name: ",e),console.log("observeSet.has(name: ",observeSet.has(e)),console.log("targetSet.has(name): ",targetSet.has(e)),observeSet.has(e)&&!targetSet.has(e)){var t=_this2.observeValue;return console.log("observeValue: ",t),{fulfilled:function(_){t[e]=_,_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setComponentValue(t)}}}if(!(targetSet.size>0)||targetSet.has(e)){if(hideSet.has(e))return!0;var r=document.createElement("div");null===(_=_this2.notebookRef.current)||void 0===_||_.appendChild(r);var a=new _observablehq_runtime__WEBPACK_IMPORTED_MODULE_10__.a(r);return r.addEventListener("input",(function(e){_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setFrameHeight()})),{pending:function(){a.pending(),_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setFrameHeight()},fulfilled:function(e){a.fulfilled(e),_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setFrameHeight()},rejected:function(e){a.rejected(e),_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setFrameHeight()}}}})),observeSet.size>0&&Promise.all(Array.from(observeSet).map(function(){var 
e=Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_asyncToGenerator__WEBPACK_IMPORTED_MODULE_3__.a)(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2___default.a.mark((function e(_){return _mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_2___default.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.t0=_,e.next=3,_this2.main.value(_);case 3:return e.t1=e.sent,e.abrupt("return",[e.t0,e.t1]);case 5:case"end":return e.stop()}}),e)})));return function(_){return e.apply(this,arguments)}}())).then((function(e){var _,t=Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_createForOfIteratorHelper__WEBPACK_IMPORTED_MODULE_1__.a)(e);try{for(t.s();!(_=t.n()).done;){var r=Object(_mnt_storage_Documents_hugging_face_colaborative_hub_training_demo_neurips_training_transformers_together_dashboard_streamlit_observable_frontend_node_modules_babel_preset_react_app_node_modules_babel_runtime_helpers_esm_slicedToArray__WEBPACK_IMPORTED_MODULE_0__.a)(_.value,2),a=r[0],n=r[1];_this2.observeValue[a]=n}}catch(o){t.e(o)}finally{t.f()}_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setComponentValue(_this2.observeValue)}));case 12:case"end":return _context2.stop()}}),_callee2,this)})));function embedNotebook(e,_,t,r){return 
_embedNotebook.apply(this,arguments)}return embedNotebook}()},{key:"redefineCells",value:function(e){var _=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};for(var t in console.log("Console says hi 2 !"),_)e.redefine(t,_[t])}},{key:"componentDidMount",value:function(){var e=this,_=this.props.args,t=_.notebook,r=_.targets,a=void 0===r?[]:r,n=_.observe,o=void 0===n?[]:n,s=_.redefine,i=void 0===s?{}:s,l=_.hide,u=void 0===l?[]:l;_streamlit__WEBPACK_IMPORTED_MODULE_9__.a.setComponentValue(this.observeValue),this.embedNotebook(t,a,o,u).then((function(){e.redefineCells(e.main,i)}))}}]),Observable}(_streamlit__WEBPACK_IMPORTED_MODULE_9__.b);__webpack_exports__.a=Object(_streamlit__WEBPACK_IMPORTED_MODULE_9__.c)(Observable)},40:function(e,_,t){e.exports=t(41)},41:function(e,_,t){"use strict";t.r(_);var r=t(12),a=t.n(r),n=t(34),o=t.n(n),s=t(35);o.a.render(a.a.createElement(a.a.StrictMode,null,a.a.createElement(s.a,null)),document.getElementById("root"))}},[[40,1,2]]]);
|
3 |
-
//# sourceMappingURL=main.fc603b94.chunk.js.map
|
|
|
|
|
|
|
|
spaces/CNXT/GPTx/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: GPTx
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.24.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_model_analysis.py
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
2 |
-
|
3 |
-
|
4 |
-
import unittest
|
5 |
-
import torch
|
6 |
-
|
7 |
-
import detectron2.model_zoo as model_zoo
|
8 |
-
from detectron2.config import get_cfg
|
9 |
-
from detectron2.modeling import build_model
|
10 |
-
from detectron2.utils.analysis import flop_count_operators, parameter_count
|
11 |
-
|
12 |
-
|
13 |
-
def get_model_zoo(config_path):
|
14 |
-
"""
|
15 |
-
Like model_zoo.get, but do not load any weights (even pretrained)
|
16 |
-
"""
|
17 |
-
cfg_file = model_zoo.get_config_file(config_path)
|
18 |
-
cfg = get_cfg()
|
19 |
-
cfg.merge_from_file(cfg_file)
|
20 |
-
if not torch.cuda.is_available():
|
21 |
-
cfg.MODEL.DEVICE = "cpu"
|
22 |
-
return build_model(cfg)
|
23 |
-
|
24 |
-
|
25 |
-
class RetinaNetTest(unittest.TestCase):
|
26 |
-
def setUp(self):
|
27 |
-
self.model = get_model_zoo("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
|
28 |
-
|
29 |
-
def test_flop(self):
|
30 |
-
# RetinaNet supports flop-counting with random inputs
|
31 |
-
inputs = [{"image": torch.rand(3, 800, 800)}]
|
32 |
-
res = flop_count_operators(self.model, inputs)
|
33 |
-
self.assertTrue(int(res["conv"]), 146) # 146B flops
|
34 |
-
|
35 |
-
def test_param_count(self):
|
36 |
-
res = parameter_count(self.model)
|
37 |
-
self.assertTrue(res[""], 37915572)
|
38 |
-
self.assertTrue(res["backbone"], 31452352)
|
39 |
-
|
40 |
-
|
41 |
-
class FasterRCNNTest(unittest.TestCase):
|
42 |
-
def setUp(self):
|
43 |
-
self.model = get_model_zoo("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
|
44 |
-
|
45 |
-
def test_flop(self):
|
46 |
-
# Faster R-CNN supports flop-counting with random inputs
|
47 |
-
inputs = [{"image": torch.rand(3, 800, 800)}]
|
48 |
-
res = flop_count_operators(self.model, inputs)
|
49 |
-
|
50 |
-
# This only checks flops for backbone & proposal generator
|
51 |
-
# Flops for box head is not conv, and depends on #proposals, which is
|
52 |
-
# almost 0 for random inputs.
|
53 |
-
self.assertTrue(int(res["conv"]), 117)
|
54 |
-
|
55 |
-
def test_param_count(self):
|
56 |
-
res = parameter_count(self.model)
|
57 |
-
self.assertTrue(res[""], 41699936)
|
58 |
-
self.assertTrue(res["backbone"], 26799296)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/make_mask.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
# --------------------------------------------------------
|
2 |
-
# OpenVQA
|
3 |
-
# Written by Yuhao Cui https://github.com/cuiyuhao1996
|
4 |
-
# --------------------------------------------------------
|
5 |
-
|
6 |
-
import torch
|
7 |
-
|
8 |
-
|
9 |
-
# Masking the sequence mask
|
10 |
-
def make_mask(feature):
|
11 |
-
return (torch.sum(
|
12 |
-
torch.abs(feature),
|
13 |
-
dim=-1
|
14 |
-
) == 0).unsqueeze(1).unsqueeze(2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Text2Human/Text2Human/models/sample_model.py
DELETED
@@ -1,500 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
import torch.distributions as dists
|
6 |
-
import torch.nn.functional as F
|
7 |
-
from torchvision.utils import save_image
|
8 |
-
|
9 |
-
from models.archs.fcn_arch import FCNHead, MultiHeadFCNHead
|
10 |
-
from models.archs.shape_attr_embedding_arch import ShapeAttrEmbedding
|
11 |
-
from models.archs.transformer_arch import TransformerMultiHead
|
12 |
-
from models.archs.unet_arch import ShapeUNet, UNet
|
13 |
-
from models.archs.vqgan_arch import (Decoder, DecoderRes, Encoder,
|
14 |
-
VectorQuantizer,
|
15 |
-
VectorQuantizerSpatialTextureAware,
|
16 |
-
VectorQuantizerTexture)
|
17 |
-
|
18 |
-
logger = logging.getLogger('base')
|
19 |
-
|
20 |
-
|
21 |
-
class BaseSampleModel():
|
22 |
-
"""Base Model"""
|
23 |
-
|
24 |
-
def __init__(self, opt):
|
25 |
-
self.opt = opt
|
26 |
-
self.device = torch.device('cuda')
|
27 |
-
|
28 |
-
# hierarchical VQVAE
|
29 |
-
self.decoder = Decoder(
|
30 |
-
in_channels=opt['top_in_channels'],
|
31 |
-
resolution=opt['top_resolution'],
|
32 |
-
z_channels=opt['top_z_channels'],
|
33 |
-
ch=opt['top_ch'],
|
34 |
-
out_ch=opt['top_out_ch'],
|
35 |
-
num_res_blocks=opt['top_num_res_blocks'],
|
36 |
-
attn_resolutions=opt['top_attn_resolutions'],
|
37 |
-
ch_mult=opt['top_ch_mult'],
|
38 |
-
dropout=opt['top_dropout'],
|
39 |
-
resamp_with_conv=True,
|
40 |
-
give_pre_end=False).to(self.device)
|
41 |
-
self.top_quantize = VectorQuantizerTexture(
|
42 |
-
1024, opt['embed_dim'], beta=0.25).to(self.device)
|
43 |
-
self.top_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'],
|
44 |
-
opt["top_z_channels"],
|
45 |
-
1).to(self.device)
|
46 |
-
self.load_top_pretrain_models()
|
47 |
-
|
48 |
-
self.bot_decoder_res = DecoderRes(
|
49 |
-
in_channels=opt['bot_in_channels'],
|
50 |
-
resolution=opt['bot_resolution'],
|
51 |
-
z_channels=opt['bot_z_channels'],
|
52 |
-
ch=opt['bot_ch'],
|
53 |
-
num_res_blocks=opt['bot_num_res_blocks'],
|
54 |
-
ch_mult=opt['bot_ch_mult'],
|
55 |
-
dropout=opt['bot_dropout'],
|
56 |
-
give_pre_end=False).to(self.device)
|
57 |
-
self.bot_quantize = VectorQuantizerSpatialTextureAware(
|
58 |
-
opt['bot_n_embed'],
|
59 |
-
opt['embed_dim'],
|
60 |
-
beta=0.25,
|
61 |
-
spatial_size=opt['bot_codebook_spatial_size']).to(self.device)
|
62 |
-
self.bot_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'],
|
63 |
-
opt["bot_z_channels"],
|
64 |
-
1).to(self.device)
|
65 |
-
self.load_bot_pretrain_network()
|
66 |
-
|
67 |
-
# top -> bot prediction
|
68 |
-
self.index_pred_guidance_encoder = UNet(
|
69 |
-
in_channels=opt['index_pred_encoder_in_channels']).to(self.device)
|
70 |
-
self.index_pred_decoder = MultiHeadFCNHead(
|
71 |
-
in_channels=opt['index_pred_fc_in_channels'],
|
72 |
-
in_index=opt['index_pred_fc_in_index'],
|
73 |
-
channels=opt['index_pred_fc_channels'],
|
74 |
-
num_convs=opt['index_pred_fc_num_convs'],
|
75 |
-
concat_input=opt['index_pred_fc_concat_input'],
|
76 |
-
dropout_ratio=opt['index_pred_fc_dropout_ratio'],
|
77 |
-
num_classes=opt['index_pred_fc_num_classes'],
|
78 |
-
align_corners=opt['index_pred_fc_align_corners'],
|
79 |
-
num_head=18).to(self.device)
|
80 |
-
self.load_index_pred_network()
|
81 |
-
|
82 |
-
# VAE for segmentation mask
|
83 |
-
self.segm_encoder = Encoder(
|
84 |
-
ch=opt['segm_ch'],
|
85 |
-
num_res_blocks=opt['segm_num_res_blocks'],
|
86 |
-
attn_resolutions=opt['segm_attn_resolutions'],
|
87 |
-
ch_mult=opt['segm_ch_mult'],
|
88 |
-
in_channels=opt['segm_in_channels'],
|
89 |
-
resolution=opt['segm_resolution'],
|
90 |
-
z_channels=opt['segm_z_channels'],
|
91 |
-
double_z=opt['segm_double_z'],
|
92 |
-
dropout=opt['segm_dropout']).to(self.device)
|
93 |
-
self.segm_quantizer = VectorQuantizer(
|
94 |
-
opt['segm_n_embed'],
|
95 |
-
opt['segm_embed_dim'],
|
96 |
-
beta=0.25,
|
97 |
-
sane_index_shape=True).to(self.device)
|
98 |
-
self.segm_quant_conv = torch.nn.Conv2d(opt["segm_z_channels"],
|
99 |
-
opt['segm_embed_dim'],
|
100 |
-
1).to(self.device)
|
101 |
-
self.load_pretrained_segm_token()
|
102 |
-
|
103 |
-
# define sampler
|
104 |
-
self.sampler_fn = TransformerMultiHead(
|
105 |
-
codebook_size=opt['codebook_size'],
|
106 |
-
segm_codebook_size=opt['segm_codebook_size'],
|
107 |
-
texture_codebook_size=opt['texture_codebook_size'],
|
108 |
-
bert_n_emb=opt['bert_n_emb'],
|
109 |
-
bert_n_layers=opt['bert_n_layers'],
|
110 |
-
bert_n_head=opt['bert_n_head'],
|
111 |
-
block_size=opt['block_size'],
|
112 |
-
latent_shape=opt['latent_shape'],
|
113 |
-
embd_pdrop=opt['embd_pdrop'],
|
114 |
-
resid_pdrop=opt['resid_pdrop'],
|
115 |
-
attn_pdrop=opt['attn_pdrop'],
|
116 |
-
num_head=opt['num_head']).to(self.device)
|
117 |
-
self.load_sampler_pretrained_network()
|
118 |
-
|
119 |
-
self.shape = tuple(opt['latent_shape'])
|
120 |
-
|
121 |
-
self.mask_id = opt['codebook_size']
|
122 |
-
self.sample_steps = opt['sample_steps']
|
123 |
-
|
124 |
-
def load_top_pretrain_models(self):
|
125 |
-
# load pretrained vqgan
|
126 |
-
top_vae_checkpoint = torch.load(self.opt['top_vae_path'])
|
127 |
-
|
128 |
-
self.decoder.load_state_dict(
|
129 |
-
top_vae_checkpoint['decoder'], strict=True)
|
130 |
-
self.top_quantize.load_state_dict(
|
131 |
-
top_vae_checkpoint['quantize'], strict=True)
|
132 |
-
self.top_post_quant_conv.load_state_dict(
|
133 |
-
top_vae_checkpoint['post_quant_conv'], strict=True)
|
134 |
-
|
135 |
-
self.decoder.eval()
|
136 |
-
self.top_quantize.eval()
|
137 |
-
self.top_post_quant_conv.eval()
|
138 |
-
|
139 |
-
def load_bot_pretrain_network(self):
|
140 |
-
checkpoint = torch.load(self.opt['bot_vae_path'])
|
141 |
-
self.bot_decoder_res.load_state_dict(
|
142 |
-
checkpoint['bot_decoder_res'], strict=True)
|
143 |
-
self.decoder.load_state_dict(checkpoint['decoder'], strict=True)
|
144 |
-
self.bot_quantize.load_state_dict(
|
145 |
-
checkpoint['bot_quantize'], strict=True)
|
146 |
-
self.bot_post_quant_conv.load_state_dict(
|
147 |
-
checkpoint['bot_post_quant_conv'], strict=True)
|
148 |
-
|
149 |
-
self.bot_decoder_res.eval()
|
150 |
-
self.decoder.eval()
|
151 |
-
self.bot_quantize.eval()
|
152 |
-
self.bot_post_quant_conv.eval()
|
153 |
-
|
154 |
-
def load_pretrained_segm_token(self):
|
155 |
-
# load pretrained vqgan for segmentation mask
|
156 |
-
segm_token_checkpoint = torch.load(self.opt['segm_token_path'])
|
157 |
-
self.segm_encoder.load_state_dict(
|
158 |
-
segm_token_checkpoint['encoder'], strict=True)
|
159 |
-
self.segm_quantizer.load_state_dict(
|
160 |
-
segm_token_checkpoint['quantize'], strict=True)
|
161 |
-
self.segm_quant_conv.load_state_dict(
|
162 |
-
segm_token_checkpoint['quant_conv'], strict=True)
|
163 |
-
|
164 |
-
self.segm_encoder.eval()
|
165 |
-
self.segm_quantizer.eval()
|
166 |
-
self.segm_quant_conv.eval()
|
167 |
-
|
168 |
-
def load_index_pred_network(self):
|
169 |
-
checkpoint = torch.load(self.opt['pretrained_index_network'])
|
170 |
-
self.index_pred_guidance_encoder.load_state_dict(
|
171 |
-
checkpoint['guidance_encoder'], strict=True)
|
172 |
-
self.index_pred_decoder.load_state_dict(
|
173 |
-
checkpoint['index_decoder'], strict=True)
|
174 |
-
|
175 |
-
self.index_pred_guidance_encoder.eval()
|
176 |
-
self.index_pred_decoder.eval()
|
177 |
-
|
178 |
-
def load_sampler_pretrained_network(self):
|
179 |
-
checkpoint = torch.load(self.opt['pretrained_sampler'])
|
180 |
-
self.sampler_fn.load_state_dict(checkpoint, strict=True)
|
181 |
-
self.sampler_fn.eval()
|
182 |
-
|
183 |
-
def bot_index_prediction(self, feature_top, texture_mask):
|
184 |
-
self.index_pred_guidance_encoder.eval()
|
185 |
-
self.index_pred_decoder.eval()
|
186 |
-
|
187 |
-
texture_tokens = F.interpolate(
|
188 |
-
texture_mask, (32, 16), mode='nearest').view(self.batch_size,
|
189 |
-
-1).long()
|
190 |
-
|
191 |
-
texture_mask_flatten = texture_tokens.view(-1)
|
192 |
-
min_encodings_indices_list = [
|
193 |
-
torch.full(
|
194 |
-
texture_mask_flatten.size(),
|
195 |
-
fill_value=-1,
|
196 |
-
dtype=torch.long,
|
197 |
-
device=texture_mask_flatten.device) for _ in range(18)
|
198 |
-
]
|
199 |
-
with torch.no_grad():
|
200 |
-
feature_enc = self.index_pred_guidance_encoder(feature_top)
|
201 |
-
memory_logits_list = self.index_pred_decoder(feature_enc)
|
202 |
-
for codebook_idx, memory_logits in enumerate(memory_logits_list):
|
203 |
-
region_of_interest = texture_mask_flatten == codebook_idx
|
204 |
-
if torch.sum(region_of_interest) > 0:
|
205 |
-
memory_indices_pred = memory_logits.argmax(dim=1).view(-1)
|
206 |
-
memory_indices_pred = memory_indices_pred
|
207 |
-
min_encodings_indices_list[codebook_idx][
|
208 |
-
region_of_interest] = memory_indices_pred[
|
209 |
-
region_of_interest]
|
210 |
-
min_encodings_indices_return_list = [
|
211 |
-
min_encodings_indices.view((1, 32, 16))
|
212 |
-
for min_encodings_indices in min_encodings_indices_list
|
213 |
-
]
|
214 |
-
|
215 |
-
return min_encodings_indices_return_list
|
216 |
-
|
217 |
-
def sample_and_refine(self, save_dir=None, img_name=None):
|
218 |
-
# sample 32x16 features indices
|
219 |
-
sampled_top_indices_list = self.sample_fn(
|
220 |
-
temp=1, sample_steps=self.sample_steps)
|
221 |
-
|
222 |
-
for sample_idx in range(self.batch_size):
|
223 |
-
sample_indices = [
|
224 |
-
sampled_indices_cur[sample_idx:sample_idx + 1]
|
225 |
-
for sampled_indices_cur in sampled_top_indices_list
|
226 |
-
]
|
227 |
-
top_quant = self.top_quantize.get_codebook_entry(
|
228 |
-
sample_indices, self.texture_mask[sample_idx:sample_idx + 1],
|
229 |
-
(sample_indices[0].size(0), self.shape[0], self.shape[1],
|
230 |
-
self.opt["top_z_channels"]))
|
231 |
-
|
232 |
-
top_quant = self.top_post_quant_conv(top_quant)
|
233 |
-
|
234 |
-
bot_indices_list = self.bot_index_prediction(
|
235 |
-
top_quant, self.texture_mask[sample_idx:sample_idx + 1])
|
236 |
-
|
237 |
-
quant_bot = self.bot_quantize.get_codebook_entry(
|
238 |
-
bot_indices_list, self.texture_mask[sample_idx:sample_idx + 1],
|
239 |
-
(bot_indices_list[0].size(0), bot_indices_list[0].size(1),
|
240 |
-
bot_indices_list[0].size(2),
|
241 |
-
self.opt["bot_z_channels"])) #.permute(0, 3, 1, 2)
|
242 |
-
quant_bot = self.bot_post_quant_conv(quant_bot)
|
243 |
-
bot_dec_res = self.bot_decoder_res(quant_bot)
|
244 |
-
|
245 |
-
dec = self.decoder(top_quant, bot_h=bot_dec_res)
|
246 |
-
|
247 |
-
dec = ((dec + 1) / 2)
|
248 |
-
dec = dec.clamp_(0, 1)
|
249 |
-
if save_dir is None and img_name is None:
|
250 |
-
return dec
|
251 |
-
else:
|
252 |
-
save_image(
|
253 |
-
dec,
|
254 |
-
f'{save_dir}/{img_name[sample_idx]}',
|
255 |
-
nrow=1,
|
256 |
-
padding=4)
|
257 |
-
|
258 |
-
def sample_fn(self, temp=1.0, sample_steps=None):
|
259 |
-
self.sampler_fn.eval()
|
260 |
-
|
261 |
-
x_t = torch.ones((self.batch_size, np.prod(self.shape)),
|
262 |
-
device=self.device).long() * self.mask_id
|
263 |
-
unmasked = torch.zeros_like(x_t, device=self.device).bool()
|
264 |
-
sample_steps = list(range(1, sample_steps + 1))
|
265 |
-
|
266 |
-
texture_tokens = F.interpolate(
|
267 |
-
self.texture_mask, (32, 16),
|
268 |
-
mode='nearest').view(self.batch_size, -1).long()
|
269 |
-
|
270 |
-
texture_mask_flatten = texture_tokens.view(-1)
|
271 |
-
|
272 |
-
# min_encodings_indices_list would be used to visualize the image
|
273 |
-
min_encodings_indices_list = [
|
274 |
-
torch.full(
|
275 |
-
texture_mask_flatten.size(),
|
276 |
-
fill_value=-1,
|
277 |
-
dtype=torch.long,
|
278 |
-
device=texture_mask_flatten.device) for _ in range(18)
|
279 |
-
]
|
280 |
-
|
281 |
-
for t in reversed(sample_steps):
|
282 |
-
t = torch.full((self.batch_size, ),
|
283 |
-
t,
|
284 |
-
device=self.device,
|
285 |
-
dtype=torch.long)
|
286 |
-
|
287 |
-
# where to unmask
|
288 |
-
changes = torch.rand(
|
289 |
-
x_t.shape, device=self.device) < 1 / t.float().unsqueeze(-1)
|
290 |
-
# don't unmask somewhere already unmasked
|
291 |
-
changes = torch.bitwise_xor(changes,
|
292 |
-
torch.bitwise_and(changes, unmasked))
|
293 |
-
# update mask with changes
|
294 |
-
unmasked = torch.bitwise_or(unmasked, changes)
|
295 |
-
|
296 |
-
x_0_logits_list = self.sampler_fn(
|
297 |
-
x_t, self.segm_tokens, texture_tokens, t=t)
|
298 |
-
|
299 |
-
changes_flatten = changes.view(-1)
|
300 |
-
ori_shape = x_t.shape # [b, h*w]
|
301 |
-
x_t = x_t.view(-1) # [b*h*w]
|
302 |
-
for codebook_idx, x_0_logits in enumerate(x_0_logits_list):
|
303 |
-
if torch.sum(texture_mask_flatten[changes_flatten] ==
|
304 |
-
codebook_idx) > 0:
|
305 |
-
# scale by temperature
|
306 |
-
x_0_logits = x_0_logits / temp
|
307 |
-
x_0_dist = dists.Categorical(logits=x_0_logits)
|
308 |
-
x_0_hat = x_0_dist.sample().long()
|
309 |
-
x_0_hat = x_0_hat.view(-1)
|
310 |
-
|
311 |
-
# only replace the changed indices with corresponding codebook_idx
|
312 |
-
changes_segm = torch.bitwise_and(
|
313 |
-
changes_flatten, texture_mask_flatten == codebook_idx)
|
314 |
-
|
315 |
-
# x_t would be the input to the transformer, so the index range should be continual one
|
316 |
-
x_t[changes_segm] = x_0_hat[
|
317 |
-
changes_segm] + 1024 * codebook_idx
|
318 |
-
min_encodings_indices_list[codebook_idx][
|
319 |
-
changes_segm] = x_0_hat[changes_segm]
|
320 |
-
|
321 |
-
x_t = x_t.view(ori_shape) # [b, h*w]
|
322 |
-
|
323 |
-
min_encodings_indices_return_list = [
|
324 |
-
min_encodings_indices.view(ori_shape)
|
325 |
-
for min_encodings_indices in min_encodings_indices_list
|
326 |
-
]
|
327 |
-
|
328 |
-
self.sampler_fn.train()
|
329 |
-
|
330 |
-
return min_encodings_indices_return_list
|
331 |
-
|
332 |
-
@torch.no_grad()
|
333 |
-
def get_quantized_segm(self, segm):
|
334 |
-
segm_one_hot = F.one_hot(
|
335 |
-
segm.squeeze(1).long(),
|
336 |
-
num_classes=self.opt['segm_num_segm_classes']).permute(
|
337 |
-
0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
|
338 |
-
encoded_segm_mask = self.segm_encoder(segm_one_hot)
|
339 |
-
encoded_segm_mask = self.segm_quant_conv(encoded_segm_mask)
|
340 |
-
_, _, [_, _, segm_tokens] = self.segm_quantizer(encoded_segm_mask)
|
341 |
-
|
342 |
-
return segm_tokens
|
343 |
-
|
344 |
-
|
345 |
-
class SampleFromParsingModel(BaseSampleModel):
|
346 |
-
"""SampleFromParsing model.
|
347 |
-
"""
|
348 |
-
|
349 |
-
def feed_data(self, data):
|
350 |
-
self.segm = data['segm'].to(self.device)
|
351 |
-
self.texture_mask = data['texture_mask'].to(self.device)
|
352 |
-
self.batch_size = self.segm.size(0)
|
353 |
-
|
354 |
-
self.segm_tokens = self.get_quantized_segm(self.segm)
|
355 |
-
self.segm_tokens = self.segm_tokens.view(self.batch_size, -1)
|
356 |
-
|
357 |
-
def inference(self, data_loader, save_dir):
|
358 |
-
for _, data in enumerate(data_loader):
|
359 |
-
img_name = data['img_name']
|
360 |
-
self.feed_data(data)
|
361 |
-
with torch.no_grad():
|
362 |
-
self.sample_and_refine(save_dir, img_name)
|
363 |
-
|
364 |
-
|
365 |
-
class SampleFromPoseModel(BaseSampleModel):
|
366 |
-
"""SampleFromPose model.
|
367 |
-
"""
|
368 |
-
|
369 |
-
def __init__(self, opt):
|
370 |
-
super().__init__(opt)
|
371 |
-
# pose-to-parsing
|
372 |
-
self.shape_attr_embedder = ShapeAttrEmbedding(
|
373 |
-
dim=opt['shape_embedder_dim'],
|
374 |
-
out_dim=opt['shape_embedder_out_dim'],
|
375 |
-
cls_num_list=opt['shape_attr_class_num']).to(self.device)
|
376 |
-
self.shape_parsing_encoder = ShapeUNet(
|
377 |
-
in_channels=opt['shape_encoder_in_channels']).to(self.device)
|
378 |
-
self.shape_parsing_decoder = FCNHead(
|
379 |
-
in_channels=opt['shape_fc_in_channels'],
|
380 |
-
in_index=opt['shape_fc_in_index'],
|
381 |
-
channels=opt['shape_fc_channels'],
|
382 |
-
num_convs=opt['shape_fc_num_convs'],
|
383 |
-
concat_input=opt['shape_fc_concat_input'],
|
384 |
-
dropout_ratio=opt['shape_fc_dropout_ratio'],
|
385 |
-
num_classes=opt['shape_fc_num_classes'],
|
386 |
-
align_corners=opt['shape_fc_align_corners'],
|
387 |
-
).to(self.device)
|
388 |
-
self.load_shape_generation_models()
|
389 |
-
|
390 |
-
self.palette = [[0, 0, 0], [255, 250, 250], [220, 220, 220],
|
391 |
-
[250, 235, 215], [255, 250, 205], [211, 211, 211],
|
392 |
-
[70, 130, 180], [127, 255, 212], [0, 100, 0],
|
393 |
-
[50, 205, 50], [255, 255, 0], [245, 222, 179],
|
394 |
-
[255, 140, 0], [255, 0, 0], [16, 78, 139],
|
395 |
-
[144, 238, 144], [50, 205, 174], [50, 155, 250],
|
396 |
-
[160, 140, 88], [213, 140, 88], [90, 140, 90],
|
397 |
-
[185, 210, 205], [130, 165, 180], [225, 141, 151]]
|
398 |
-
|
399 |
-
def load_shape_generation_models(self):
|
400 |
-
checkpoint = torch.load(self.opt['pretrained_parsing_gen'])
|
401 |
-
|
402 |
-
self.shape_attr_embedder.load_state_dict(
|
403 |
-
checkpoint['embedder'], strict=True)
|
404 |
-
self.shape_attr_embedder.eval()
|
405 |
-
|
406 |
-
self.shape_parsing_encoder.load_state_dict(
|
407 |
-
checkpoint['encoder'], strict=True)
|
408 |
-
self.shape_parsing_encoder.eval()
|
409 |
-
|
410 |
-
self.shape_parsing_decoder.load_state_dict(
|
411 |
-
checkpoint['decoder'], strict=True)
|
412 |
-
self.shape_parsing_decoder.eval()
|
413 |
-
|
414 |
-
def feed_data(self, data):
|
415 |
-
self.pose = data['densepose'].to(self.device)
|
416 |
-
self.batch_size = self.pose.size(0)
|
417 |
-
|
418 |
-
self.shape_attr = data['shape_attr'].to(self.device)
|
419 |
-
self.upper_fused_attr = data['upper_fused_attr'].to(self.device)
|
420 |
-
self.lower_fused_attr = data['lower_fused_attr'].to(self.device)
|
421 |
-
self.outer_fused_attr = data['outer_fused_attr'].to(self.device)
|
422 |
-
|
423 |
-
def inference(self, data_loader, save_dir):
|
424 |
-
for _, data in enumerate(data_loader):
|
425 |
-
img_name = data['img_name']
|
426 |
-
self.feed_data(data)
|
427 |
-
with torch.no_grad():
|
428 |
-
self.generate_parsing_map()
|
429 |
-
self.generate_quantized_segm()
|
430 |
-
self.generate_texture_map()
|
431 |
-
self.sample_and_refine(save_dir, img_name)
|
432 |
-
|
433 |
-
def generate_parsing_map(self):
|
434 |
-
with torch.no_grad():
|
435 |
-
attr_embedding = self.shape_attr_embedder(self.shape_attr)
|
436 |
-
pose_enc = self.shape_parsing_encoder(self.pose, attr_embedding)
|
437 |
-
seg_logits = self.shape_parsing_decoder(pose_enc)
|
438 |
-
self.segm = seg_logits.argmax(dim=1)
|
439 |
-
self.segm = self.segm.unsqueeze(1)
|
440 |
-
|
441 |
-
def generate_quantized_segm(self):
|
442 |
-
self.segm_tokens = self.get_quantized_segm(self.segm)
|
443 |
-
self.segm_tokens = self.segm_tokens.view(self.batch_size, -1)
|
444 |
-
|
445 |
-
def generate_texture_map(self):
|
446 |
-
upper_cls = [1., 4.]
|
447 |
-
lower_cls = [3., 5., 21.]
|
448 |
-
outer_cls = [2.]
|
449 |
-
|
450 |
-
mask_batch = []
|
451 |
-
for idx in range(self.batch_size):
|
452 |
-
mask = torch.zeros_like(self.segm[idx])
|
453 |
-
upper_fused_attr = self.upper_fused_attr[idx]
|
454 |
-
lower_fused_attr = self.lower_fused_attr[idx]
|
455 |
-
outer_fused_attr = self.outer_fused_attr[idx]
|
456 |
-
if upper_fused_attr != 17:
|
457 |
-
for cls in upper_cls:
|
458 |
-
mask[self.segm[idx] == cls] = upper_fused_attr + 1
|
459 |
-
|
460 |
-
if lower_fused_attr != 17:
|
461 |
-
for cls in lower_cls:
|
462 |
-
mask[self.segm[idx] == cls] = lower_fused_attr + 1
|
463 |
-
|
464 |
-
if outer_fused_attr != 17:
|
465 |
-
for cls in outer_cls:
|
466 |
-
mask[self.segm[idx] == cls] = outer_fused_attr + 1
|
467 |
-
|
468 |
-
mask_batch.append(mask)
|
469 |
-
self.texture_mask = torch.stack(mask_batch, dim=0).to(torch.float32)
|
470 |
-
|
471 |
-
def feed_pose_data(self, pose_img):
|
472 |
-
# for ui demo
|
473 |
-
|
474 |
-
self.pose = pose_img.to(self.device)
|
475 |
-
self.batch_size = self.pose.size(0)
|
476 |
-
|
477 |
-
def feed_shape_attributes(self, shape_attr):
|
478 |
-
# for ui demo
|
479 |
-
|
480 |
-
self.shape_attr = shape_attr.to(self.device)
|
481 |
-
|
482 |
-
def feed_texture_attributes(self, texture_attr):
|
483 |
-
# for ui demo
|
484 |
-
|
485 |
-
self.upper_fused_attr = texture_attr[0].unsqueeze(0).to(self.device)
|
486 |
-
self.lower_fused_attr = texture_attr[1].unsqueeze(0).to(self.device)
|
487 |
-
self.outer_fused_attr = texture_attr[2].unsqueeze(0).to(self.device)
|
488 |
-
|
489 |
-
def palette_result(self, result):
|
490 |
-
|
491 |
-
seg = result[0]
|
492 |
-
palette = np.array(self.palette)
|
493 |
-
assert palette.shape[1] == 3
|
494 |
-
assert len(palette.shape) == 2
|
495 |
-
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
|
496 |
-
for label, color in enumerate(palette):
|
497 |
-
color_seg[seg == label, :] = color
|
498 |
-
# convert to BGR
|
499 |
-
# color_seg = color_seg[..., ::-1]
|
500 |
-
return color_seg
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/lama-example/saicinpainting/training/losses/constants.py
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
weights = {"ade20k":
|
2 |
-
[6.34517766497462,
|
3 |
-
9.328358208955224,
|
4 |
-
11.389521640091116,
|
5 |
-
16.10305958132045,
|
6 |
-
20.833333333333332,
|
7 |
-
22.22222222222222,
|
8 |
-
25.125628140703515,
|
9 |
-
43.29004329004329,
|
10 |
-
50.5050505050505,
|
11 |
-
54.6448087431694,
|
12 |
-
55.24861878453038,
|
13 |
-
60.24096385542168,
|
14 |
-
62.5,
|
15 |
-
66.2251655629139,
|
16 |
-
84.74576271186442,
|
17 |
-
90.90909090909092,
|
18 |
-
91.74311926605505,
|
19 |
-
96.15384615384616,
|
20 |
-
96.15384615384616,
|
21 |
-
97.08737864077669,
|
22 |
-
102.04081632653062,
|
23 |
-
135.13513513513513,
|
24 |
-
149.2537313432836,
|
25 |
-
153.84615384615384,
|
26 |
-
163.93442622950818,
|
27 |
-
166.66666666666666,
|
28 |
-
188.67924528301887,
|
29 |
-
192.30769230769232,
|
30 |
-
217.3913043478261,
|
31 |
-
227.27272727272725,
|
32 |
-
227.27272727272725,
|
33 |
-
227.27272727272725,
|
34 |
-
303.03030303030306,
|
35 |
-
322.5806451612903,
|
36 |
-
333.3333333333333,
|
37 |
-
370.3703703703703,
|
38 |
-
384.61538461538464,
|
39 |
-
416.6666666666667,
|
40 |
-
416.6666666666667,
|
41 |
-
434.7826086956522,
|
42 |
-
434.7826086956522,
|
43 |
-
454.5454545454545,
|
44 |
-
454.5454545454545,
|
45 |
-
500.0,
|
46 |
-
526.3157894736842,
|
47 |
-
526.3157894736842,
|
48 |
-
555.5555555555555,
|
49 |
-
555.5555555555555,
|
50 |
-
555.5555555555555,
|
51 |
-
555.5555555555555,
|
52 |
-
555.5555555555555,
|
53 |
-
555.5555555555555,
|
54 |
-
555.5555555555555,
|
55 |
-
588.2352941176471,
|
56 |
-
588.2352941176471,
|
57 |
-
588.2352941176471,
|
58 |
-
588.2352941176471,
|
59 |
-
588.2352941176471,
|
60 |
-
666.6666666666666,
|
61 |
-
666.6666666666666,
|
62 |
-
666.6666666666666,
|
63 |
-
666.6666666666666,
|
64 |
-
714.2857142857143,
|
65 |
-
714.2857142857143,
|
66 |
-
714.2857142857143,
|
67 |
-
714.2857142857143,
|
68 |
-
714.2857142857143,
|
69 |
-
769.2307692307693,
|
70 |
-
769.2307692307693,
|
71 |
-
769.2307692307693,
|
72 |
-
833.3333333333334,
|
73 |
-
833.3333333333334,
|
74 |
-
833.3333333333334,
|
75 |
-
833.3333333333334,
|
76 |
-
909.090909090909,
|
77 |
-
1000.0,
|
78 |
-
1111.111111111111,
|
79 |
-
1111.111111111111,
|
80 |
-
1111.111111111111,
|
81 |
-
1111.111111111111,
|
82 |
-
1111.111111111111,
|
83 |
-
1250.0,
|
84 |
-
1250.0,
|
85 |
-
1250.0,
|
86 |
-
1250.0,
|
87 |
-
1250.0,
|
88 |
-
1428.5714285714287,
|
89 |
-
1428.5714285714287,
|
90 |
-
1428.5714285714287,
|
91 |
-
1428.5714285714287,
|
92 |
-
1428.5714285714287,
|
93 |
-
1428.5714285714287,
|
94 |
-
1428.5714285714287,
|
95 |
-
1666.6666666666667,
|
96 |
-
1666.6666666666667,
|
97 |
-
1666.6666666666667,
|
98 |
-
1666.6666666666667,
|
99 |
-
1666.6666666666667,
|
100 |
-
1666.6666666666667,
|
101 |
-
1666.6666666666667,
|
102 |
-
1666.6666666666667,
|
103 |
-
1666.6666666666667,
|
104 |
-
1666.6666666666667,
|
105 |
-
1666.6666666666667,
|
106 |
-
2000.0,
|
107 |
-
2000.0,
|
108 |
-
2000.0,
|
109 |
-
2000.0,
|
110 |
-
2000.0,
|
111 |
-
2000.0,
|
112 |
-
2000.0,
|
113 |
-
2000.0,
|
114 |
-
2000.0,
|
115 |
-
2000.0,
|
116 |
-
2000.0,
|
117 |
-
2000.0,
|
118 |
-
2000.0,
|
119 |
-
2000.0,
|
120 |
-
2000.0,
|
121 |
-
2000.0,
|
122 |
-
2000.0,
|
123 |
-
2500.0,
|
124 |
-
2500.0,
|
125 |
-
2500.0,
|
126 |
-
2500.0,
|
127 |
-
2500.0,
|
128 |
-
2500.0,
|
129 |
-
2500.0,
|
130 |
-
2500.0,
|
131 |
-
2500.0,
|
132 |
-
2500.0,
|
133 |
-
2500.0,
|
134 |
-
2500.0,
|
135 |
-
2500.0,
|
136 |
-
3333.3333333333335,
|
137 |
-
3333.3333333333335,
|
138 |
-
3333.3333333333335,
|
139 |
-
3333.3333333333335,
|
140 |
-
3333.3333333333335,
|
141 |
-
3333.3333333333335,
|
142 |
-
3333.3333333333335,
|
143 |
-
3333.3333333333335,
|
144 |
-
3333.3333333333335,
|
145 |
-
3333.3333333333335,
|
146 |
-
3333.3333333333335,
|
147 |
-
3333.3333333333335,
|
148 |
-
3333.3333333333335,
|
149 |
-
5000.0,
|
150 |
-
5000.0,
|
151 |
-
5000.0]
|
152 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/monoscene_lite/monoscene/__init__.py
DELETED
File without changes
|
spaces/CaliforniaHealthCollaborative/Mermaid.Md/GAMEPLAN.md
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/Chitranshu/Dashboard-Uber/app.py
DELETED
@@ -1,198 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
import panel as pn
|
3 |
-
import hvplot.pandas
|
4 |
-
import numpy as np
|
5 |
-
from math import radians, sin, cos, sqrt, asin
|
6 |
-
uber_data = pd.read_csv(r'uber-raw-data-jul14.csv')
|
7 |
-
type(uber_data.loc[0,'Date/Time'])
|
8 |
-
uber_data['Date/Time'] = pd.to_datetime(uber_data['Date/Time'])
|
9 |
-
uber_data['BinnedHour']=uber_data['Date/Time'].dt.floor('15min')
|
10 |
-
uber_data['BinnedHour'].value_counts()
|
11 |
-
DayMap={0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}
|
12 |
-
uber_data['Day']=uber_data['BinnedHour'].dt.weekday.map(DayMap)
|
13 |
-
uber_data['Date']=uber_data['BinnedHour'].dt.date
|
14 |
-
uber_data['Day']=pd.Categorical(uber_data['Day'],categories=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'],ordered=True)
|
15 |
-
uber_data['Time']=uber_data['BinnedHour'].dt.time
|
16 |
-
weekly_data1 = uber_data.groupby(['Date','Day','Time']).count().dropna().rename(columns={'BinnedHour':'Rides'})['Rides'].reset_index()
|
17 |
-
daywise = weekly_data1.groupby('Day').sum('Day')
|
18 |
-
# Assuming you have the 'uber_data' DataFrame already defined
|
19 |
-
|
20 |
-
# --- Code 1 ---
|
21 |
-
# Calculate the value counts and sort by index
|
22 |
-
value_counts = uber_data['BinnedHour'].dt.day.value_counts().sort_index()
|
23 |
-
|
24 |
-
# Create a DataFrame from the value counts
|
25 |
-
df = pd.DataFrame({'Days': value_counts.index, 'Rides': value_counts.values})
|
26 |
-
|
27 |
-
# Create a Panel object for the Uber rides graph
|
28 |
-
pn.extension('plotly')
|
29 |
-
pn.config.sizing_mode = 'stretch_width'
|
30 |
-
uber_rides_graph = df.hvplot.bar(x='Days', y='Rides', color='black', xlabel='Days', ylabel='Rides',
|
31 |
-
rot=0, title='Uber Rides per day in July 2014 at NYC',
|
32 |
-
height=400, width=800)
|
33 |
-
|
34 |
-
# --- Code 2 ---
|
35 |
-
# Calculate the value counts and sort by index
|
36 |
-
value_counts = uber_data['BinnedHour'].value_counts().sort_index()
|
37 |
-
|
38 |
-
# Create a DataFrame from the value counts
|
39 |
-
df = pd.DataFrame({'BinnedHour': value_counts.index, 'Rides': value_counts.values})
|
40 |
-
|
41 |
-
# Create a Bokeh figure for the interactive DataFrame graph
|
42 |
-
interactive_df_figure = df.hvplot.line(x='BinnedHour', y='Rides', color='black', alpha=0.8,
|
43 |
-
title='Uber Rides every 15 mins in the month of July at NYC',
|
44 |
-
xlabel='Days', ylabel='No. of Rides',
|
45 |
-
height=400, width=800)
|
46 |
-
|
47 |
-
# Create a Panel object with the Bokeh figure
|
48 |
-
interactive_df_pane = pn.pane.HoloViews(interactive_df_figure)
|
49 |
-
|
50 |
-
# --- Code 3 ---
|
51 |
-
# Extracting day of the week from the 'BinnedHour' column
|
52 |
-
uber_data['BinnedHour'] = pd.to_datetime(uber_data['BinnedHour'])
|
53 |
-
uber_data['BinnedHour'].value_counts()
|
54 |
-
DayMap = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
|
55 |
-
uber_data['Day'] = uber_data['BinnedHour'].dt.weekday.map(DayMap)
|
56 |
-
uber_data['Date'] = uber_data['BinnedHour'].dt.date
|
57 |
-
uber_data['Day'] = pd.Categorical(uber_data['Day'],
|
58 |
-
categories=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
|
59 |
-
'Sunday'],
|
60 |
-
ordered=True)
|
61 |
-
uber_data['Time'] = uber_data['BinnedHour'].dt.time
|
62 |
-
|
63 |
-
# Grouping by Date, Day, and Time to get the count of rides for each time slot
|
64 |
-
weekly_data = uber_data.groupby(['Date', 'Day', 'Time']).count().dropna().rename(columns={'BinnedHour': 'Rides'})[
|
65 |
-
'Rides'].reset_index()
|
66 |
-
|
67 |
-
# Summing up the rides per day
|
68 |
-
daywise = weekly_data.groupby('Day')['Rides'].sum()
|
69 |
-
df_total_rides = pd.DataFrame({'Days': daywise.index, 'Rides': daywise.values})
|
70 |
-
|
71 |
-
# Create a Panel object for the 'Total Rides per Day' graph
|
72 |
-
total_rides_graph = df_total_rides.hvplot.bar(x='Days', y='Rides', color='black', xlabel='Days', ylabel='Total Rides',
|
73 |
-
rot=0, title='Total Rides per Day',
|
74 |
-
height=400, width=800,
|
75 |
-
value_label=True) # Display total value when hovering
|
76 |
-
|
77 |
-
# --- Code 4 ---
|
78 |
-
# Your original data processing
|
79 |
-
weekly_data = weekly_data.groupby(['Day', 'Time']).mean('Rides')
|
80 |
-
weekly_data1 = weekly_data.unstack(level=0)
|
81 |
-
average_rides = weekly_data1.T.mean()
|
82 |
-
|
83 |
-
# Create a HoloViews plot
|
84 |
-
rides_plot = average_rides.hvplot(c='black', xlabel='Date', ylabel='Average rides',
|
85 |
-
xticks=10, title='Average Uber rides on any day in July 2014 at NYC',
|
86 |
-
height=400, width=800)
|
87 |
-
|
88 |
-
# Wrap the plot in a Panel
|
89 |
-
avg_rides_panel = pn.panel(rides_plot)
|
90 |
-
# --- Code 5 ---
|
91 |
-
# Countplot using hvplot
|
92 |
-
BaseMapper = {'B02512': 'Unter', 'B02598': 'Hinter', 'B02617': 'Weiter', 'B02682': 'Schmecken', 'B02764': 'Danach-NY'}
|
93 |
-
plot_top_rides_city = uber_data['Base'].map(BaseMapper).value_counts().hvplot(kind='bar', rot=0, xlabel='Base', ylabel='Total rides', color='black',
|
94 |
-
title='CountPlot: Total uber rides vs Base - July 2014, NYC', height=400, width=800)
|
95 |
-
|
96 |
-
# --- Code 6 ---
|
97 |
-
# Your code 6 as provided
|
98 |
-
metro_art_coordinates = (40.7794, -73.9632)
|
99 |
-
empire_state_building_coordinates = (40.7484, -73.9857)
|
100 |
-
|
101 |
-
def haversine(coordinates1, coordinates2):
|
102 |
-
lat1, lon1 = coordinates1
|
103 |
-
lat2, lon2 = coordinates2
|
104 |
-
|
105 |
-
# Convert to radians and apply Haversine formula
|
106 |
-
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
|
107 |
-
dlon = lon2 - lon1
|
108 |
-
dlat = lat2 - lat1
|
109 |
-
|
110 |
-
a = sin(dlat/2)**2 + cos(lat1)*cos(lat2)*sin(dlon/2)**2
|
111 |
-
c = 2 * asin(sqrt(a))
|
112 |
-
r = 3956
|
113 |
-
return c * r
|
114 |
-
|
115 |
-
# Assuming `uber_data` is a DataFrame containing 'Lat' and 'Lon' columns
|
116 |
-
# Calculate distances from 'metro_art_coordinates' and 'empire_state_building_coordinates'
|
117 |
-
uber_data['Distance MM'] = uber_data[['Lat', 'Lon']].apply(lambda x: haversine(metro_art_coordinates, tuple(x)), axis=1)
|
118 |
-
uber_data['Distance ESB'] = uber_data[['Lat', 'Lon']].apply(lambda x: haversine(empire_state_building_coordinates, tuple(x)), axis=1)
|
119 |
-
|
120 |
-
# Count the number of rides within 0.25 miles of each location
|
121 |
-
# print((uber_data[['Distance MM', 'Distance ESB']] < 0.25).sum())
|
122 |
-
|
123 |
-
# Create distance range and count the number of rides within each distance
|
124 |
-
distance_range = np.arange(0.1, 5.1, 0.1)
|
125 |
-
distance_data = [(uber_data[['Distance MM', 'Distance ESB']] < dist).sum() for dist in distance_range]
|
126 |
-
distance_data = pd.concat(distance_data, axis=1)
|
127 |
-
distance_data = distance_data.T
|
128 |
-
distance_data.index = distance_range
|
129 |
-
distance_data = distance_data.rename(columns={'Distance MM': 'CloserToMM', 'Distance ESB': 'CloserToESB'})
|
130 |
-
|
131 |
-
pn.extension('bokeh')
|
132 |
-
|
133 |
-
# Create the hvplot figure with customized colors
|
134 |
-
fig = distance_data.hvplot(height=400, width=800, color=['black', 'grey']).opts(title='Number of Rides Closer to ESB and MM',
|
135 |
-
xlabel='Threshold Radius(mi)',
|
136 |
-
ylabel='Rides')
|
137 |
-
|
138 |
-
# Create a panel with the figure
|
139 |
-
fig_panel = pn.panel(fig)
|
140 |
-
|
141 |
-
# Define Panel widgets
|
142 |
-
yaxis_radio = pn.widgets.RadioButtonGroup(
|
143 |
-
name='Y axis',
|
144 |
-
options=['Rides vs Days', '15 min of Uber', 'Total Rides per Day', 'Avg Rides per Day', 'Top Rides City', 'Predicting Distance'],
|
145 |
-
button_type='light',
|
146 |
-
button_style='solid',
|
147 |
-
inline=True
|
148 |
-
)
|
149 |
-
|
150 |
-
# Define the Panel layout
|
151 |
-
panel_layout = pn.Column(
|
152 |
-
yaxis_radio,
|
153 |
-
pn.pane.HoloViews(uber_rides_graph),
|
154 |
-
)
|
155 |
-
|
156 |
-
# Define the callback function for the radio button
|
157 |
-
def update_chart(event):
|
158 |
-
if event.new == 'Rides vs Days':
|
159 |
-
panel_layout[1] = pn.pane.HoloViews(uber_rides_graph)
|
160 |
-
elif event.new == '15 min of Uber':
|
161 |
-
panel_layout[1] = interactive_df_pane
|
162 |
-
elif event.new == 'Total Rides per Day':
|
163 |
-
panel_layout[1] = total_rides_graph
|
164 |
-
elif event.new == 'Avg Rides per Day':
|
165 |
-
panel_layout[1] = avg_rides_panel
|
166 |
-
elif event.new == 'Top Rides City':
|
167 |
-
panel_layout[1] = plot_top_rides_city
|
168 |
-
elif event.new == 'Predicting Distance':
|
169 |
-
panel_layout[1] = fig_panel
|
170 |
-
|
171 |
-
yaxis_radio.param.watch(update_chart, 'value')
|
172 |
-
panel_layout.append
|
173 |
-
|
174 |
-
# Display the Panel layout
|
175 |
-
panel_layout
|
176 |
-
import panel as pn
|
177 |
-
pn.extension() # Add this line to load the Panel extension
|
178 |
-
|
179 |
-
# Layout using Template
|
180 |
-
template = pn.template.FastListTemplate(
|
181 |
-
title='Uber Analysis Dashboard',
|
182 |
-
sidebar=[
|
183 |
-
pn.pane.PNG('Uber2.png', sizing_mode='scale_both'),
|
184 |
-
pn.pane.Markdown("# Key Performance Indicators (KPIs) of the EDA"),
|
185 |
-
pn.pane.Markdown("1. Let us visualize the total uber rides per day in the month of July 2014"),
|
186 |
-
pn.pane.Markdown("2. Let us have a more closer look at it, say every 15 minutes from July 1 to July 31."),
|
187 |
-
pn.pane.Markdown("3. Grouping weekly_data by days to plot total rides per week in july 2014."),
|
188 |
-
pn.pane.Markdown("4. Finding average rides on any day."),
|
189 |
-
pn.pane.Markdown("5. Now, let's try visualizing the relationship between Base and total number of rides in July 2014"),
|
190 |
-
pn.pane.Markdown("6. The number of rides predicted to Metropolitan Museum (MM) and Empire State Building (ESB)")],
|
191 |
-
main = [pn.Row(pn.Column(panel_layout)),
|
192 |
-
pn.Row(pn.pane.Markdown("Designed and Developed with ❤️ by Chitranshu Nagdawane © 2023"))],
|
193 |
-
accent_base_color="#000000",
|
194 |
-
header_background="#000000"
|
195 |
-
)
|
196 |
-
|
197 |
-
template.servable()
|
198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CohereForAI/pokemon-cards-explorer/src/data_scraping.py
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
from time import time, sleep
|
3 |
-
from tqdm import tqdm, trange
|
4 |
-
import requests
|
5 |
-
from bs4 import BeautifulSoup
|
6 |
-
|
7 |
-
url = "https://pokemondb.net/pokedex/all"
|
8 |
-
r = requests.get(url)
|
9 |
-
soup = BeautifulSoup(r.content, 'html5lib')
|
10 |
-
|
11 |
-
data = []
|
12 |
-
table_body = soup.find("tbody")
|
13 |
-
rows = table_body.find_all('tr')
|
14 |
-
for row in rows:
|
15 |
-
cols = row.find_all('td')
|
16 |
-
cols = [ele.text.strip() for ele in cols]
|
17 |
-
data.append([ele for ele in cols if ele])
|
18 |
-
|
19 |
-
|
20 |
-
urls = []
|
21 |
-
base_url = f"https://pokemondb.net/pokedex/"
|
22 |
-
for a in tqdm(data):
|
23 |
-
name = a[1]
|
24 |
-
name = name.lower().replace(" ", '-')
|
25 |
-
candidate_url = base_url + f"{name}"
|
26 |
-
r = requests.get(candidate_url)
|
27 |
-
if r.ok:
|
28 |
-
urls.append(candidate_url)
|
29 |
-
|
30 |
-
|
31 |
-
def get_pokedex_entries(url):
|
32 |
-
r = requests.get(url)
|
33 |
-
if not r.ok:
|
34 |
-
print("URL is not responding...")
|
35 |
-
return -1
|
36 |
-
soup = BeautifulSoup(r.content, 'html5lib')
|
37 |
-
|
38 |
-
pokedex_entries = soup.find_all("td", {"class" : "cell-med-text"})
|
39 |
-
pokedex_text = " ".join([entry.text for entry in pokedex_entries])
|
40 |
-
|
41 |
-
return pokedex_text
|
42 |
-
|
43 |
-
def get_pokemon_name(url):
|
44 |
-
r = requests.get(url)
|
45 |
-
if not r.ok:
|
46 |
-
print("URL is not responding...")
|
47 |
-
return -1
|
48 |
-
soup = BeautifulSoup(r.content, 'html5lib')
|
49 |
-
name = soup.find("h1").text
|
50 |
-
return name
|
51 |
-
|
52 |
-
def get_pokemon_intro(url):
|
53 |
-
r = requests.get(url)
|
54 |
-
if not r.ok:
|
55 |
-
print("URL is not responding...")
|
56 |
-
return -1
|
57 |
-
|
58 |
-
soup = BeautifulSoup(r.content, 'html5lib')
|
59 |
-
ps = soup.find_all("p")
|
60 |
-
texts = [p.text for p in ps]
|
61 |
-
i = texts.index("\n\n\n")
|
62 |
-
return " ".join(texts[:i])
|
63 |
-
|
64 |
-
def get_pokemon_image(url, name):
|
65 |
-
r = requests.get(url)
|
66 |
-
soup = BeautifulSoup(r.content, 'html5lib')
|
67 |
-
try:
|
68 |
-
img_url = soup.find_all("img", {"alt":f"{name} artwork by Ken Sugimori"})[0]['src']
|
69 |
-
except:
|
70 |
-
try:
|
71 |
-
img_url = soup.find_all("img", {"alt": f"{name}"})[0]['src']
|
72 |
-
except:
|
73 |
-
return -1
|
74 |
-
|
75 |
-
return img_url
|
76 |
-
|
77 |
-
|
78 |
-
p_names = []
|
79 |
-
pd_text = []
|
80 |
-
p_intros = []
|
81 |
-
p_images = []
|
82 |
-
|
83 |
-
for url in tqdm(urls):
|
84 |
-
name = get_pokemon_name(url)
|
85 |
-
p_names.append(name)
|
86 |
-
|
87 |
-
intro = get_pokemon_intro(url)
|
88 |
-
p_intros.append(intro)
|
89 |
-
|
90 |
-
img_url = get_pokemon_image(url, name)
|
91 |
-
p_images.append(img_url)
|
92 |
-
|
93 |
-
pokedex_entry = get_pokedex_entries(url)
|
94 |
-
pd_text.append(pokedex_entry)
|
95 |
-
|
96 |
-
sleep(1)
|
97 |
-
|
98 |
-
|
99 |
-
pd.DataFrame.from_dict({"name":p_names,
|
100 |
-
"intro_text":p_intros,
|
101 |
-
"img_url":p_images,
|
102 |
-
"pokedex_entry": pd_text})\
|
103 |
-
.to_json("./pokemondb_data.jsonl", lines=True, orient='records')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|