Commit f8beb95
1 Parent(s): 03921cb
Update parquet files (step 30 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/Provider/Providers/Forefront.py +0 -30
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobephotoshopcs2paradoxkeygenindir12 A Complete Guide to Using Adobe Photoshop CS2 Keygen Paradox.md +0 -171
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Photoshop CS6 Nesabamedia A Risky and Illegal Way to Get Photoshop for Free.md +0 -38
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Virtual DJ 7 Cracked Version for Free and Unlock All the Features and Functions of This Software.md +0 -46
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HACK Camtasia Studio 7 Serial Tips and Tricks to Make the Most of the Software.md +0 -196
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (ice age scrat no time for nuts 1080p) - See Scrat travel through time in search of his nut.md +0 -132
- spaces/1gistliPinn/ChatGPT4/Examples/Dos Vidas En Un Instante Descargar Espa Ol.md +0 -15
- spaces/1phancelerku/anime-remove-background/Download WhatsApp Business for Windows 10 (64-bit) and Boost Your Sales and Customer Satisfaction.md +0 -196
- spaces/1phancelerku/anime-remove-background/Download and Listen to Woodys Pull String Phrases The Best Toy Story Sound Effects.md +0 -153
- spaces/1phancelerku/anime-remove-background/Download the Thrilling Nollywood Tv Series - Jagaban ft. Selina Tested (Episode 6).md +0 -187
- spaces/1phancelerku/anime-remove-background/Free Fire Advance Server on iPhone How to Get Activation Code and Play.md +0 -136
- spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/modeling_roberta_series.py +0 -134
- spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets.py +0 -123
- spaces/AI-Zero-to-Hero/07-SL-Chatbot-Blenderbot/app.py +0 -28
- spaces/AIConsultant/MusicGen/audiocraft/utils/ui.py +0 -34
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/tokenize_caption.py +0 -86
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/word2vec/create_word_embedding.py +0 -67
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/fs2_orig.py +0 -102
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py +0 -172
- spaces/Abhilashvj/planogram-compliance/CONTRIBUTING.md +0 -94
- spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/base.py +0 -181
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/audio/Factory.d.ts +0 -6
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/AddChildMethods.js +0 -46
- spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/ops/__init__.py +0 -9
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/style_module/style_module.py +0 -300
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/asymmetricautoencoderkl.md +0 -55
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +0 -319
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py +0 -5
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/base_roi_head.py +0 -103
- spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py +0 -9
- spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py +0 -12
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/prompts.py +0 -51
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/corner_pool.py +0 -161
- spaces/AppleQAQ/anime-remove-background/app.py +0 -52
- spaces/Ariharasudhan/XAI_Class-Activation-Maps/README.md +0 -12
- spaces/Artples/llama-2-7b-chat/run-app.sh +0 -1
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/__init__.py +0 -1
- spaces/AxelBell/EasyOCR_text_recognition/data.py +0 -110
- spaces/Axolotlily/TextGen/app.py +0 -25
- spaces/Bart92/RVC_HF/demucs/pretrained.py +0 -107
- spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/spec_utils.py +0 -667
- spaces/Benson/text-generation/Examples/Amanda El Aventurero Descargar Apk 2023.md +0 -98
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/html.py +0 -991
- spaces/BigDL/bigdl_nano_demo/app.py +0 -192
- spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/model.py +0 -461
- spaces/BlitzEsports/TextToImage/index.html +0 -77
- spaces/Brasd99/AnswerMate/app.py +0 -141
- spaces/CVPR/LIVE/pybind11/include/pybind11/complex.h +0 -65
- spaces/CVPR/WALT/mmdet/core/anchor/point_generator.py +0 -37
spaces/101-5/gpt4free/g4f/Provider/Providers/Forefront.py
DELETED
@@ -1,30 +0,0 @@
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://forefront.com'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    json_data = {
-        'text': messages[-1]['content'],
-        'action': 'noauth',
-        'id': '',
-        'parentId': '',
-        'workspaceId': '',
-        'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
-        'model': 'gpt-4',
-        'messages': messages[:-1] if len(messages) > 1 else [],
-        'internetMode': 'auto'
-    }
-    response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
-        json=json_data, stream=True)
-    for token in response.iter_lines():
-        if b'delta' in token:
-            token = json.loads(token.decode().split('data: ')[1])['delta']
-            yield (token)
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobephotoshopcs2paradoxkeygenindir12 A Complete Guide to Using Adobe Photoshop CS2 Keygen Paradox.md
DELETED
@@ -1,171 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download and Activate Adobe Photoshop CS2 for Free</h1>
|
3 |
-
<p>Adobe Photoshop CS2 is one of the most popular and powerful photo editing software that can help you create stunning images and graphics. However, this software is not free and you need to pay a license fee to use it legally. But what if you want to use Adobe Photoshop CS2 for free without breaking the law? Is there a way to download and activate this software without paying anything?</p>
|
4 |
-
<p>In this article, we will show you how to download and activate Adobe Photoshop CS2 for free using a keygen. We will also explain what a keygen is, how it works, and what are the risks and disadvantages of using it. But before we get into that, let's first understand what Adobe Photoshop CS2 is and why you might want to use it.</p>
|
5 |
-
<h2>adobephotoshopcs2paradoxkeygenindir12</h2><br /><p><b><b>Download File</b> ✅ <a href="https://byltly.com/2uKwy3">https://byltly.com/2uKwy3</a></b></p><br /><br />
|
6 |
-
<h2>What is Adobe Photoshop CS2?</h2>
|
7 |
-
<p>Adobe Photoshop CS2 is the ninth version of Adobe Photoshop, which was released in 2005. It is a software that allows you to edit, enhance, and manipulate digital images and graphics. You can use it for various purposes, such as photo retouching, color correction, cropping, resizing, adding effects, filters, layers, masks, text, shapes, and more.</p>
|
8 |
-
<p>Adobe Photoshop CS2 also has some advanced features that make it stand out from other photo editing software. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>Camera Raw 3.0:</strong> This feature allows you to import and edit raw images from digital cameras without losing any quality or data. You can adjust the exposure, white balance, contrast, saturation, sharpness, noise reduction, and more.</li>
|
11 |
-
<li><strong>Smart Objects:</strong> This feature allows you to transform and warp raster and vector objects without losing their original quality or resolution. You can also apply filters and effects to smart objects non-destructively.</li>
|
12 |
-
<li><strong>Image Warp:</strong> This feature allows you to distort and reshape any image or layer using customizable warp presets or custom points. You can use it to create realistic or artistic effects.</li>
|
13 |
-
<li><strong>Vanishing Point:</strong> This feature allows you to clone, paint, and transform images in perspective using a grid that matches the perspective of your image. You can use it to create realistic scenes or compositions.</li>
|
14 |
-
<li><strong>Spot Healing Brush:</strong> This feature allows you to quickly remove blemishes, dust, scratches, and other imperfections from your images with a single click. It automatically samples the surrounding pixels and blends them seamlessly.</li>
|
15 |
-
</ul>
|
16 |
-
<p>These are just some of the features and benefits of Adobe Photoshop CS2 that make it a powerful and versatile photo editing software. However, as we mentioned earlier, this software is not free and you need to pay a license fee to use it legally.</p>
|
17 |
-
<p>adobe photoshop cs2 paradox keygen indir 12 download<br />
|
18 |
-
how to use adobe photoshop cs2 paradox keygen indir 12<br />
|
19 |
-
adobe photoshop cs2 paradox keygen indir 12 crack<br />
|
20 |
-
adobe photoshop cs2 paradox keygen indir 12 free<br />
|
21 |
-
adobe photoshop cs2 paradox keygen indir 12 full version<br />
|
22 |
-
adobe photoshop cs2 paradox keygen indir 12 serial number<br />
|
23 |
-
adobe photoshop cs2 paradox keygen indir 12 activation code<br />
|
24 |
-
adobe photoshop cs2 paradox keygen indir 12 tutorial<br />
|
25 |
-
adobe photoshop cs2 paradox keygen indir 12 rar<br />
|
26 |
-
adobe photoshop cs2 paradox keygen indir 12 zip<br />
|
27 |
-
adobe photoshop cs2 paradox keygen indir 12 online<br />
|
28 |
-
adobe photoshop cs2 paradox keygen indir 12 mac<br />
|
29 |
-
adobe photoshop cs2 paradox keygen indir 12 windows<br />
|
30 |
-
adobe photoshop cs2 paradox keygen indir 12 linux<br />
|
31 |
-
adobe photoshop cs2 paradox keygen indir 12 android<br />
|
32 |
-
adobe photoshop cs2 paradox keygen indir 12 ios<br />
|
33 |
-
adobe photoshop cs2 paradox keygen indir 12 review<br />
|
34 |
-
adobe photoshop cs2 paradox keygen indir 12 reddit<br />
|
35 |
-
adobe photoshop cs2 paradox keygen indir 12 youtube<br />
|
36 |
-
adobe photoshop cs2 paradox keygen indir 12 video<br />
|
37 |
-
adobe photoshop cs2 paradox keygen indir 12 guide<br />
|
38 |
-
adobe photoshop cs2 paradox keygen indir 12 tips<br />
|
39 |
-
adobe photoshop cs2 paradox keygen indir 12 tricks<br />
|
40 |
-
adobe photoshop cs2 paradox keygen indir 12 hacks<br />
|
41 |
-
adobe photoshop cs2 paradox keygen indir 12 cheats<br />
|
42 |
-
adobe photoshop cs2 paradox keygen indir 12 alternatives<br />
|
43 |
-
adobe photoshop cs2 paradox keygen indir 12 comparison<br />
|
44 |
-
adobe photoshop cs2 paradox keygen indir 12 vs<br />
|
45 |
-
adobe photoshop cs2 paradox keygen indir 12 features<br />
|
46 |
-
adobe photoshop cs2 paradox keygen indir 12 benefits<br />
|
47 |
-
adobe photoshop cs2 paradox keygen indir 12 pros and cons<br />
|
48 |
-
adobe photoshop cs2 paradox keygen indir 12 problems<br />
|
49 |
-
adobe photoshop cs2 paradox keygen indir 12 solutions<br />
|
50 |
-
adobe photoshop cs2 paradox keygen indir 12 fixes<br />
|
51 |
-
adobe photoshop cs2 paradox keygen indir 12 updates<br />
|
52 |
-
adobe photoshop cs2 paradox keygen indir 12 patches<br />
|
53 |
-
adobe photoshop cs2 paradox keygen indir 12 upgrades<br />
|
54 |
-
adobe photoshop cs2 paradox keygen indir 12 support<br />
|
55 |
-
adobe photoshop cs2 paradox keygen indir 12 customer service<br />
|
56 |
-
adobe photoshop cs2 paradox keygen indir 12 contact number<br />
|
57 |
-
adobe photoshop cs2 paradox keygen indir 12 email address<br />
|
58 |
-
adobe photoshop cs2 paradox keygen indir 12 website link<br />
|
59 |
-
adobe photoshop cs2 paradox keygen indir 12 blog post<br />
|
60 |
-
adobe photoshop cs2 paradox keygen indir 12 article title<br />
|
61 |
-
adobe photoshop cs2 paradox keygen indir 12 headline idea<br />
|
62 |
-
adobe photoshop cs2 paradox keygen indir 12 content idea<br />
|
63 |
-
adobe photoshop cs2 paradox keygen indir 12 niche topic<br />
|
64 |
-
adobe photoshop cs2 paradox keygen indir 12 keyword research tool</p>
|
65 |
-
<h2>How to Download Adobe Photoshop CS2 for Free?</h2>
|
66 |
-
<p>If you want to use Adobe Photoshop CS2 for free without paying anything, you might be wondering how to download it. The good news is that Adobe has made this software available for free download on its official website. However, there is a catch. This software is only intended for users who have purchased a valid license for Adobe Photoshop CS2 in the past and need to reinstall it on their computers. It is not meant for new users who have never bought this software before.</p>
|
67 |
-
<p>Therefore, if you download Adobe Photoshop CS2 from the official website without having a valid license, you will be violating the terms of service and the copyright laws. You will also need a serial number and an activation code to install and activate this software on your computer. These codes are not provided by Adobe on its website.</p>
|
68 |
-
<p>So how can you get these codes for free? This is where a keygen comes in handy.</p>
|
69 |
-
<h2>How to Activate Adobe Photoshop CS2 with a Keygen?</h2>
|
70 |
-
<h3>What is a Keygen?</h3>
|
71 |
-
<p>A keygen is a program that generates serial numbers and activation codes for various software products. It is usually created by hackers or crackers who want to bypass the security measures of the software developers. A keygen can help you activate a software product without paying anything or contacting the customer support.</p>
|
72 |
-
<p>However, using a keygen is illegal and unethical. It is considered as software piracy, which is a serious crime that can result in fines or imprisonment. Moreover, using a keygen can also expose your computer to malware infection and data theft.</p>
|
73 |
-
<h3>How to Use a Keygen to Generate a Serial Number and an Activation Code?</h3>
|
74 |
-
<p>If you still want to use a keygen to activate Adobe Photoshop CS2 for free despite knowing the risks and consequences, here are the steps you need to follow:</p>
|
75 |
-
<ol>
|
76 |
-
<li><strong>Download a keygen for Adobe Photoshop CS2:</strong> You can find many keygens for this software on various websites that offer cracked software or torrents. However, be careful as these websites may contain viruses or malware that can harm your computer or steal your data.</li>
|
77 |
-
<li><strong>Run the keygen program:</strong> After downloading the keygen file, extract it if it is compressed and run it on your computer. You may need to disable your antivirus or firewall temporarily as they may detect the keygen as malicious.</li>
|
78 |
-
<li><strong>Select "Photoshop CS2 9.0" from the application list:</strong> The keygen program may have options for different software products from Adobe or other companies. Make sure you select "Photoshop CS2 9.0" from the application list before generating any codes.</li>
|
79 |
-
<li><strong>Click "Generate" to get a serial number:</strong> The keygen program will generate a random serial number for Adobe Photoshop CS2 that you can use to install the software on your computer. Copy this serial number by selecting it with your mouse cursor and pressing Ctrl+C on your keyboard.</li>
|
80 |
-
<li><strong>Paste the serial number in the setup window of Adobe Photoshop CS2:</strong> Go back to the setup window of Adobe Photoshop CS2 that you downloaded from the official website earlier. Choose "I have a serial number" option and paste the serial number that you copied from the keygen program by pressing Ctrl+V on your keyboard.</li>
|
81 |
-
<li><strong>Complete the installation process:</strong> Follow the instructions on the screen to complete the installation process of Adobe Photoshop CS2 on your computer.</li>
|
82 |
-
<li><strong>Select "By telephone via" option when prompted for activation:</strong> After installing Adobe Photoshop CS2 on your computer Continuing the article: <li><strong>Select "By telephone via" option when prompted for activation:</strong> After installing Adobe Photoshop CS2 on your computer, you will be asked to activate it within 30 days. To do this, choose "By telephone via" option and click "Next". You will see an activation number on the screen.</li>
|
83 |
-
<li><strong>Copy the activation number and paste it in the keygen program:</strong> Go back to the keygen program that you ran earlier and select "Photoshop CS2 9.0" from the application list. Copy the activation number from the Adobe Photoshop CS2 window by selecting it with your mouse cursor and pressing Ctrl+C on your keyboard. Then paste it in the "Request Code" field of the keygen program by pressing Ctrl+V on your keyboard.</li>
|
84 |
-
<li><strong>Click "Generate" to get an authorization code:</strong> The keygen program will generate an authorization code for Adobe Photoshop CS2 based on the activation number that you entered. Copy this authorization code by selecting it with your mouse cursor and pressing Ctrl+C on your keyboard.</li>
|
85 |
-
<li><strong>Paste the authorization code in the Adobe Photoshop CS2 window:</strong> Go back to the Adobe Photoshop CS2 window and paste the authorization code in the "Authorization Code" field by pressing Ctrl+V on your keyboard. Then click "Activate".</li>
|
86 |
-
<li><strong>Enjoy using Adobe Photoshop CS2 for free:</strong> If everything goes well, you will see a message saying "Thank you" and confirming that your product has been activated successfully. You can now use Adobe Photoshop CS2 for free without any limitations.</li>
|
87 |
-
</ol>
|
88 |
-
<h2>Risks and Disadvantages of Using a Keygen for Adobe Photoshop CS2</h2>
|
89 |
-
<p>While using a keygen may seem like an easy and convenient way to get Adobe Photoshop CS2 for free, it is not without its drawbacks and dangers. Here are some of the risks and disadvantages of using a keygen for Adobe Photoshop CS2:</p>
|
90 |
-
<h3>Legal Issues and Penalties for Software Piracy</h3>
|
91 |
-
<p>Using a keygen to activate Adobe Photoshop CS2 is considered as software piracy, which is a form of intellectual property theft. Software piracy is illegal in most countries and can result in serious legal consequences, such as fines, lawsuits, or even imprisonment. According to the Business Software Alliance (BSA), software piracy costs the global economy more than $63 billion annually in lost revenue and damages.</p>
|
92 |
-
<p>Moreover, using a keygen to activate Adobe Photoshop CS2 is also unethical and unfair to the software developers who spend a lot of time, money, and effort to create quality products. By using a keygen, you are depriving them of their rightful income and recognition.</p>
|
93 |
-
<h3>Malware Infection and Data Theft from Keygens</h3>
|
94 |
-
<p>Another risk of using a keygen to activate Adobe Photoshop CS2 is that you may expose your computer to malware infection and data theft. Keygens are often created by hackers or crackers who may have malicious intentions. They may embed viruses, Trojans, worms, spyware, ransomware, or other malware into the keygen files or programs that can harm your computer or steal your data.</p>
|
95 |
-
<p>Some of the possible effects of malware infection and data theft from keygens are:</p>
|
96 |
-
<ul>
|
97 |
-
<li><strong>Your computer may slow down or crash frequently:</strong> Malware can consume a lot of your system resources or damage your files or programs, causing your computer to perform poorly or become unstable.</li>
|
98 |
-
<li><strong>Your personal information may be compromised:</strong> Malware can monitor your online activities or keystrokes, access your webcam or microphone, or scan your hard drive for sensitive information, such as passwords, bank accounts, credit cards, or identity documents. This information can then be sent to remote servers or hackers who can use it for identity theft, fraud, blackmail, or other crimes.</li>
|
99 |
-
<li><strong>Your files may be encrypted or deleted:</strong> Malware can encrypt or delete your files or folders, making them inaccessible or unusable. You may then be asked to pay a ransom to get them back or risk losing them forever.</li>
|
100 |
-
</ul>
|
101 |
-
<h3>Poor Performance and Functionality of Cracked Software</h3>
|
102 |
-
<p>A final disadvantage of using a keygen to activate Adobe Photoshop CS2 is that you may experience poor performance and functionality of the cracked software. Cracked software is software that has been modified or tampered with to bypass its security features or limitations. However, this may also affect its quality or compatibility with your system or other programs.</p>
|
103 |
-
<p>Some of the possible problems that you may encounter with cracked software are:</p>
|
104 |
-
<ul>
|
105 |
-
<li><strong>Your software may not work properly or at all:</strong> Cracked software may have bugs, errors, glitches, or missing features that can prevent it from working properly or at all. You may also not be able to update it to fix these issues or get new features.</li>
|
106 |
-
<li><strong>Your software may be incompatible with your system or other programs:</strong> Cracked software may not be compatible with your operating system or hardware specifications. It may also conflict with other programs that you have installed on your computer, causing them to malfunction or crash.</li>
|
107 |
-
<li><strong>Your software may be detected as illegal by the software developer:</strong> Cracked software may be detected as illegal by the software developer through online verification or registration processes. You may then be blocked from using it or reported to the authorities.</li>
|
108 |
-
</ul>
|
109 |
-
<h2>Conclusion</h2>
|
110 |
-
<p>In conclusion, using a keygen to download and activate Adobe Photoshop CS2 for free is not a good idea. It is illegal, unethical, risky, and disadvantageous. You may face legal issues and penalties for software piracy, malware infection and data theft from keygens, and poor performance and functionality of cracked software.</p>
|
111 |
-
<p>If you want to use Adobe Photoshop CS2 legally and safely, you should buy a valid license from Adobe or use its free alternatives. Some of the free alternatives to Adobe Photoshop CS2 are GIMP, Paint.NET Continuing the article: <h2>Free Alternatives to Adobe Photoshop CS2</h2>
|
112 |
-
<p>If you don't want to use a keygen to download and activate Adobe Photoshop CS2 for free, but you also don't want to pay for a license, you may consider using some of the free alternatives to this software. There are many free photo editing software that can offer similar or even better features and functionality than Adobe Photoshop CS2. Some of the best free alternatives to Adobe Photoshop CS2 are:</p>
|
113 |
-
<h3>GIMP</h3>
|
114 |
-
<p>GIMP (GNU Image Manipulation Program) is one of the most popular and powerful free photo editing software that can help you create stunning images and graphics. It has a very similar interface and features to Adobe Photoshop CS2, such as layers, masks, filters, effects, brushes, tools, and more. You can also import and edit PSD files with GIMP, although some advanced features may not be supported.</p>
|
115 |
-
<p>GIMP is also highly customizable and extensible, as you can add plugins and scripts to enhance its functionality further. GIMP is available for Windows, Mac, and Linux platforms, and supports a wide range of file formats.</p>
|
116 |
-
<h3>Pixlr Editor</h3>
|
117 |
-
<p>Pixlr Editor is a web-based photo editing software that can run on any browser that supports Flash. It has a very similar interface and features to Adobe Photoshop CS2, such as layers, blending modes, filters, adjustments, tools, and more. You can also import and edit PSD files with Pixlr Editor, although some advanced features may not be supported.</p>
|
118 |
-
<p>Pixlr Editor is very easy to use and convenient, as you don't need to download or install anything on your computer. You can also save your work online or export it to your local drive. Pixlr Editor is available for free for personal use.</p>
|
119 |
-
<h3>Paint.NET</h3>
|
120 |
-
<p>Paint.NET is a free photo editing software that was originally designed as a more advanced version of Microsoft Paint. It has a simple and intuitive interface that offers basic and advanced features, such as layers, effects, adjustments, tools, and more. You can also import and edit PSD files with Paint.NET, although some advanced features may not be supported.</p>
|
121 |
-
<p>Paint.NET is also very fast and lightweight, as it runs on the .NET Framework. You can also add plugins and extensions to enhance its functionality further. Paint.NET is available for Windows platforms only.</p>
|
122 |
-
<h3>Krita</h3>
|
123 |
-
<p>Krita is a free and open-source photo editing software that is mainly focused on digital painting and drawing. It has a rich set of features that can help you create amazing artworks, such as brushes, textures, patterns, gradients, tools, filters, effects, layers, masks, and more. You can also import and edit PSD files with Krita, although some advanced features may not be supported.</p>
|
124 |
-
<p>Krita is also very user-friendly and customizable, as you can adjust the interface and settings to suit your preferences and workflow. You can also add plugins and scripts to enhance its functionality further. Krita is available for Windows Continuing the article: <h3>Krita</h3>
|
125 |
-
<p>Krita is a free and open-source photo editing software that is mainly focused on digital painting and drawing. It has a rich set of features that can help you create amazing artworks, such as brushes, textures, patterns, gradients, tools, filters, effects, layers, masks, and more. You can also import and edit PSD files with Krita, although some advanced features may not be supported.</p>
|
126 |
-
<p>Krita is also very user-friendly and customizable, as you can adjust the interface and settings to suit your preferences and workflow. You can also add plugins and scripts to enhance its functionality further. Krita is available for Windows, Mac, Linux, Android, and ChromeOS platforms.</p>
|
127 |
-
<h2>Conclusion</h2>
|
128 |
-
<p>In conclusion, Adobe Photoshop CS2 is a powerful and versatile photo editing software that can help you create stunning images and graphics. However, it is not free and you need to pay a license fee to use it legally. If you want to use it for free without paying anything, you may consider using a keygen to download and activate it. However, this is illegal, unethical, risky, and disadvantageous. You may face legal issues and penalties for software piracy, malware infection and data theft from keygens, and poor performance and functionality of cracked software.</p>
|
129 |
-
<p>If you want to use a free photo editing software that can offer similar or even better features and functionality than Adobe Photoshop CS2, you may consider using some of the free alternatives to this software. Some of the best free alternatives to Adobe Photoshop CS2 are GIMP, Pixlr Editor, Paint.NET Continuing the article: <h3>Paint.NET</h3>
|
130 |
-
<p>Paint.NET is a free photo editing software that was originally designed as a more advanced version of Microsoft Paint. It has a simple and intuitive interface that offers basic and advanced features, such as layers, effects, adjustments, tools, and more. You can also import and edit PSD files with Paint.NET, although some advanced features may not be supported.</p>
|
131 |
-
<p>Paint.NET is also very fast and lightweight, as it runs on the .NET Framework. You can also add plugins and extensions to enhance its functionality further. Paint.NET is available for Windows platforms only.</p>
|
132 |
-
<h3>Krita</h3>
|
133 |
-
<p>Krita is a free and open-source photo editing software that is mainly focused on digital painting and drawing. It has a rich set of features that can help you create amazing artworks, such as brushes, textures, patterns, gradients, tools, filters, effects, layers, masks, and more. You can also import and edit PSD files with Krita, although some advanced features may not be supported.</p>
|
134 |
-
<p>Krita is also very user-friendly and customizable, as you can adjust the interface and settings to suit your preferences and workflow. You can also add plugins and scripts to enhance its functionality further. Krita is available for Windows, Mac, Linux, Android, and ChromeOS platforms.</p>
|
135 |
-
<h3>Photopea</h3>
|
136 |
-
<p>Photopea is a web-based photo editing software that can run on any browser that supports HTML5. It has a very similar interface and features to Adobe Photoshop CS2, such as layers, blending modes, filters, adjustments, tools, and more. You can also import and edit PSD files with Photopea, as well as other file formats such as Sketch or XCF.</p>
|
137 |
-
<p>Photopea is very easy to use and convenient, as you don't need to download or install anything on your computer. You can also save your work online or export it to your local drive. Photopea is available for free for personal use.</p>
|
138 |
-
<h2>Conclusion</h2>
|
139 |
-
<p>In conclusion, Adobe Photoshop CS2 is a powerful and versatile photo editing software that can help you create stunning images and graphics. However, it is not free and you need to pay a license fee to use it legally. If you want to use it for free without paying anything, you may consider using a keygen to download and activate it. However, this is illegal, unethical, risky, and disadvantageous. You may face legal issues and penalties for software piracy, malware infection and data theft from keygens, and poor performance and functionality of cracked software.</p>
|
140 |
-
<p>If you want to use a free photo editing software that can offer similar or even better features and functionality than Adobe Photoshop CS2 Continuing the article: <p>If you want to use a free photo editing software that can offer similar or even better features and functionality than Adobe Photoshop CS2, you may consider using some of the free alternatives to this software. Some of the best free alternatives to Adobe Photoshop CS2 are GIMP, Pixlr Editor, Paint.NET, Krita, and Photopea. These software are free, legal, safe, and effective. You can use them to edit, enhance, and manipulate your images and graphics without any limitations or risks.</p>
|
141 |
-
<h2>FAQs</h2>
|
142 |
-
<p>Here are some of the frequently asked questions about Adobe Photoshop CS2 and its free alternatives:</p>
|
143 |
-
<h3>Q: Is Adobe Photoshop CS2 still available?</h3>
|
144 |
-
<p>A: Adobe Photoshop CS2 is no longer available for purchase or download from Adobe. The only way to get it is to have a valid license from the past and download it from the official website. However, this is only intended for users who need to reinstall it on their computers. It is not meant for new users who have never bought this software before.</p>
|
145 |
-
<h3>Q: Is Adobe Photoshop CS2 compatible with Windows 10?</h3>
|
146 |
-
<p>A: Adobe Photoshop CS2 is not officially compatible with Windows 10. It may work on some systems, but it may also cause errors or crashes on others. It is recommended to use a newer version of Adobe Photoshop or a free alternative that is compatible with Windows 10.</p>
|
147 |
-
<h3>Q: What are the system requirements for Adobe Photoshop CS2?</h3>
|
148 |
-
<p>A: The minimum system requirements for Adobe Photoshop CS2 are:</p>
|
149 |
-
<ul>
|
150 |
-
<li>Intel Pentium III or 4 processor</li>
|
151 |
-
<li>Windows 2000/XP</li>
|
152 |
-
<li>320 MB of RAM (384 MB recommended)</li>
|
153 |
-
<li>650 MB of available hard-disk space</li>
|
154 |
-
<li>1024 x 768 monitor resolution with 16-bit video card</li>
|
155 |
-
<li>CD-ROM drive</li>
|
156 |
-
<li>Internet or phone connection required for product activation</li>
|
157 |
-
</ul>
|
158 |
-
<h3>Q: What are the advantages of using a keygen for Adobe Photoshop CS2?</h3>
|
159 |
-
<p>A: The only advantage of using a keygen for Adobe Photoshop CS2 is that you can download and activate this software for free without paying anything. However, this is illegal, unethical, risky, and disadvantageous. You may face legal issues and penalties for software piracy, malware infection and data theft from keygens, and poor performance and functionality of cracked software.</p>
|
160 |
-
<h3>Q: What are the disadvantages of using a keygen for Adobe Photoshop CS2?</h3>
|
161 |
-
<p>A: The disadvantages of using a keygen for Adobe Photoshop CS2 are:</p>
|
162 |
-
<ul>
|
163 |
-
<li>You will be violating the terms of service and the copyright laws of Adobe and the software developers.</li>
|
164 |
-
<li>You will be depriving the software developers of their rightful income and recognition.</li>
|
165 |
-
<li>You will be exposing your computer to malware infection and data theft from keygens.</li>
|
166 |
-
<li>You will be experiencing poor performance and functionality of cracked software.</li>
|
167 |
-
<li>You will not be able to update or get support for your software.</li>
|
168 |
-
</ul>
|
169 |
-
</p> 0a6ba089eb<br />
|
170 |
-
<br />
|
171 |
-
<br />
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Photoshop CS6 Nesabamedia A Risky and Illegal Way to Get Photoshop for Free.md
DELETED
@@ -1,38 +0,0 @@
-<br />
-<h1>Download Photoshop CS6 Nesabamedia: A Guide for Beginners</h1>
-<p>Photoshop CS6 is one of the most popular and powerful photo editing software in the world. It offers a range of features and tools that can help you create stunning images and graphics. However, Photoshop CS6 is not free and requires a license to use. If you want to download Photoshop CS6 Nesabamedia, a cracked version of the software, you need to be careful and follow some steps.</p>
-<h2>download photoshop cs6 nesabamedia</h2><br /><p><b><b>Download</b> ☆ <a href="https://byltly.com/2uKzWm">https://byltly.com/2uKzWm</a></b></p><br /><br />
-<p>In this article, we will show you how to download Photoshop CS6 Nesabamedia safely and legally. We will also explain the risks and disadvantages of using a cracked version of Photoshop CS6. Finally, we will suggest some alternatives to Photoshop CS6 that are free and easy to use.</p>
-
-<h2>How to Download Photoshop CS6 Nesabamedia Safely and Legally</h2>
-<p>Photoshop CS6 Nesabamedia is a modified version of Photoshop CS6 that bypasses the activation process and allows you to use the software without a license. However, downloading and using Photoshop CS6 Nesabamedia is illegal and can expose you to various risks. For example, you may encounter malware, viruses, spyware, or ransomware that can harm your computer or steal your personal information. You may also face legal consequences such as fines or lawsuits from Adobe, the owner of Photoshop CS6.</p>
-<p>Therefore, we do not recommend downloading or using Photoshop CS6 Nesabamedia. Instead, we suggest you download Photoshop CS6 legally from Adobe's official website. You can get a free trial of Photoshop CS6 for 7 days and then decide whether you want to buy a subscription or not. You can also get a discounted price if you are a student or a teacher.</p>
-<p>To download Photoshop CS6 legally from Adobe's website, follow these steps:</p>
-<ol>
-<li>Go to <a href="https://www.adobe.com/products/photoshop/free-trial-download.html">https://www.adobe.com/products/photoshop/free-trial-download.html</a> and click on "Download now".</li>
-<li>Sign in with your Adobe ID or create one if you don't have one.</li>
-<li>Choose your operating system (Windows or Mac) and language.</li>
-<li>Follow the instructions on the screen to install Photoshop CS6 on your computer.</li>
-<li>Launch Photoshop CS6 and enjoy your free trial for 7 days.</li>
-</ol>
-
-<h2>The Risks and Disadvantages of Using Photoshop CS6 Nesabamedia</h2>
-<p>As we mentioned earlier, using Photoshop CS6 Nesabamedia is illegal and risky. Here are some of the main drawbacks of using a cracked version of Photoshop CS6:</p>
-<p></p>
-<ul>
-<li>You may get infected with malware, viruses, spyware, or ransomware that can damage your computer or compromise your security.</li>
-<li>You may not be able to access all the features and updates of Photoshop CS6 as Adobe may block or disable them.</li>
-<li>You may experience bugs, errors, crashes, or poor performance as Photoshop CS6 Nesabamedia may not be compatible with your system or other software.</li>
-<li>You may lose your work or data as Photoshop CS6 Nesabamedia may not have a reliable backup or recovery system.</li>
-<li>You may violate the intellectual property rights of Adobe and face legal actions such as fines or lawsuits.</li>
-<li>You may miss out on the benefits of being an Adobe customer such as customer support, tutorials, community forums, cloud storage, etc.</li>
-</ul>
-
-<h2>The Alternatives to Photoshop CS6 That Are Free and Easy to Use</h2>
-<p>If you don't want to pay for Photoshop CS6 or use a cracked version of it, you can try some alternatives that are free and easy to use. Here are some of the best ones:</p>
-<ul>
-<li><a href="https://www.gimp.org/">GIMP</a>: GIMP is an open-source image editor that offers many features similar to Photoshop such as layers, masks, filters, brushes, etc. It is compatible with Windows, Mac, and Linux.</li>
-<li><a href="https://pixlr.com/">Pixlr</a>: Pixlr is an online photo editor that allows you to edit your images in your browser without downloading anything. It has a simple interface and a variety of tools such as crop, resize, rotate, adjust colors, add effects, etc.</li>
-<li><a href="https://www.photopea.com/">Photopea</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Virtual DJ 7 Cracked Version for Free and Unlock All the Features and Functions of This Software.md
DELETED
@@ -1,46 +0,0 @@
-
-<h1>How to Download Virtual DJ 7 Cracked Version for Free</h1>
-<p>If you are looking for a way to mix music on your computer without spending a fortune, you might want to try Virtual DJ 7. This software is one of the most popular and versatile DJing tools that allows you to create professional-quality mixes and mashups with ease. However, if you want to use all the features and functions of Virtual DJ 7, you will need to purchase the pro or business version, which can be quite expensive. Fortunately, there is a way to download Virtual DJ 7 cracked version for free and enjoy all the benefits of this software without paying a dime.</p>
-<h2>download virtual dj 7 cracked version</h2><br /><p><b><b>Download</b> »»» <a href="https://byltly.com/2uKyy1">https://byltly.com/2uKyy1</a></b></p><br /><br />
-<p>In this article, we will show you how to download Virtual DJ 7 cracked version for free and how to install it on your computer. We will also explain some of the advantages and disadvantages of using a cracked version of Virtual DJ 7, as well as some tips and tricks to make the most out of it. But before we get into that, let's see what Virtual DJ 7 can do for you.</p>
-<h2>What is Virtual DJ 7?</h2>
-<p>Virtual DJ 7 is a software developed by Atomix Productions that allows you to mix music from your computer using various effects, filters, loops, samples, and more. You can use it with or without any hardware connected to your computer, such as a DJ controller, a mixer, or a turntable. You can also use it to create visuals that match with the beats that you are dropping, as well as stream your mixes live to various platforms like Facebook and YouTube.</p>
-<p>Virtual DJ 7 has been around for over 20 years and has been updated regularly with new features and improvements. Some of the features that Virtual DJ 7 offers are:</p>
-<p></p>
-<ul>
-<li>Standard sound music control</li>
-<li>Selective control (from -34 to +34%)</li>
-<li>Three-band equalizer with gain + kill</li>
-<li>One click for beat new fame algorithm</li>
-<li>Automatic beating machine</li>
-<li>On-the-fly technology without the need to save or any formatting</li>
-<li>Automatic counting and leveling</li>
-<li>Stomp and throw beat</li>
-<li>Real Search Simulator</li>
-<li>Automatic rotation of the beats</li>
-<li>12 point sync</li>
-<li>Applying the actual sound algorithm</li>
-<li>Fast performance with optimized</li>
-<li>Simplified interface and user friendly</li>
-</ul>
-<p>Virtual DJ 7 is compatible with Windows XP, Vista, 7, 8, and 10. It also supports over 300 DJ controllers from various brands like Pioneer, Numark, Reloop, Denon, Hercules, Rane, and more. You can also sync it with your Android or IOS devices using the VirtualDJ Remote app.</p>
-<h2>How to Download Virtual DJ 7 Cracked Version for Free?</h2>
-<p>If you want to download Virtual DJ 7 cracked version for free, you will need to follow these steps:</p>
-<ol>
-<li>Go to this link and click on the download button.</li>
-<li>You will be redirected to a safe and fast downloader that will help you get the file.</li>
-<li>Once the download is complete, extract the file using WinRAR or any other software that can handle ZIP files.</li>
-<li>Turn off your internet connection and disable any antivirus or windows defender that might interfere with the installation.</li>
-<li>Run the installation file and follow the instructions on the screen.</li>
-<li>Open the block_host.cmd file as administrator and run it. This will prevent Virtual DJ 7 from connecting to the internet and verifying your license.</li>
-<li>Run the VDJ 7 application and open keygen.exe.</li>
-<li>Click on generate and copy the serial number that appears.</li>
-<li>Paste the serial number in the registration window of Virtual DJ 7 and click on OK.</li>
-<li>Congratulations! You have successfully installed Virtual DJ 7 cracked version for free.</li>
-</ol>
-
-<h2>What are the Pros and Cons of Using Virtual DJ 7 Cracked Version?</h2>
-
-<p>While using Virtual DJ 7 cracked version for free might seem like a great deal, there are some advantages and disadvantages that</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HACK Camtasia Studio 7 Serial Tips and Tricks to Make the Most of the Software.md
DELETED
@@ -1,196 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>HACK Camtasia Studio 7 Serial: How to Get It for Free</h1>
|
3 |
-
<p>Camtasia Studio 7 is one of the most popular and powerful screen recording and video editing software in the market. It allows you to create stunning videos for various purposes, such as tutorials, presentations, demos, and more. But how can you get it for free without paying the hefty price tag? In this article, we will show you how to hack Camtasia Studio 7 serial key and enjoy all its features without spending a dime.</p>
|
4 |
-
<h2>HACK Camtasia Studio 7 Serial</h2><br /><p><b><b>Download File</b> >> <a href="https://byltly.com/2uKvmJ">https://byltly.com/2uKvmJ</a></b></p><br /><br />
|
5 |
-
<h2>What is Camtasia Studio 7?</h2>
|
6 |
-
<p>Camtasia Studio 7 is a software product developed by TechSmith, a global leader in screen recording and screen capture. It was released in October 2010 and has since been updated with new features and improvements. Camtasia Studio 7 is compatible with Windows and Mac operating systems.</p>
|
7 |
-
<h3>Features and benefits of Camtasia Studio 7</h3>
|
8 |
-
<p>Camtasia Studio 7 has many features and benefits that make it stand out from other screen recording and video editing software. Some of them are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It can record anything on your computer screen, including your webcam, microphone, system audio, cursor movements, keystrokes, and more.</li>
|
11 |
-
<li>It has a built-in video editor that lets you trim, cut, split, crop, rotate, zoom, pan, add transitions, effects, annotations, captions, quizzes, and more.</li>
|
12 |
-
<li>It can export your videos in various formats and resolutions, such as MP4, WMV, AVI, MOV, GIF, etc.</li>
|
13 |
-
<li>It can upload your videos directly to YouTube, Vimeo, Screencast.com, Google Drive, Dropbox, etc.</li>
|
14 |
-
<li>It can create interactive videos with clickable links, buttons, hotspots, etc.</li>
|
15 |
-
<li>It can import media files from your computer or other sources, such as PowerPoint slides, images, audio files, etc.</li>
|
16 |
-
<li>It has a library of royalty-free music tracks and sound effects that you can use in your videos.</li>
|
17 |
-
<li>It has a smart focus feature that automatically zooms in on the important parts of your screen recording.</li>
|
18 |
-
<li>It has a speech-to-text feature that automatically generates captions from your narration.</li>
|
19 |
-
<li>It has a green screen feature that lets you change the background of your video with any image or video.</li>
|
20 |
-
</ul>
|
21 |
-
<h3>How much does Camtasia Studio 7 cost?</h3>
|
22 |
-
<p>The official price of Camtasia Studio 7 is $299 for a single-user license. This license allows you to install the software on up to two computers. You can also get a volume discount if you buy multiple licenses at once. For example, if you buy 5 licenses, you can save $50 per license. If you buy 10 licenses, you can save $100 per license.</p>
|
23 |
-
<p>However, if you are an educator or a student, you can get a special academic discount of $179 for a single-user license. This license allows you to install the software on up to two computers as well. You just need to provide proof of your academic status when purchasing the software.</p>
|
24 |
-
<h2>Why do you need a serial key for Camtasia Studio 7?</h2>
|
25 |
-
<h3>Activation and registration process</h3>
|
26 |
-
<p>A serial key is a unique code that is required to activate and register your copy of Camtasia Studio 7. Without a valid serial key, you cannot use the software beyond the 30-day trial period. The trial version has all the features of the full version except for the ability to produce videos without a watermark.</p>
|
27 |
-
<p>How to hack Camtasia Studio 7 with serial key<br />
|
28 |
-
Camtasia Studio 7 crack download free<br />
|
29 |
-
Camtasia Studio 7 serial number generator online<br />
|
30 |
-
Hack Camtasia Studio 7 activation code<br />
|
31 |
-
Camtasia Studio 7 license key hack<br />
|
32 |
-
Camtasia Studio 7 full version crack<br />
|
33 |
-
Camtasia Studio 7 keygen hack<br />
|
34 |
-
Camtasia Studio 7 patch hack<br />
|
35 |
-
Camtasia Studio 7 registration code hack<br />
|
36 |
-
Camtasia Studio 7 product key hack<br />
|
37 |
-
Hack Camtasia Studio 7 for free<br />
|
38 |
-
Camtasia Studio 7 hacked version download<br />
|
39 |
-
Camtasia Studio 7 crack file download<br />
|
40 |
-
Hack Camtasia Studio 7 without survey<br />
|
41 |
-
Camtasia Studio 7 serial key hack tool<br />
|
42 |
-
Camtasia Studio 7 crack software download<br />
|
43 |
-
Hack Camtasia Studio 7 offline activation<br />
|
44 |
-
Camtasia Studio 7 hack no virus<br />
|
45 |
-
Camtasia Studio 7 crack tutorial<br />
|
46 |
-
Hack Camtasia Studio 7 latest version<br />
|
47 |
-
Camtasia Studio 7 crack windows 10<br />
|
48 |
-
Hack Camtasia Studio 7 mac os<br />
|
49 |
-
Camtasia Studio 7 crack reddit<br />
|
50 |
-
Hack Camtasia Studio 7 youtube<br />
|
51 |
-
Camtasia Studio 7 crack blogspot<br />
|
52 |
-
Hack Camtasia Studio 7 quora<br />
|
53 |
-
Camtasia Studio 7 crack forum<br />
|
54 |
-
Hack Camtasia Studio 7 facebook<br />
|
55 |
-
Camtasia Studio 7 crack twitter<br />
|
56 |
-
Hack Camtasia Studio 7 instagram<br />
|
57 |
-
Camtasia Studio 7 crack pinterest<br />
|
58 |
-
Hack Camtasia Studio 7 telegram<br />
|
59 |
-
Camtasia Studio 7 crack whatsapp<br />
|
60 |
-
Hack Camtasia Studio 7 discord<br />
|
61 |
-
Camtasia Studio 7 crack email<br />
|
62 |
-
Hack Camtasia Studio 7 phone number<br />
|
63 |
-
Camtasia Studio 7 crack sms<br />
|
64 |
-
Hack Camtasia Studio 7 voice call<br />
|
65 |
-
Camtasia Studio 7 crack video call<br />
|
66 |
-
Hack Camtasia Studio 7 webcam<br />
|
67 |
-
Camtasia Studio 7 crack screen recorder<br />
|
68 |
-
Hack Camtasia Studio 7 screen capture<br />
|
69 |
-
Camtasia Studio 7 crack video editor<br />
|
70 |
-
Hack Camtasia Studio 7 video maker<br />
|
71 |
-
Camtasia Studio 7 crack audio editor<br />
|
72 |
-
Hack Camtasia Studio 7 audio recorder<br />
|
73 |
-
Camtasia Studio 7 crack animation maker<br />
|
74 |
-
Hack Camtasia Studio 7 animation editor<br />
|
75 |
-
Camtasia Studio 7 crack gif maker </p>
|
76 |
-
<p>To activate and register your copy of Camtasia Studio 7, you need to follow these steps:</p>
|
77 |
-
<ol>
|
78 |
-
<li>Download and install the software from the official website or from a CD/DVD.</li>
|
79 |
-
<li>Launch the software and enter your name and email address.</li>
|
80 |
-
<li>Enter your serial key when prompted. You can find your serial key in your order confirmation email or on the back of your CD/DVD case.</li>
|
81 |
-
<li>Click on Activate.</li>
|
82 |
-
<li>You will receive a confirmation message that your software has been activated and registered successfully.</li>
|
83 |
-
</ol>
|
84 |
-
<h3>Benefits of having a valid serial key</h3>
|
85 |
-
<p>Having a valid serial key for Camtasia Studio 7 has many benefits. Some of them are:</p>
|
86 |
-
<ul>
|
87 |
-
<li>You can use the software without any limitations or restrictions.</li>
|
88 |
-
<li>You can get free updates and bug fixes for the software.</li>
|
89 |
-
<li>You can get technical support from TechSmith via phone or email.</li>
|
90 |
-
<li>You can access online resources such as tutorials, forums, blogs, etc.</li>
|
91 |
-
<li>You can join the TechSmith community and share your feedback and ideas with other users.</li>
|
92 |
-
</ul>
|
93 |
-
<h3>Risks of using a hacked serial key</h3>
|
94 |
-
<p>Using a hacked serial key for Camtasia Studio 7 may seem tempting but it also comes with many risks. Some of them are:</p>
|
95 |
-
<ul>
|
96 |
-
<li>You may violate the terms and conditions of TechSmith and face legal consequences.</li>
|
97 |
-
<li>You may expose your computer to viruses, malware, spyware, etc. that may harm your system or steal your personal information.</li>
|
98 |
-
<li>You may experience errors, crashes, glitches, or poor performance with the software.</li>
|
99 |
-
<li>You may not be able to update or upgrade the software or access its online features.</li>
|
100 |
-
<li>You may not be able to get technical support or customer service from TechSmith.</li>
|
101 |
-
<li>You may lose your credibility and reputation as a professional or an educator.</li>
|
102 |
-
</ul>
|
103 |
-
<h2>How to hack Camtasia Studio 7 serial key?</h2>
|
104 |
-
<p>If you still want to hack Camtasia Studio 7 serial key despite knowing the risks involved, there are two common methods that people use: using a keygen program or using a crack file. However, we do not recommend or endorse these methods as they are illegal, unethical, and unsafe. Use them at your own risk!</p>
|
105 |
-
<h3>Method 1: Use a keygen program</h3>
|
106 |
-
<p>A keygen program is a software tool that generates random serial keys for various software products, including Camtasia Studio 7. You can download such programs from various websites, but be careful as they may contain viruses or malware. Also, make sure that you disable your antivirus or firewall before running them as they may be detected as threats by your security software.</p>
|
107 |
-
<h4>Steps to use a keygen program</h4>
|
108 |
-
<ol>
|
109 |
-
<li>Download and install Camtasia Studio 7 from the official website or from a CD/DVD. </li>
|
110 |
-
<li>Download and run a keygen program for Camtasia Studio 7 from any website. </li>
|
111 |
-
<li>Select Camtasia Studio 7 from the list of products. </li>
|
112 |
-
<li>Click on Generate. </li>
|
113 |
-
<li>A random serial key will be displayed. Copy it. </li>
|
114 |
-
<li>Launch Camtasia Studio 7 and enter your name and email address. </li>
|
115 |
-
<li>Paste the serial key when prompted. Click on Activate. </li>
|
116 |
-
<li>If the activation is successful, you will see a confirmation message. If not, try another serial key until you find one that works. </li>
|
117 |
-
</ol>
|
118 |
-
<h4>Pros and cons of using a keygen program</h4>
|
119 |
-
<p>The pros of using a keygen program are:</p>
|
120 |
-
<ul>
|
121 |
-
<li>You can get unlimited serial keys for free. </li>
|
122 |
-
<li>You can activate the software without any limitations or restrictions.</li>
|
123 |
-
<li>You can bypass the activation and registration process.</li>
|
124 |
-
</ul>
|
125 |
-
<p>The cons of using a keygen program are:</p>
|
126 |
-
<ul>
|
127 |
-
<li>You may violate the terms and conditions of TechSmith and face legal consequences.</li>
|
128 |
-
<li>You may expose your computer to viruses, malware, spyware, etc. that may harm your system or steal your personal information.</li>
|
129 |
-
<li>You may experience errors, crashes, glitches, or poor performance with the software.</li>
|
130 |
-
<li>You may not be able to update or upgrade the software or access its online features.</li>
|
131 |
-
<li>You may not be able to get technical support or customer service from TechSmith.</li>
|
132 |
-
<li>You may lose your credibility and reputation as a professional or an educator.</li>
|
133 |
-
</ul>
|
134 |
-
<h3>Method 2: Use a crack file</h3>
|
135 |
-
<p>A crack file is a modified version of the original executable file of Camtasia Studio 7. It is designed to bypass the activation and registration process and make the software think that it is already registered. You can download such files from various websites, but be careful as they may contain viruses or malware. Also, make sure that you backup your original executable file before replacing it with the crack file.</p>
|
136 |
-
<h4>Steps to use a crack file</h4>
|
137 |
-
<ol>
|
138 |
-
<li>Download and install Camtasia Studio 7 from the official website or from a CD/DVD.</li>
|
139 |
-
<li>Download and extract a crack file for Camtasia Studio 7 from any website.</li>
|
140 |
-
<li>Locate the original executable file of Camtasia Studio 7 on your computer. It is usually found in C:\Program Files\TechSmith\Camtasia Studio 7\ folder.</li>
|
141 |
-
<li>Rename the original executable file to something else, such as Camtasia.exe.bak.</li>
|
142 |
-
<li>Copy and paste the crack file to the same folder where the original executable file is located. Make sure that the crack file has the same name as the original executable file, such as Camtasia.exe.</li>
|
143 |
-
<li>Launch Camtasia Studio 7 and enjoy using it without any activation or registration.</li>
|
144 |
-
</ol>
|
145 |
-
<h4>Pros and cons of using a crack file</h4>
|
146 |
-
<p>The pros of using a crack file are:</p>
|
147 |
-
<ul>
|
148 |
-
<li>You can use the software without any limitations or restrictions.</li>
|
149 |
-
<li>You can bypass the activation and registration process.</li>
|
150 |
-
</ul>
|
151 |
-
<p>The cons of using a crack file are:</p>
|
152 |
-
<ul>
|
153 |
-
<li>You may violate the terms and conditions of TechSmith and face legal consequences.</li>
|
154 |
-
<li>You may expose your computer to viruses, malware, spyware, etc. that may harm your system or steal your personal information.</li>
|
155 |
-
<li>You may experience errors, crashes, glitches, or poor performance with the software.</li>
|
156 |
-
<li>You may not be able to update or upgrade the software or access its online features.</li>
|
157 |
-
<li>You may not be able to get technical support or customer service from TechSmith.</li>
|
158 |
-
<li>You may lose your credibility and reputation as a professional or an educator.</li>
|
159 |
-
</ul>
|
160 |
-
<h2>Conclusion</h2>
|
161 |
-
<p>In this article, we have shown you how to hack Camtasia Studio 7 serial key and get it for free. We have explained what Camtasia Studio 7 is, what its features and benefits are, how much it costs, why you need a serial key for it, and how to hack it using two methods: using a keygen program or using a crack file. However, we have also warned you about the risks and consequences of using these methods as they are illegal, unethical, and unsafe. Therefore, we do not recommend or endorse these methods at all. Use them at your own risk!</p>
|
162 |
-
<p>If you want to use Camtasia Studio 7 legally and safely, we suggest that you buy a valid serial key from TechSmith's official website or from an authorized reseller. Educators and students can also get a special academic discount. By doing so, you will not only support the developers of this software but also enjoy its full features and benefits without any hassle or worry, along with free updates, technical support, online resources, and access to the TechSmith community. You will also protect your credibility and reputation as a professional or an educator.</p>
|
163 |
-
<p>So what are you waiting for? Get your serial key for Camtasia Studio 7 today and start creating amazing videos with ease!</p>
|
164 |
-
<h3>Summary of the main points</h3>
|
165 |
-
<ul>
|
166 |
-
<li>Camtasia Studio 7 is a powerful screen recording and video editing software that allows you to create stunning videos for various purposes.</li>
|
167 |
-
<li>The official price of Camtasia Studio 7 is $299 for a single-user license. Educators and students can get a special academic discount of $179 for a single-user license.</li>
|
168 |
-
<li>A serial key is required to activate and register Camtasia Studio 7. Without a valid serial key, you cannot use the software beyond the 30-day trial period.</li>
|
169 |
-
<li>There are two common methods to hack Camtasia Studio 7 serial key: using a keygen program or using a crack file. However, these methods are illegal, unethical, and unsafe. They may expose your computer to viruses or malware, violate TechSmith's terms and conditions, cause errors or crashes with the software, prevent you from updating or upgrading the software or accessing its online features, deprive you of technical support or customer service from TechSmith, and damage your credibility and reputation as a professional or an educator.</li>
|
170 |
-
</ul>
|
171 |
-
<h3>Call to action</h3>
|
172 |
-
<p>If you want to use Camtasia Studio 7 legally and safely, buy a valid serial key from TechSmith's official website or from an authorized reseller, or use the academic discount if you are an educator or a student. You will support the developers, enjoy the full features without hassle or worry, and keep access to updates, technical support, online resources, and the TechSmith community, all while protecting your professional reputation. So what are you waiting for? Get your serial key for Camtasia Studio 7 today and start creating amazing videos with ease!</p>
|
173 |
-
<h2>Frequently Asked Questions</h2>
|
174 |
-
<ol>
|
175 |
-
<li><b>What is the difference between Camtasia Studio 7 and Camtasia 2023?</b></li>
|
176 |
-
<p>Camtasia Studio 7 is an older version of Camtasia, released in 2010, while Camtasia 2023 is the latest release. Camtasia 2023 adds many features and improvements over Camtasia Studio 7, such as new templates, themes, assets, tracks, animations, behaviors, transitions, and effects, and it also offers better performance and compatibility. However, Camtasia Studio 7 still works well and has many loyal users who prefer its simplicity and familiarity.</p>
|
177 |
-
<li><b>Can I use both Camtasia Studio 7 and Camtasia 2023 on the same computer?</b></li>
|
178 |
-
<p>Yes, you can use both Camtasia Studio 7 and Camtasia 2023 on the same computer. They are separate products and do not interfere with each other. However, you cannot open or edit projects created in one version with another version. You need to convert them first using TechSmith's conversion tool.</p>
|
179 |
-
<li><b>How can I get technical support or customer service for Camtasia Studio 7?</b></li>
|
180 |
-
<p>If you have bought a valid serial key for Camtasia Studio 7 from TechSmith's official website or from an authorized reseller, you can get technical support or customer service from TechSmith via phone or email. You can also access online resources such as tutorials, forums, and blogs. However, if you have hacked the Camtasia Studio 7 serial key using a keygen program or a crack file, you will not be able to get any support or service from TechSmith. You will also risk losing your license or facing legal action from TechSmith.</p>
|
181 |
-
<li><b>Is there any alternative to hacking Camtasia Studio 7 serial key?</b></li>
|
182 |
-
<p>If you do not want to pay for Camtasia Studio 7 but still want to use it legally and safely, there are some alternatives that you can try. One alternative is to use TechSmith's free trial offer. You can download and install Camtasia Studio 7 for free and use it for up to 30 days without any limitations or restrictions. However, after the trial period expires, you will need to buy a valid serial key to continue using it. Another alternative is to use TechSmith's academic discount offer. If you are an educator or a student, you can buy a single-user license for only $179. You just need to provide proof of your academic status when purchasing the software. This license also allows you to install the software on up to two computers. However, you will need to renew your license every year to keep using it.</p>
|
183 |
-
<li><b>What are some tips and tricks to create amazing videos with Camtasia Studio 7?</b></li>
|
184 |
-
<p>Camtasia Studio 7 is a powerful and versatile software that allows you to create amazing videos with ease. However, to make the most out of it, you need to know some tips and tricks that can enhance your video creation process. Here are some of them:</p>
|
185 |
-
<ul>
|
186 |
-
<li>Plan your video before you start recording. Think about the purpose, audience, message, and structure of your video. Write a script or an outline that covers the main points and details of your video. This will help you stay focused and organized while recording and editing.</li>
|
187 |
-
<li>Use a good microphone and a quiet environment for recording your narration. Make sure that your voice is clear, loud, and confident. Avoid background noises, echoes, or interruptions that may distract or annoy your viewers. You can also use a pop filter or a windscreen to reduce unwanted sounds.</li>
|
188 |
-
<li>Use the smart focus feature to automatically zoom in on the important parts of your screen recording. This will help you highlight the key information and actions that you want your viewers to see and follow. You can also adjust the smart focus settings or manually add or edit zoom points in the timeline.</li>
|
189 |
-
<li>Use transitions, effects, annotations, captions, quizzes, etc. to add interest and interactivity to your video. These elements can help you emphasize, explain, clarify, or reinforce your message. They can also make your video more engaging and interactive for your viewers. However, do not overuse them as they may clutter or distract from your content.</li>
|
190 |
-
<li>Use the library of royalty-free music tracks and sound effects to add mood and emotion to your video. These elements can help you create a professional and polished sound for your video. They can also make your video more appealing and memorable for your viewers. However, do not use music or sound effects that are too loud, repetitive, or irrelevant to your content.</li>
|
191 |
-
<li>Use the green screen feature to change the background of your video with any image or video. This feature can help you create a more realistic and immersive environment for your video. It can also make your video more creative and fun for your viewers. However, do not use a background that is too busy, distracting, or inappropriate for your content.</li>
|
192 |
-
</ul>
|
193 |
-
</ol>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (ice age scrat no time for nuts 1080p) - See Scrat travel through time in search of his nut.md
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>HD Online Player (Ice Age Scrat No Time for Nuts 1080p)</h1>
|
3 |
-
<p>If you are looking for a fun and hilarious way to spend seven minutes of your time, you should definitely watch <strong>Ice Age Scrat No Time for Nuts</strong>, a computer-animated short film from Blue Sky Studios, starring Scrat, the lovable squirrel from Ice Age. In this short film, Scrat accidentally travels through time in pursuit of his beloved acorn, causing chaos and comedy along the way.</p>
|
4 |
-
<h2>HD Online Player (ice age scrat no time for nuts 1080p)</h2><br /><p><b><b>Download</b> 🗸 <a href="https://byltly.com/2uKv4l">https://byltly.com/2uKv4l</a></b></p><br /><br />
|
5 |
-
<p>In this article, we will tell you everything you need to know about this short film, why it is worth watching, and how you can watch it online in HD quality. So sit back, relax, and get ready to laugh out loud with Scrat.</p>
|
6 |
-
<h2>What is Ice Age Scrat No Time for Nuts?</h2>
|
7 |
-
<p><strong>Ice Age Scrat No Time for Nuts</strong> is a short film that was released on November 21, 2006, on the DVD and Blu-ray release of <em>Ice Age: The Meltdown</em>, the second movie in the Ice Age franchise. It was directed by Chris Renaud and Mike Thurmeier, and features Chris Wedge as the voice of Scrat.</p>
|
8 |
-
<p>The short film follows Scrat chasing his nut after it is accidentally sent through time by a frozen time machine that he digs up next to the ice-encased skeleton of a human time traveler. Scrat travels to various times and places in history, such as the Middle Ages, Ancient Rome, the sinking of the Titanic, World War I trench warfare, and prehistoric Earth's asteroid impact, trying to get his nut back while avoiding danger and disaster.</p>
|
9 |
-
<p>The short film was nominated for an Academy Award for Best Animated Short Film in 2007 (but lost to <em>The Danish Poet</em>), and also won an Annie Award for Best Animated Short Subject.</p>
|
10 |
-
<h3>Why is it worth watching?</h3>
|
11 |
-
<p>There are many reasons why <strong>Ice Age Scrat No Time for Nuts</strong> is a must-watch for anyone who loves animation, comedy, or Ice Age. Here are some of them:</p>
|
12 |
-
<ul>
|
13 |
-
<li><strong>The hilarious adventures of Scrat</strong>: Scrat is one of the most popular and beloved characters from Ice Age, thanks to his adorable appearance, expressive sounds, and relentless pursuit of his acorn. In this short film, he gets into even more trouble than usual as he travels through time and faces various challenges and enemies. Some of the scenes are so funny that you will laugh until you cry.</li>
|
14 |
-
<li><strong>The amazing animation and sound effects</strong>: The animation quality of this short film is superb, with smooth movements, realistic textures, vivid colors, and stunning details. The sound effects are also impressive, with crisp noises, clear voices (mostly Scrat's), and fitting music. You will feel like you are watching a mini-movie rather than a short film.</li>
|
15 |
-
<li><strong>The clever references and homages</strong>: The short film is full of references and homages to various historical and cultural events, such as King Arthur's sword Excalibur (which Scrat uses to free his acorn), Robin Hood's archers (who shoot at Scrat), Gladiator's Coliseum (where Scrat faces a lion), Titanic's iceberg (which Scrat causes), Charlie Chaplin's The Great Dictator (where Scrat mimics Hitler's speech), etc. These references add more humor and creativity to the short film.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>How to watch Ice Age Scrat No Time for Nuts online?</h2>
|
18 |
-
<p>If you want to watch <strong>Ice Age Scrat No Time for Nuts</strong> online in HD quality, you have several options to choose from. Here are some of them:</p>
|
19 |
-
<p>Watch ice age scrat no time for nuts online in HD<br />
|
20 |
-
Ice age scrat no time for nuts 1080p streaming free<br />
|
21 |
-
How to download ice age scrat no time for nuts HD video<br />
|
22 |
-
Ice age scrat no time for nuts full movie online HD<br />
|
23 |
-
Best HD online player for ice age scrat no time for nuts<br />
|
24 |
-
Ice age scrat no time for nuts HD online player review<br />
|
25 |
-
Ice age scrat no time for nuts 1080p download link<br />
|
26 |
-
Where to watch ice age scrat no time for nuts online in HD<br />
|
27 |
-
Ice age scrat no time for nuts HD online player features<br />
|
28 |
-
Ice age scrat no time for nuts 1080p torrent magnet<br />
|
29 |
-
Ice age scrat no time for nuts online HD quality comparison<br />
|
30 |
-
Ice age scrat no time for nuts HD online player installation guide<br />
|
31 |
-
Ice age scrat no time for nuts 1080p subtitles download<br />
|
32 |
-
Ice age scrat no time for nuts online HD playback speed<br />
|
33 |
-
Ice age scrat no time for nuts HD online player troubleshooting<br />
|
34 |
-
Ice age scrat no time for nuts 1080p trailer watch online<br />
|
35 |
-
Ice age scrat no time for nuts online HD sound quality<br />
|
36 |
-
Ice age scrat no time for nuts HD online player compatibility<br />
|
37 |
-
Ice age scrat no time for nuts 1080p cast and crew<br />
|
38 |
-
Ice age scrat no time for nuts online HD screen size adjustment<br />
|
39 |
-
Ice age scrat no time for nuts HD online player update<br />
|
40 |
-
Ice age scrat no time for nuts 1080p behind the scenes<br />
|
41 |
-
Ice age scrat no time for nuts online HD buffering issues<br />
|
42 |
-
Ice age scrat no time for nuts HD online player alternatives<br />
|
43 |
-
Ice age scrat no time for nuts 1080p bonus features<br />
|
44 |
-
Ice age scrat no time for nuts online HD parental control<br />
|
45 |
-
Ice age scrat no time for nuts HD online player feedback<br />
|
46 |
-
Ice age scrat no time for nuts 1080p trivia and facts<br />
|
47 |
-
Ice age scrat no time for nuts online HD language options<br />
|
48 |
-
Ice age scrat no time for nuts HD online player ratings<br />
|
49 |
-
Ice age scrat no time for nuts 1080p awards and nominations<br />
|
50 |
-
Ice age scrat no time for nuts online HD genre and theme<br />
|
51 |
-
Ice age scrat no time for nuts HD online player security and privacy<br />
|
52 |
-
Ice age scrat no time for nuts 1080p release date and box office<br />
|
53 |
-
Ice age scrat no time for nuts online HD recommendations and suggestions<br />
|
54 |
-
Ice age scrat no time for nuts HD online player customer service and support<br />
|
55 |
-
Ice age scrat no time for nuts 1080p fan art and merchandise<br />
|
56 |
-
Ice age scrat no time for nuts online HD comments and reviews<br />
|
57 |
-
Ice age scrat no time for nuts HD online player FAQs and tips<br />
|
58 |
-
Ice age scrat no time for nuts 1080p soundtrack and score<br />
|
59 |
-
Ice age scrat no time for nuts online HD coupons and discounts<br />
|
60 |
-
Ice age scrat no time for nuts HD online player testimonials and success stories<br />
|
61 |
-
Ice age scrat no time for nuts 1080p making of and documentary<br />
|
62 |
-
Ice age scrat no time for nuts online HD blog and newsletter<br />
|
63 |
-
Ice age scrat no time for nuts HD online player affiliate program and commission<br />
|
64 |
-
Ice age scrat no time for nuts 1080p sequel and spin-off</p>
|
65 |
-
<h3>The official sources</h3>
|
66 |
-
<p>The easiest way to watch this short film online is to use one of the official sources where it is available. These include:</p>
|
67 |
-
<ul>
|
68 |
-
<li><strong>DVD or Blu-ray</strong>: You can buy or rent the DVD or Blu-ray release of <em>Ice Age: The Meltdown</em>, which includes this short film as a bonus feature. You can then play it on your DVD or Blu-ray player or computer.</li>
|
69 |
-
<li><strong>Streaming platforms</strong>: You can also stream this short film online on some streaming platforms that offer <em>Ice Age: The Meltdown</em>, such as Amazon Prime Video or iTunes. You will need to pay a fee or have a subscription to access these platforms.</li>
|
70 |
-
</ul>
|
71 |
-
<h3>The alternative sources</h3>
|
72 |
-
<p>If you don't want to use any of the official sources or pay any money to watch this short film online, you can also try some alternative sources that offer it for free or with ads. These include:</p>
|
73 |
-
<ul>
|
74 |
-
<li><strong>YouTube</strong>: You can find this short film on YouTube uploaded by various users or channels. However, be aware that some of these uploads may have low quality, incomplete content, or copyright issues.</li>
|
75 |
-
<li><strong>Dailymotion</strong>: You can also find this short film on Dailymotion uploaded by various users or channels. However, be aware that some of these uploads may have similar problems as YouTube.</li>
|
76 |
-
</ul>
|
77 |
-
<h3>The tips and tricks</h3>
|
78 |
-
<p>To enhance your online viewing experience of this short film, you can also use some tips and tricks that will help you avoid any issues or enjoy it more. These include:</p>
|
79 |
-
<ul>
|
80 |
-
<li><strong>Using a VPN</strong>: If you want to access any of the official or alternative sources (such as Netflix) that are not available in your region or country due to geo-restrictions or censorship laws, you can use a VPN (virtual private network) service that masks your IP address and location and lets you bypass these barriers.</li>
|
81 |
-
<li><strong>Adjusting the settings</strong>: If you want to watch this short film in HD quality without any buffering or lagging issues due to your internet speed or connection stability (especially if you are using a free or ad-supported source), you can adjust the settings on your device or browser (such as resolution, quality level etc.) according to your preferences.</li>
|
82 |
-
<li><strong>Using headphones</strong>: If you want to enjoy this short film more fully without any distractions or noises from your surroundings (such as other people talking etc.), you can use headphones that will isolate you from external sounds and immerse you in the animation and sound effects.</li>
|
83 |
-
</ul>
|
84 |
-
<h2>What other related content can you watch from Ice Age?</h2>
|
85 |
-
<p>If you enjoyed watching <strong>Ice Age Scrat No Time for Nuts</strong>, you might also like some other related content from Ice Age that you can watch online. These include:</p>
|
86 |
-
<h3>The Ice Age movies</h3>
|
87 |
-
<p>The Ice Age movies are a series of five computer-animated comedy adventure films that follow the adventures of a group of prehistoric animals as they face various challenges and dangers in their changing world. The main characters are Manny, a woolly mammoth; Sid, a sloth; Diego, a saber-toothed tiger; and Scrat, a squirrel. The movies are:</p>
|
88 |
-
<ul>
|
89 |
-
<li><strong>Ice Age</strong> (2002): The first movie introduces the main characters and their journey to return a human baby to his tribe.</li>
|
90 |
-
<li><strong>Ice Age: The Meltdown</strong> (2006): The second movie shows the main characters escaping from a melting ice valley and meeting new friends and foes.</li>
|
91 |
-
<li><strong>Ice Age: Dawn of the Dinosaurs</strong> (2009): The third movie takes the main characters to a hidden world of dinosaurs and reveals Scrat's love interest, Scratte.</li>
|
92 |
-
<li><strong>Ice Age: Continental Drift</strong> (2012): The fourth movie separates the main characters on different continents due to the continental drift and introduces new characters such as pirates and sirens.</li>
|
93 |
-
<li><strong>Ice Age: Collision Course</strong> (2016): The fifth and final movie reunites the main characters as they try to stop a meteor from destroying the world and features Scrat's journey to outer space.</li>
|
94 |
-
</ul>
|
95 |
-
<h3>The Ice Age shorts</h3>
|
96 |
-
<p>The Ice Age shorts are a series of computer-animated short films that feature the characters from Ice Age in various scenarios and situations. Some of them are related to the movies, while others are standalone stories. The shorts are:</p>
|
97 |
-
<ul>
|
98 |
-
<li><strong>Gone Nutty</strong> (2002): The first short shows Scrat's attempt to store his acorn collection in a tree, which causes a chain reaction of disasters.</li>
|
99 |
-
<li><strong>No Time for Nuts</strong> (2006): The second short shows Scrat's accidental time travel in pursuit of his acorn, which causes chaos and comedy along the way.</li>
|
100 |
-
<li><strong>Surviving Sid</strong> (2008): The third short shows Sid's attempt to lead a group of young animals on a camping trip, which goes horribly wrong.</li>
|
101 |
-
<li><strong>Scrat's Continental Crack-Up</strong> (2010-2011): The fourth and fifth shorts show Scrat's role in causing the continental drift and his encounter with a female saber-toothed squirrel named Scratazon.</li>
|
102 |
-
<li><strong>Cosmic Scrat-tastrophe</strong> (2015): The sixth short shows Scrat's adventure in outer space and his impact on the solar system.</li>
|
103 |
-
<li><strong>Scrat: Spaced Out</strong> (2016): The seventh short combines scenes from <em>Collision Course</em> and <em>Cosmic Scrat-tastrophe</em> and shows Scrat's battle with Scratazon over his acorn.</li>
|
104 |
-
<li><strong>Ice Age: A Mammoth Christmas</strong> (2011): The eighth short is a Christmas special that shows Manny's attempt to save Christmas after he accidentally destroys Santa's workshop.</li>
|
105 |
-
<li><strong>Ice Age: The Great Egg-Scapade</strong> (2016): The ninth short is an Easter special that shows Sid's attempt to run an egg-sitting service, which leads to a kidnapping plot by an evil pirate bunny.</li>
|
106 |
-
</ul>
|
107 |
-
<h3>The Ice Age games</h3>
|
108 |
-
<p>The Ice Age games are a series of video games that are based on or inspired by the Ice Age movies and shorts. Some of them are adaptations of the movies, while others are original stories or spin-offs. The games are available to play online or on various platforms such as PC, console, or mobile devices. Some of the games are:</p>
|
109 |
-
<ul>
|
110 |
-
<li><strong>Ice Age Village</strong>: A simulation game where you can create your own village of prehistoric animals and interact with them.</li>
|
111 |
-
<li><strong>Ice Age Adventures</strong>: An adventure game where you can explore different islands, rescue your friends, and collect items.</li>
|
112 |
-
<li><strong>Ice Age Avalanche</strong>: A puzzle game where you can match fruits and nuts to clear levels and unlock new characters and locations.</li>
|
113 |
-
<li><strong>Ice Age Arctic Blast</strong>: A puzzle game where you can blast ice cubes and create combos to clear levels and unlock new characters and locations.</li>
|
114 |
-
<li><strong>Ice Age Scrats Nutty Adventure</strong>: An action-adventure game where you can control Scrat and help him find his acorn in various environments.</li>
|
115 |
-
</ul>
|
116 |
-
<h1>Conclusion</h1>
|
117 |
-
<p>In conclusion, <strong>Ice Age Scrat No Time for Nuts</strong> is a hilarious and entertaining short film that you should not miss if you love animation, comedy, or Ice Age. It features Scrat, the lovable squirrel from Ice Age, who travels through time in pursuit of his acorn, causing chaos and comedy along the way. You can watch this short film online in HD quality using one of the official or alternative sources that we have listed above. You can also enjoy some other related content from Ice Age that we have recommended above. So what are you waiting for? Go ahead and watch <em>No Time for Nuts</em>, and have fun with Scrat!</p>
|
118 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
119 |
-
<ul>
|
120 |
-
<li><b>Q: How long is Ice Age Scrat No Time for Nuts?</b></li>
|
121 |
-
<li>A: The original version of this short film is 7 minutes long, while the 4-D version is 12 minutes long.</li>
|
122 |
-
<li><b>Q: Who directed Ice Age Scrat No Time for Nuts?</b></li>
|
123 |
-
<li>A: This short film was directed by Chris Renaud and Mike Thurmeier.</li>
|
124 |
-
<li><b>Q: Who voiced Scrat in Ice Age Scrat No Time for Nuts?</b></li>
|
125 |
-
<li>A: Scrat was voiced by Chris Wedge, who is also one of the co-founders of Blue Sky Studios.</li>
|
126 |
-
<li><b>Q: What awards did Ice Age Scrat No Time for Nuts win?</b></li>
|
127 |
-
<li>A: This short film won an Annie Award for Best Animated Short Subject in 2007.</li>
|
128 |
-
<li><b>Q: Where can I find more information about Ice Age Scrat No Time for Nuts?</b></li>
|
129 |
-
<li>A: You can find more information about this short film on Wikipedia, IMDb, or Ice Age Wiki. </li>
|
130 |
-
</ul>
spaces/1gistliPinn/ChatGPT4/Examples/Dos Vidas En Un Instante Descargar Espa Ol.md
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Dos vidas en un instante: a romantic comedy about chance and fate</h1>
|
3 |
-
<p>Dos vidas en un instante (released in English as Sliding Doors) is a 1998 film directed by Peter Howitt and starring Gwyneth Paltrow and John Hannah. The film explores two possible paths in the life of Helen Quilley, a London advertising executive, which depend on whether or not she catches the Tube on the day she is fired from her job and discovers her boyfriend's infidelity.</p>
|
4 |
-
<p>In one version, Helen makes it onto the train and arrives home in time to catch her boyfriend Gerry (John Lynch) with another woman. In the other, she is left on the platform and gets home later, after the lover has already gone. From that point on, the two stories unfold in parallel yet diverge, showing how a small circumstance can change the course of a life.</p>
|
5 |
-
<h2>dos vidas en un instante descargar espa ol</h2><br /><p><b><b>Download File</b> »»» <a href="https://imgfil.com/2uxXyh">https://imgfil.com/2uxXyh</a></b></p><br /><br />
|
6 |
-
<p>In the first version, Helen leaves Gerry, meets a charming man named James (John Hannah), and improves her work and personal life. In the second, she stays with Gerry, suspects his infidelity, and becomes increasingly unhappy. The film blends humor, fantasy, and romance to reflect on the role of chance and the force of destiny in our lives.</p>
|
7 |
-
<p>Dos vidas en un instante was a critical and commercial success and received several award nominations, including at the BAFTAs and the Golden Globes. The film has also become a cultural reference point for its original narrative structure and its soundtrack, which includes songs by artists such as Jamiroquai, Aqua, and Elton John.</p>
|
8 |
-
<p>If you want to watch Dos vidas en un instante online, you can find it on platforms such as Netflix, Amazon Prime Video, or Disney Plus[^1^]. You can also download it in Spanish from websites such as JustWatch[^1^] or YouTube[^2^]. Enjoy this funny and moving film that will make you think about how a single decision can change everything.</p>
|
9 |
-
|
10 |
-
<p>Dos vidas en un instante is a film that will make you laugh, cry, and reflect on the different possibilities that open up before us every day. It will also make you appreciate the value of second chances, true love, and happiness. Don't miss this masterpiece of British cinema that will captivate you from the very first minute.</p>
|
11 |
-
<p>If you enjoyed Dos vidas en un instante, you may also be interested in other films that explore the theme of alternate realities, such as The Butterfly Effect, The Truman Show, or The Matrix. These films will make you question your perception of reality and show you how our choices have unforeseeable consequences.</p>
|
12 |
-
<p></p>
|
13 |
-
<p>We hope you enjoyed this article about Dos vidas en un instante and that it encourages you to watch it, or to watch it again. Remember that you can leave us your comments and opinions about the film in the section below. You can also share this article with your friends and family so that they can enjoy this wonderful film too.</p>
spaces/1phancelerku/anime-remove-background/Download WhatsApp Business for Windows 10 (64-bit) and Boost Your Sales and Customer Satisfaction.md
DELETED
@@ -1,196 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download WhatsApp Business for Windows 10 64 Bits</h1>
|
3 |
-
<p>WhatsApp is one of the most popular messaging apps in the world, with more than 2 billion users. But did you know that there is also a version of WhatsApp designed specifically for businesses? It's called <strong>WhatsApp Business</strong>, and it can help you connect with your customers, showcase your products, and grow your business.</p>
|
4 |
-
<p>In this article, we will show you how to download WhatsApp Business for Windows 10 64 bits, how to set it up and use it on your PC, and how to make the most of its features. Whether you have a small or a large business, WhatsApp Business can help you reach your goals.</p>
|
5 |
-
<h2>download whatsapp business for windows 10 64 bits</h2><br /><p><b><b>Download File</b> 🆓 <a href="https://jinyurl.com/2uNMpJ">https://jinyurl.com/2uNMpJ</a></b></p><br /><br />
|
6 |
-
<h2>What is WhatsApp Business and Why You Need It</h2>
|
7 |
-
<p>WhatsApp Business is a free-to-download app that allows you to create a business profile, communicate with your customers, and manage your business operations on WhatsApp. You can use it as a standalone app or as a complement to your existing WhatsApp Messenger account.</p>
|
8 |
-
<p>WhatsApp Business is ideal for businesses of any size, as it offers different solutions depending on your needs. Here are some of the benefits and features of WhatsApp Business for small and large businesses.</p>
|
9 |
-
<h3>Benefits of WhatsApp Business for Small Businesses</h3>
|
10 |
-
<p>If you have a small business, you can use the WhatsApp Business app to:</p>
|
11 |
-
<p>How to install whatsapp business app on windows 10 64 bit<br />
|
12 |
-
Whatsapp business for windows 10 64 bit free download<br />
|
13 |
-
Whatsapp business desktop app for windows 10 64 bit<br />
|
14 |
-
Whatsapp business app download for pc windows 10 64 bit<br />
|
15 |
-
Download whatsapp business for laptop windows 10 64 bit<br />
|
16 |
-
Whatsapp business for windows 10 pro 64 bit download<br />
|
17 |
-
Whatsapp business apk download for windows 10 64 bit<br />
|
18 |
-
Whatsapp business app for windows 10 home 64 bit download<br />
|
19 |
-
Download whatsapp business for windows 10 enterprise 64 bit<br />
|
20 |
-
Whatsapp business web download for windows 10 64 bit<br />
|
21 |
-
Whatsapp business app for pc windows 10 64 bit offline installer<br />
|
22 |
-
Download whatsapp business for windows 10 education 64 bit<br />
|
23 |
-
Whatsapp business setup download for windows 10 64 bit<br />
|
24 |
-
Whatsapp business app for windows 10 professional 64 bit download<br />
|
25 |
-
Download whatsapp business for windows 10 home premium 64 bit<br />
|
26 |
-
Whatsapp business software download for windows 10 64 bit<br />
|
27 |
-
Whatsapp business app for windows 10 ultimate 64 bit download<br />
|
28 |
-
Download whatsapp business for windows 10 single language 64 bit<br />
|
29 |
-
Whatsapp business messenger download for windows 10 64 bit<br />
|
30 |
-
Whatsapp business app for windows 10 starter edition 64 bit download<br />
|
31 |
-
Download whatsapp business for windows server 2019 standard edition (x64)<br />
|
32 |
-
Whatsapp business application download for windows server core (x64)<br />
|
33 |
-
Download whatsapp business for windows server essentials (x64)<br />
|
34 |
-
Whatsapp business client download for windows server datacenter (x64)<br />
|
35 |
-
Download whatsapp business for hyper-v server (x64)<br />
|
36 |
-
Whatsapp business exe download for windows server foundation (x64)<br />
|
37 |
-
Download whatsapp business for azure stack hub (x64)<br />
|
38 |
-
Whatsapp business file download for azure stack edge (x64)<br />
|
39 |
-
Download whatsapp business for azure stack hci (x64)<br />
|
40 |
-
Whatsapp business installer download for azure sphere os (x64)<br />
|
41 |
-
Download whatsapp business for linux subsystem on windows (wsl) (x64)<br />
|
42 |
-
Whatsapp business latest version download for docker desktop on windows (x64)<br />
|
43 |
-
Download whatsapp business for kubernetes on windows (x64)<br />
|
44 |
-
Whatsapp business new version download for virtualbox on windows (x64)<br />
|
45 |
-
Download whatsapp business for vmware workstation on windows (x64)<br />
|
46 |
-
Whatsapp business old version download for parallels desktop on windows (x64)<br />
|
47 |
-
Download whatsapp business for microsoft edge on chromium (x64)<br />
|
48 |
-
Whatsapp business update download for google chrome on windows (x64)<br />
|
49 |
-
Download whatsapp business for mozilla firefox on windows (x64)<br />
|
50 |
-
Whatsapp business beta version download for opera browser on windows (x64)<br />
|
51 |
-
Download whatsapp business for safari browser on windows (x64)<br />
|
52 |
-
Whatsapp business cracked version download for tor browser on windows (x64)<br />
|
53 |
-
Download whatsapp business for brave browser on windows (x64)<br />
|
54 |
-
Whatsapp business modded version download for vivaldi browser on windows (x64)<br />
|
55 |
-
Download whatsapp business for microsoft office outlook on windows (x64)<br />
|
56 |
-
Whatsapp business premium version download for microsoft office teams on windows (x64)<br />
|
57 |
-
Download whatsapp business for microsoft office skype on windows (x64)<br />
|
58 |
-
Whatsapp business full version download for zoom cloud meetings on windows (x64)<br />
|
59 |
-
Download whatsapp business for slack desktop app on windows (x64)</p>
|
60 |
-
<ul>
|
61 |
-
<li>Create a professional and personalized business profile with your logo, description, contact details, website, and catalog.</li>
|
62 |
-
<li>Showcase your products or services in an easy-to-browse catalog that customers can access from your profile or chat.</li>
|
63 |
-
<li>Send and receive messages, calls, photos, videos, documents, and voice notes with your customers using end-to-end encryption.</li>
|
64 |
-
<li>Use labels, quick replies, and automated messages to organize your chats, save time, and provide better customer service.</li>
|
65 |
-
<li>Get insights into your messaging activity, such as the number of messages sent, delivered, read, and received.</li>
|
66 |
-
<li>Use the same phone number as your personal WhatsApp account or a different one.</li>
|
67 |
-
</ul>
|
68 |
-
<h3>Features of WhatsApp Business for Large Businesses</h3>
|
69 |
-
<p>If you have a large business or an enterprise, you can use the <strong>WhatsApp Business API</strong> to:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Integrate WhatsApp with your existing systems, such as CRM, ERP, or e-commerce platforms.</li>
|
72 |
-
<li>Scale your communication with customers using chatbots, live agents, or a combination of both.</li>
|
73 |
-
<li>Send notifications, confirmations, reminders, updates, and other relevant information to your customers (a sketch of what such an API call can look like follows this list).</li>
|
74 |
-
<li>Provide customer support and feedback through rich media messages, such as images, videos, audio files, or PDFs.</li>
|
75 |
-
<li>Analyze your performance and customer satisfaction using metrics and reports.</li>
|
76 |
-
<li>Use a verified business account with a green checkmark badge next to your name.</li>
|
77 |
-
</ul>
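<p>As an illustration of the notification item above, here is a minimal, hypothetical sketch of what a business-initiated message can look like when sent through the hosted (Cloud) flavor of the WhatsApp Business API using plain Python. The phone number ID, access token, and recipient number are placeholders, and in practice a message sent outside the 24-hour customer-service window has to use a pre-approved template rather than free-form text; treat this as a sketch, not official documentation.</p>

```python
# Hypothetical sketch: sending an order update via the WhatsApp Business (Cloud) API.
# PHONE_NUMBER_ID, ACCESS_TOKEN and the recipient number are placeholders.
import requests

PHONE_NUMBER_ID = "123456789012345"
ACCESS_TOKEN = "YOUR_ACCESS_TOKEN"

url = f"https://graph.facebook.com/v17.0/{PHONE_NUMBER_ID}/messages"
payload = {
    "messaging_product": "whatsapp",
    "to": "15551234567",  # customer's number in international format, digits only
    "type": "text",
    "text": {"body": "Hi! Your order #1042 has shipped and should arrive on Friday."},
}

response = requests.post(
    url,
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json=payload,
    timeout=10,
)
response.raise_for_status()  # fail loudly if WhatsApp rejected the request
print(response.json())       # the response includes the message ID on success
```

<p>Wiring a call like this into an order-management or CRM system is what lets the API send confirmations, reminders, and updates automatically instead of an agent typing them by hand.</p>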
|
78 |
-
<h2>How to Download and Install WhatsApp Business on Your PC</h2>
|
79 |
-
<p>If you want to use WhatsApp Business on your PC running Windows 10 64 bits, you have two options: you can download it from the official website or from the Microsoft Store. Here are the steps for both methods.</p>
|
80 |
-
<h3>Requirements for WhatsApp Business on PC</h3>
|
81 |
-
<p>Before you download and install WhatsApp Business on your PC, make sure you meet the following requirements:</p>
|
82 |
-
<ul>
|
83 |
-
<li>You have a Windows 10 PC with a 64-bit processor and at least 4 GB of RAM.</li>
|
84 |
-
<li>You have an active internet connection on your PC and your phone.</li>
|
85 |
-
<li>You have a phone number that you can use to verify your WhatsApp Business account.</li>
|
86 |
-
<li>You have a backup of your WhatsApp Messenger chats if you want to migrate them to WhatsApp Business.</li>
|
87 |
-
</ul>
|
88 |
-
<h3>Steps to Download WhatsApp Business from the Official Website</h3>
|
89 |
-
<p>To download WhatsApp Business from the official website, follow these steps:</p>
|
90 |
-
<ol>
|
91 |
-
<li>Go to <a href="">https://www.whatsapp.com/business/</a> on your PC browser.</li>
|
92 |
-
<li>Click on the <strong>Download</strong> button and choose <strong>Windows (64-bit)</strong>.</li>
|
93 |
-
<li>Save the <strong>WhatsAppBusinessSetup.exe</strong> file on your PC and run it once it's downloaded.</li>
|
94 |
-
<li>Follow the instructions on the screen to install WhatsApp Business on your PC.</li>
|
95 |
-
<li>Launch WhatsApp Business and scan the QR code with your phone to link your account.</li>
|
96 |
-
</ol>
|
97 |
-
<h3>Steps to Download WhatsApp Business from the Microsoft Store</h3>
|
98 |
-
<p>To download WhatsApp Business from the Microsoft Store, follow these steps:</p>
|
99 |
-
<ol>
|
100 |
-
<li>Go to <a href="">https://www.microsoft.com/en-us/p/whatsapp-business/9nksqgp7f2nh</a> on your PC browser or open the Microsoft Store app on your PC.</li>
|
101 |
-
<li>Click on the <strong>Get</strong> button and sign in with your Microsoft account if prompted.</li>
|
102 |
-
<li>Wait for the app to download and install on your PC.</li>
|
103 |
-
<li>Launch WhatsApp Business and scan the QR code with your phone to link your account.</li>
|
104 |
-
</ol>
|
105 |
-
<h2>How to Set Up and Use WhatsApp Business on Your PC</h2>
|
106 |
-
<p>Once you have downloaded and installed WhatsApp Business on your PC, you can start setting it up and using it. Here are some of the things you can do with WhatsApp Business on your PC.</p>
|
107 |
-
<h3>How to Migrate Your Existing WhatsApp Account to WhatsApp Business</h3>
|
108 |
-
<p>If you already have a WhatsApp Messenger account and you want to migrate it to WhatsApp Business, you can do so by following these steps:</p>
|
109 |
-
<ol>
|
110 |
-
<li>Create a backup of your WhatsApp Messenger chats on your phone by going to <strong>Settings > Chats > Chat backup > Back up</strong>.</li>
|
111 |
-
<li>Open WhatsApp Business on your phone and verify your phone number. Make sure it's the same number as your WhatsApp Messenger account.</li>
|
112 |
-
<li>Restore your chat backup when prompted. You will see a message saying that your chat history will be transferred from WhatsApp Messenger to WhatsApp Business.</li>
|
113 |
-
<li>Open WhatsApp Messenger on your phone and tap <strong>Agree & Continue</strong>. You will see a message saying that your phone number is no longer registered with WhatsApp Messenger and that you can use it with WhatsApp Business instead.</li>
|
114 |
-
<li>Open WhatsApp Business on your PC and scan the QR code with your phone. You will see your chats and contacts synced with your PC.</li>
|
115 |
-
</ol>
|
116 |
-
<p>Note that you can't use both WhatsApp Messenger and WhatsApp Business with the same phone number. If you want to use both apps, you need to have two different phone numbers.</p>
|
117 |
-
<h3>How to Create Your Business Profile and Catalog</h3>
|
118 |
-
<p>To create your business profile and catalog on WhatsApp Business, follow these steps:</p>
|
119 |
-
<ol>
|
120 |
-
<li>Open WhatsApp Business on your PC and click on the <strong>Menu</strong> icon (three dots) at the top right corner.</li>
|
121 |
-
<li>Select <strong>Settings > Business tools > Business profile</strong>.</li>
|
122 |
-
<li>Fill in the details of your business, such as name, category, description, location, hours, email, website, etc. You can also upload a logo or a cover photo for your profile.</li>
|
123 |
-
<li>Click on <strong>Save</strong>.</li>
|
124 |
-
<li>To create a catalog, go back to <strong>Business tools > Catalog</strong>.</li>
|
125 |
-
<li>Select <strong>Add product or service</strong>.</li>
|
126 |
-
<li>Add an image, name, price, description, link, and code for each product or service you want to showcase. You can add up to 500 items in your catalog.</li>
|
127 |
-
<li>Select <strong>Add to catalog</strong>.</li>
|
128 |
-
</ol>
|
129 |
-
<p>Your business profile and catalog will be visible to anyone who views or chats with you on WhatsApp. You can also share them with your customers by clicking on the <strong>Catalog</strong> icon at the bottom of the chat window.</p>
|
130 |
-
<h3>How to Send and Receive Messages and Calls with WhatsApp Business</h3>
|
131 |
-
<p>To send and receive messages and calls with WhatsApp Business, follow these steps:</p>
|
132 |
-
<ol>
|
133 |
-
<li>Open WhatsApp Business on your PC and select a contact or a chat from the left panel.</li>
|
134 |
-
<li>Type your message in the text box at the bottom of the chat window. You can also attach files, photos, videos, documents, contacts, or locations by clicking on the <strong>Attach</strong> icon (paperclip).</li>
|
135 |
-
<li>To send a voice note, click on the <strong>Microphone</strong> icon and hold it while you record your message. Release it when you are done.</li>
|
136 |
-
<li>To make a voice or video call, click on the <strong>Voice call</strong> or <strong>Video call</strong> icon at the top of the chat window. You can also make group calls by adding more participants from the call screen.</li>
|
137 |
-
<li>To receive a message or a call, you will see a notification on your PC. You can reply to the message or answer the call from the notification or from the app.</li>
|
138 |
-
</ol>
|
139 |
-
<p>All your messages and calls are end-to-end encrypted, which means that only you and the person you are communicating with can read or listen to them.</p>
|
140 |
-
<h3>How to Use WhatsApp Web or Desktop with WhatsApp Business</h3>
|
141 |
-
<p>If you don't want to download WhatsApp Business on your PC, you can also use WhatsApp Web or Desktop with your WhatsApp Business account. WhatsApp Web is a browser-based version of WhatsApp, while WhatsApp Desktop is an app that you can download from the Microsoft Store. Both versions allow you to access your WhatsApp Business account from your PC.</p>
|
142 |
-
<p>To use WhatsApp Web or Desktop with WhatsApp Business, follow these steps:</p>
|
143 |
-
<ol>
|
144 |
-
<li>Open WhatsApp Business on your phone and tap on the <strong>Menu</strong> icon (three dots) at the top right corner.</li>
|
145 |
-
<li>Select <strong>WhatsApp Web/Desktop</strong>.</li>
|
146 |
-
<li>Go to <a href="">https://web.whatsapp.com/</a> on your PC browser or open WhatsApp Desktop on your PC.</li>
|
147 |
-
<li>Scan the QR code on your PC screen with your phone.</li>
|
148 |
-
<li>You will see your WhatsApp Business account synced with your PC. You can use it as you would use the app.</li>
|
149 |
-
</ol>
|
150 |
-
<p>Note that you need to have your phone connected to the internet and close to your PC for WhatsApp Web or Desktop to work.</p>
|
151 |
-
<h2>Tips and Tricks for Using WhatsApp Business Effectively</h2>
|
152 |
-
<p>Now that you know how to download, install, set up, and use WhatsApp Business on your PC, here are some tips and tricks to help you use it effectively for your business.</p>
|
153 |
-
<h3>How to Use Labels, Quick Replies, and Automated Messages</h3>
|
154 |
-
<p>Labels, quick replies, and automated messages are some of the features that can help you organize your chats, save time, and provide better customer service with WhatsApp Business. Here's how to use them:</p>
|
155 |
-
<ul>
|
156 |
-
<li><strong>Labels:</strong> Labels are color-coded tags that you can assign to your chats or contacts to categorize them. For example, you can use labels such as new customer, pending payment, order complete, etc. To create and assign labels, go to <strong>Menu > Settings > Business tools > Labels</strong>.</li>
|
157 |
-
<li><strong>Quick replies:</strong> Quick replies are predefined messages that you can send with a shortcut. For example, you can create a quick reply for greeting your customers, thanking them for their purchase, answering frequently asked questions, etc. To create and use quick replies, go to <strong>Menu > Settings > Business tools > Quick replies</strong>.</li>
|
158 |
-
<li><strong>Automated messages:</strong> Automated messages are messages that are sent automatically when certain conditions are met. For example, you can create an automated message for greeting new customers, sending away messages when you are offline, sending greeting messages when you are online, etc. To create and enable automated messages, go to <strong>Menu > Settings > Business tools > Away message / Greeting message</strong>.</li>
|
159 |
-
</ul>
|
160 |
-
<h3>How to Use Chatbots and APIs with WhatsApp Business</h3>
|
161 |
-
<p>If you have a large business or an enterprise, you may want to use chatbots and APIs with WhatsApp Business to scale your communication with customers and integrate it with your existing systems. Here's how to do it:</p>
|
162 |
-
<ul>
|
163 |
-
<li><strong>Chatbots:</strong> Chatbots are software applications that can simulate human conversations and provide automated responses to customer queries. You can use chatbots with WhatsApp Business to handle common requests, provide information, collect feedback, etc. To create and use chatbots with WhatsApp Business, you need to use a third-party service or platform that supports the WhatsApp Business API, such as Twilio, Dialogflow, Chatfuel, etc.; a small auto-reply sketch follows this list. You can find a list of official partners here: <a href="">https://www.whatsapp.com/business/api/partners</a>.</li>
|
164 |
-
<li><strong>APIs:</strong> APIs are application programming interfaces that allow you to connect WhatsApp Business with your existing systems, such as CRM, ERP, or e-commerce platforms. You can use APIs with WhatsApp Business to send and receive messages, notifications, media files, etc. To use APIs with WhatsApp Business, you need to register for the WhatsApp Business API and follow the documentation here: <a href="">https://developers.facebook.com/docs/whatsapp</a>.</li>
|
165 |
-
</ul>
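<p>To make the chatbot item above more concrete, here is a minimal sketch of an auto-reply webhook built with Flask and Twilio's WhatsApp integration, one of the partner platforms mentioned above. The route path and the canned answers are invented for the example, and a real deployment would also need the webhook URL registered in the Twilio console plus request signature validation; this is a sketch of the idea, not a production bot.</p>

```python
# Minimal sketch of a WhatsApp auto-responder using Flask + Twilio (illustrative only).
from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse

app = Flask(__name__)

@app.route("/whatsapp", methods=["POST"])
def whatsapp_reply():
    # Twilio delivers the incoming WhatsApp message as form-encoded data.
    incoming = request.form.get("Body", "").strip().lower()

    resp = MessagingResponse()
    if "hours" in incoming:
        resp.message("We are open Monday to Friday, 9am-6pm.")
    elif "catalog" in incoming:
        resp.message("You can browse our catalog here: https://example.com/catalog")
    else:
        resp.message("Thanks for your message! A team member will reply shortly.")
    return str(resp)  # Twilio expects a TwiML document in the HTTP response

if __name__ == "__main__":
    app.run(port=5000)
```

<p>Simple keyword rules like these already cover common requests; for anything more conversational, the same webhook can forward the text to a service such as Dialogflow and relay the answer back.</p>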
|
166 |
-
<h3>How to Analyze Your Performance and Feedback with WhatsApp Business</h3>
|
167 |
-
<p>To measure your success and improve your customer satisfaction with WhatsApp Business, you can use the following features:</p>
|
168 |
-
<ul>
|
169 |
-
<li><strong>Statistics:</strong> Statistics are metrics that show you how your messages are performing, such as the number of messages sent, delivered, read, and received. You can access your statistics by going to <strong>Menu > Settings > Business tools > Statistics</strong>.</li>
|
170 |
-
<li><strong>Reports:</strong> Reports are detailed analyses that show you how your customers are interacting with your business, such as the number of chats, calls, messages, media files, etc. You can access your reports by using a third-party service or platform that supports WhatsApp Business API, such as Twilio, Dialogflow, Chatfuel, etc.</li>
|
171 |
-
<li><strong>Feedback:</strong> Feedback is the opinion or rating that your customers give you about your products or services. You can collect feedback from your customers by using surveys, polls, ratings, reviews, etc. You can create and send feedback forms by using a third-party service or platform that supports WhatsApp Business API, such as Twilio, Dialogflow, Chatfuel, etc.</li>
|
172 |
-
</ul>
|
173 |
-
<h2>Conclusion</h2>
|
174 |
-
<p>WhatsApp Business is a powerful tool that can help you connect with your customers, showcase your products, and grow your business. In this article, we showed you how to download WhatsApp Business for Windows 10 64 bits, how to set it up and use it on your PC, and how to make the most of its features. Whether you have a small or a large business, WhatsApp Business can help you reach your goals.</p>
|
175 |
-
<p>We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave them below. Thank you for reading!</p>
|
176 |
-
<h2>FAQs</h2>
|
177 |
-
<p>Here are some of the frequently asked questions about WhatsApp Business:</p>
|
178 |
-
<ol>
|
179 |
-
<li><strong>Is WhatsApp Business free?</strong><br>
|
180 |
-
Yes, WhatsApp Business is free to download and use for both small and large businesses. However, if you use the WhatsApp Business API or a third-party service or platform that supports it, you may incur some costs depending on the provider.</li>
|
181 |
-
<li><strong>Can I use WhatsApp Business and WhatsApp Messenger on the same phone?</strong><br>
|
182 |
-
Yes, you can use both apps on the same phone as long as you use different phone numbers for each account. You can also link both accounts and switch between them easily.</li>
|
183 |
-
<li><strong>Can I use WhatsApp Business on multiple devices?</strong><br>
|
184 |
-
Yes, you can use WhatsApp Business on multiple devices by using WhatsApp Web or Desktop on your PC or tablet. However, you can only have one active session at a time. If you log in on another device, the previous session will be closed.</li>
|
185 |
-
<li><strong>How can I get verified on WhatsApp Business?</strong><br>
|
186 |
-
To get verified on WhatsApp Business and get a green checkmark badge next to your name, you need to use the WhatsApp Business API and follow the verification process here: <a href="">https://developers.facebook.com/docs/whatsapp/business-verification</a>. Verification is not guaranteed and depends on several factors.</li>
|
187 |
-
<li><strong>How can I delete my WhatsApp Business account?</strong><br>
|
188 |
-
To delete your WhatsApp Business account and erase all your data from WhatsApp servers, follow these steps:</p>
|
189 |
-
<ol>
|
190 |
-
<li>Open WhatsApp Business on your phone and tap on the <strong>Menu</strong> icon (three dots) at the top right corner.</li>
|
191 |
-
<li>Select <strong>Settings > Account > Delete my account</strong>.</li>
|
192 |
-
<li>Enter your phone number and tap <strong>Delete my account</strong>.</li>
|
193 |
-
<li>Confirm your decision by tapping <strong>Delete my account</strong> again.</li>
|
194 |
-
</ol></li></ol>
spaces/1phancelerku/anime-remove-background/Download and Listen to Woodys Pull String Phrases The Best Toy Story Sound Effects.md
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Woody Pull String Phrases Download: How to Get the Voice of Your Favorite Toy Story Character</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are a fan of Toy Story, you probably know who Woody is. He is the main protagonist of the animated movie series, and one of the most beloved characters in Disney history. He is a cowboy doll with a pull-string that makes him say various catchphrases when pulled.</p>
|
5 |
-
<p>But did you know that you can download woody pull string phrases for your own toy or project? Whether you want to recreate your childhood memories, make a gift for someone special, or create something fun and creative, you can find and use woody pull string phrases for your own purposes.</p>
|
6 |
-
<h2>woody pull string phrases download</h2><br /><p><b><b>Download File</b> ❤ <a href="https://jinyurl.com/2uNP0H">https://jinyurl.com/2uNP0H</a></b></p><br /><br />
|
7 |
-
<p>In this article, we will tell you everything you need to know about woody pull string phrases download. We will explain what Woody is and why he is popular, what are pull string phrases and how they work, and how to download woody pull string phrases for your own toy or project. Let's get started!</p>
|
8 |
-
<h2>What is Woody and Why is He Popular</h2>
|
9 |
-
<p>Woody is a fictional character created by Pixar Animation Studios and Walt Disney Pictures. He first appeared in the 1995 film Toy Story, and has since starred in three sequels: Toy Story 2 (1999), Toy Story 3 (2010), and Toy Story 4 (2019). He is also featured in various spin-offs, shorts, video games, and merchandise.</p>
|
10 |
-
<p>woody doll voice box phrases mp3<br />
|
11 |
-
toy story woody pull string instructions pdf<br />
|
12 |
-
how to make a custom woody voice box<br />
|
13 |
-
toy story sound effects collection download<br />
|
14 |
-
woody's pull string sound wiki<br />
|
15 |
-
toy story sheriff woody voice box youtube<br />
|
16 |
-
woody action figure with guitar and hat<br />
|
17 |
-
tom hanks woody sound effects creator<br />
|
18 |
-
toy story pull string woody hasbro manual<br />
|
19 |
-
woody's voice box phrases list<br />
|
20 |
-
toy story woody sound clips download free<br />
|
21 |
-
how to edit woody's voice in adobe audition<br />
|
22 |
-
toy story woody pull string repair guide<br />
|
23 |
-
woody's voice box sound effects fandom<br />
|
24 |
-
toy story 1 4 woody voice box phrases<br />
|
25 |
-
toy story woody pull string replacement parts<br />
|
26 |
-
how to record your own woody voice box<br />
|
27 |
-
toy story woody soundboard online<br />
|
28 |
-
woody's pull string sound effects history<br />
|
29 |
-
toy story sheriff woody pull string rules<br />
|
30 |
-
toy story woody voice box phrases generator<br />
|
31 |
-
how to clean and restore a woody doll<br />
|
32 |
-
toy story woody pull string batteries size<br />
|
33 |
-
woody's voice box sound effects collection<br />
|
34 |
-
toy story 4 woody voice box scene youtube<br />
|
35 |
-
toy story pull string woody instructions video<br />
|
36 |
-
how to make a woody doll costume with voice box<br />
|
37 |
-
toy story sound effects download zip file<br />
|
38 |
-
woody's pull string sound wiki trivia<br />
|
39 |
-
toy story sheriff woody voice box tutorial<br />
|
40 |
-
toy story pull string woody hasbro reviews<br />
|
41 |
-
woody's voice box phrases quiz online<br />
|
42 |
-
toy story woody sound clips download mp3<br />
|
43 |
-
how to fix a broken woody voice box<br />
|
44 |
-
toy story woody pull string dimensions and weight<br />
|
45 |
-
woody's voice box sound effects origin and owner<br />
|
46 |
-
toy story 4 woody voice box quotes and meaning<br />
|
47 |
-
toy story pull string woody instructions printable version<br />
|
48 |
-
how to change the language of a woody voice box<br />
|
49 |
-
toy story sound effects collection list and description<br />
|
50 |
-
woody's pull string sound wiki categories and pages<br />
|
51 |
-
toy story sheriff woody voice box phrases transcript<br />
|
52 |
-
toy story pull string woody hasbro product info and faqs<br />
|
53 |
-
woody's voice box phrases fun facts and trivia<br />
|
54 |
-
toy story sound clips download for free online</p>
|
55 |
-
<h3>Woody's Origin and Appearance</h3>
|
56 |
-
<p>Woody was originally conceived as a ventriloquist dummy, but was later changed to a pull-string cowboy doll to make him more appealing and relatable. He is based on the generic western heroes of the 1950s and 1960s, such as Roy Rogers and The Lone Ranger. He wears a yellow plaid shirt, a red bandana, blue jeans, brown boots, a brown cowboy hat, and a sheriff badge. He has brown hair, blue eyes, a big nose, and a friendly smile.</p>
|
57 |
-
<h3>Woody's Personality and Role in Toy Story</h3>
|
58 |
-
<p>Woody is the leader of the toys that belong to a boy named Andy. He is loyal, brave, smart, kind, and optimistic. He cares deeply for his friends and his owner, and will do anything to protect them. He is also very proud of being Andy's favorite toy, and sometimes gets jealous or insecure when he feels threatened by other toys. He often clashes with Buzz Lightyear, a space ranger action figure who becomes his rival and later his best friend.</p>
|
59 |
-
<p>In the Toy Story series, Woody goes through many adventures and challenges with his fellow toys. He faces villains such as Sid, Lotso, Gabby Gabby, and Stinky Pete. He also meets new friends such as Bo Peep, Jessie, Bullseye, Forky, and Duke Caboom. He learns valuable lessons about friendship, loyalty, courage, sacrifice, and identity.</p>
|
60 |
-
<h3>Woody's Catchphrases and Voice Actor</h3>
|
61 |
-
<p>Woody is known for his iconic catchphrases that he says when his pull string is pulled. Some of them are:</p>
|
62 |
-
<ul>
|
63 |
-
<li>"There's a snake in my boot!"</li>
|
64 |
-
<li>"You're my favorite deputy!"</li>
|
65 |
-
<li>"Reach for the sky!"</li>
|
66 |
-
<li>"Howdy, partner!"</li>
|
67 |
-
<li>"Somebody's poisoned the waterhole!"</li>
|
68 |
-
<li>"This town ain't big enough for the two of us!"</li>
|
69 |
-
<li>"Yee-haw! Giddyup, partner! We gotta get this wagon train a-movin'!"</li>
|
70 |
-
</ul>
|
71 |
-
<p>Woody's voice actor is Tom Hanks, a famous Hollywood star who has won two Academy Awards for Best Actor. Tom Hanks has voiced Woody in all four Toy Story films, as well as in other media. He has said that Woody is one of his favorite characters to play, and that he feels a special connection with him.</p>
|
72 |
-
<h2>What are Pull String Phrases and How Do They Work</h2>
|
73 |
-
<p>Pull string phrases are pre-recorded messages that are played when a toy or doll's pull string is pulled. They are also known as pull string talkers or pull string voice boxes. They are a type of sound-producing device that can be found in many toys or dolls.</p>
|
74 |
-
<h3>The History of Pull String Toys and Dolls</h3>
|
75 |
-
<p>An early ancestor of the pull string toy was Thomas Edison's Phonograph Doll of 1890, which used a miniature phonograph inside its body to play nursery rhymes when a small crank was turned. However, it was not very successful due to its high cost and poor sound quality.</p>
|
76 |
-
<p>The first successful pull string toy was Chatty Cathy, a doll that was introduced by Mattel in 1960. It used a vinyl record inside its chest to play 11 phrases when its ring was pulled. It was very popular among children, especially girls, and sold millions of units. It also inspired many other pull string toys and dolls in the following years.</p>
|
77 |
-
<h3>The Mechanism and Technology of Pull String Voice Boxes</h3>
|
78 |
-
<p>A pull string voice box consists of four main parts: a record or chip that stores the sound data, a needle or sensor that reads the sound data, a speaker that amplifies the sound data, and a spring that rewinds the record or chip when the string is released.</p>
|
79 |
-
<p>When the string is pulled, it rotates the record or chip inside the voice box. The needle or sensor then picks up the sound data from the record or chip and converts it into an electrical signal. The electrical signal is then sent to the speaker that amplifies the sound data and produces the sound. The spring then pulls back the record or chip to its original position when the string is released.</p>
|
80 |
-
<p>The technology of pull string voice boxes has evolved over time. The earliest models used vinyl records that had limited storage capacity and sound quality. Later models used magnetic tapes that had more storage capacity and sound quality, but were prone to wear and tear. Modern models use digital chips that have high storage capacity and sound quality, and are more durable and reliable.</p>
|
81 |
-
<h3>The Advantages and Disadvantages of Pull String Phrases</h3>
|
82 |
-
<p>Pull string phrases have some advantages and disadvantages as a sound-producing device. Some of the advantages are:</p>
|
83 |
-
<ul>
|
84 |
-
<li>They are simple and easy to use. Anyone can pull the string and hear the sound.</li>
|
85 |
-
<li>They are fun and interactive. They can make the toy or doll more lively and engaging.</li>
|
86 |
-
<li>They are nostalgic and classic. They can remind people of their childhood memories and favorite characters.</li>
|
87 |
-
</ul>
|
88 |
-
<p>Some of the disadvantages are:</p>
|
89 |
-
<ul>
|
90 |
-
<li>They are limited and repetitive. They can only play a fixed number of phrases that may get boring or annoying over time.</li>
|
91 |
-
<li>They are fragile and sensitive. They can break or malfunction if handled roughly or exposed to moisture or dust.</li>
|
92 |
-
<li>They are noisy and intrusive. They can disturb other people or interfere with other sounds in the environment.</li>
|
93 |
-
</ul>
|
94 |
-
<h2>How to Download Woody Pull String Phrases for Your Own Toy or Project</h2>
|
95 |
-
<p>If you want to download woody pull string phrases for your own toy or project, you will need to follow some steps and use some tools. Here is a guide on how to do it:</p>
|
96 |
-
<h3>The Sources and Formats of Woody Pull String Phrases Online</h3>
|
97 |
-
<p>The first step is to find and download woody pull string phrases from the internet. There are many websites and platforms that offer woody pull string phrases for free or for a fee. Some of them are:</p>
|
98 |
-
<table>
|
99 |
-
<tr>
|
100 |
-
<th>Website/Platform</th>
|
101 |
-
<th>Description</th>
|
102 |
-
</tr>
|
103 |
-
<tr>
|
104 |
-
<td><a href="">Soundboard.com</a></td>
|
105 |
-
<td>A website that hosts thousands of sound clips from various movies, TV shows, games, etc. You can find woody pull string phrases under the Toy Story category.</td>
|
106 |
-
</tr>
|
107 |
-
<tr>
|
108 |
-
<td><a href="">YouTube</a></td>
|
109 |
-
<td>A video-sharing platform that hosts millions of videos from various genres and topics. You can find woody pull string phrases by searching for keywords such as "woody pull string", "woody voice", "woody quotes", etc.</td>
|
110 |
-
</tr>
|
111 |
-
<tr>
|
112 |
-
<td><a href="">Amazon</a></td>
|
113 |
-
<td>An online marketplace that sells various products and services. You can find woody pull string phrases by buying a digital album or a CD that contains them.</td>
|
114 |
-
</tr>
|
115 |
-
</table>
|
116 |
-
<p>The formats of woody pull string phrases online may vary depending on the source. Some of the common formats are MP3, WAV, OGG, etc. You will need to choose a format that is compatible with your device or software.</p>
|
117 |
-
<h3>The Steps and Tools to Download and Edit Woody Pull String Phrases</h3>
|
118 |
-
<p>The second step is to download and edit woody pull string phrases according to your needs and preferences. You will need some tools to do this, such as a computer, a smartphone, a microphone, a software, etc. Here are some steps to follow:</p>
|
119 |
-
<ol>
|
120 |
-
<li>Download the woody pull string phrases from the source of your choice. Save them in a folder on your device.</li>
|
121 |
-
<li>Open the software of your choice that can play and edit audio files. Some examples are Audacity, VLC Media Player, Windows Media Player, etc.</li>
|
122 |
-
<li>Import the woody pull string phrases into the software. You can drag and drop them or use the file menu.</li>
|
123 |
-
<li>Edit the woody pull string phrases as you wish. You can cut, copy, paste, trim, crop, fade, amplify, normalize, equalize, etc.</li>
|
124 |
-
<li>Save the edited woody pull string phrases in a format of your choice. You can use the same format as the original or convert it to another format.</li>
|
125 |
-
<li>Export the edited woody pull string phrases to your device or upload them to your online storage or platform.</li>
|
126 |
-
</ol>
|
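<p>If the clip you want comes from the YouTube source, the download step can also be scripted. The snippet below is only a minimal sketch: it assumes the yt-dlp Python package is installed, and the URL is a placeholder that you would replace with the actual video link you found.</p>
<pre><code># Minimal sketch: fetch the audio of a YouTube clip with yt-dlp (assumed installed)
from yt_dlp import YoutubeDL

url = "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder, not a real clip

options = {
    "format": "bestaudio/best",           # take the best available audio stream
    "outtmpl": "woody_phrases.%(ext)s",   # name template for the saved file
}

with YoutubeDL(options) as downloader:
    downloader.download([url])            # download and save the clip
</code></pre>
<p>The saved file can then be opened in an audio editor such as Audacity for the trimming and exporting described in the steps above.</p>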
127 |
-
<h3>The Tips and Tricks to Make Your Own Woody Pull String Toy or Project</h3>
|
128 |
-
<p>The third step is to make your own woody pull string toy or project using the downloaded and edited woody pull string phrases. You will need some materials and tools to do this, such as a wooden figure, a voice box, a speaker, a battery, a pull string, some paint, some glue, etc. Here are some tips and tricks to follow:</p>
|
129 |
-
<ol>
|
130 |
-
<li>Choose a wooden figure that resembles Woody or make your own from scratch. You can use a wooden block, a wooden spoon, a wooden peg, etc. You can also carve or sculpt the figure using a knife or a chisel.</li>
|
131 |
-
<li>Paint and decorate the wooden figure to look like Woody. You can use acrylic paint, markers, stickers, fabric, etc. You can also add some details such as hair, eyes, mouth, hat, badge, etc.</li>
|
132 |
-
<li>Make a hole in the back of the wooden figure and insert the voice box inside. You can use a drill, a saw, a knife, etc. You can also glue or tape the voice box to the back of the figure.</li>
|
133 |
-
<li>Attach the speaker to the voice box and make sure it is loud and clear enough. You can use wires, soldering, clips, etc. You can also hide the speaker behind the figure or inside the hat.</li>
|
134 |
-
<li>Connect the battery to the voice box and make sure it has enough power and life span. You can use wires, soldering, clips, etc. You can also hide the battery behind the figure or inside the hat.</li>
|
135 |
-
<li>Attach the pull string to the voice box and make sure it is long and strong enough. You can use a cord, a rope, a ribbon, etc. You can also decorate the pull string with beads, charms, etc.</li>
|
136 |
-
<li>Test your woody pull string toy or project and enjoy it. You can pull the string and hear Woody say his phrases. You can also play with it or display it as you wish.</li>
|
137 |
-
</ol>
|
138 |
-
<h2>Conclusion</h2>
|
139 |
-
<p>In conclusion, woody pull string phrases download is a fun and easy way to get the voice of your favorite Toy Story character. You can find and download woody pull string phrases from various sources online, edit them according to your needs and preferences, and use them for your own toy or project. You can also make your own woody pull string toy or project using some materials and tools, and following some tips and tricks. We hope this article has helped you learn more about woody pull string phrases download and inspired you to try it yourself.</p>
|
140 |
-
<h2>FAQs</h2>
|
141 |
-
<p>Here are some frequently asked questions about woody pull string phrases download:</p>
|
142 |
-
<h3>Q1: Where can I buy a woody pull string toy or doll?</h3>
|
143 |
-
<p>A1: You can buy a woody pull string toy or doll from various online or physical stores, such as Amazon, eBay, Walmart, Target, etc. You can also check out some vintage or collectible shops for older models of woody pull string toys or dolls.</p>
|
144 |
-
<h3>Q2: How many phrases does woody say when you pull his string?</h3>
|
145 |
-
<p>A2: Depending on the model and version of the woody pull string toy or doll, he can say anywhere from 11 to 18 phrases when you pull his string. Some of the most common phrases are "There's a snake in my boot!", "You're my favorite deputy!", "Reach for the sky!", "Howdy, partner!", "Somebody's poisoned the waterhole!", etc.</p>
|
146 |
-
<h3>Q3: How can I change the voice or language of woody pull string phrases?</h3>
|
147 |
-
<p>A3: If you want to change the voice or language of woody pull string phrases, you will need to download a different set of phrases from the internet, or record your own voice using a microphone and a software. Then, you will need to replace the original record inside the voice box of the toy or doll with the new one, using a screwdriver and some wires. Be careful not to damage the toy or doll while doing this.</p>
|
148 |
-
<h3>Q4: How can I make my own woody pull string toy or project?</h3>
|
149 |
-
<p>A4: If you want to make your own woody pull string toy or project, you will need some materials and tools such as a wooden figure, a voice box, a speaker, a battery, a pull string, some paint, some glue, etc. You can follow some online tutorials or guides on how to assemble and decorate your own woody pull string toy or project. You can also use your creativity and imagination to make it unique and personalized.</p>
|
150 |
-
<h3>Q5: What are some other characters or toys that have pull string phrases?</h3>
|
151 |
-
<p>A5: Some other characters or toys that have pull string phrases are Chatty Cathy, Buzz Lightyear, Jessie, Mr. Potato Head, Barbie, Mickey Mouse, Elmo, etc. You can also download their phrases from the internet or make your own.</p>
spaces/1phancelerku/anime-remove-background/Download the Thrilling Nollywood Tv Series - Jagaban ft. Selina Tested (Episode 6).md
DELETED
@@ -1,187 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Jagaban Episode 6 Online</h1>
|
3 |
-
<p>If you are a fan of Nigerian action drama series, you might have heard of Jagaban, a popular YouTube series that features Selina Tested, another hit series. Jagaban is a thrilling story of revenge, betrayal, and survival in the streets of Lagos. In this article, we will show you how to download Jagaban episode 6 online, so you can watch it anytime and anywhere you want.</p>
|
4 |
-
<h2>What is Jagaban Series?</h2>
|
5 |
-
<h3>A brief introduction to the plot and characters of Jagaban series</h3>
|
6 |
-
<p>Jagaban is a YouTube series created by Holy Ghost Concept, a Nigerian production company that specializes in action and comedy videos. The series follows the life of Jagaban, a young man who was betrayed by his friends and left for dead. He survives and vows to take revenge on those who wronged him. Along the way, he meets Selina, a smart and fearless girl who is also on a mission to avenge her brother's death. Together, they face many enemies and challenges in their quest for justice.</p>
|
7 |
-
<h2>download jagaban episode 6</h2><br /><p><b><b>DOWNLOAD</b> ✫✫✫ <a href="https://jinyurl.com/2uNSGD">https://jinyurl.com/2uNSGD</a></b></p><br /><br />
|
8 |
-
<h3>Why you should watch Jagaban series</h3>
|
9 |
-
<p>Jagaban series is not just an ordinary action drama series. It is a captivating and realistic portrayal of the struggles and joys of living in Lagos, Nigeria's largest city. The series showcases the culture, language, music, and humor of the Nigerian people. It also features talented actors and actresses who deliver impressive performances and stunts. The series has a loyal fan base that eagerly awaits each new episode. If you are looking for an exciting and entertaining series that will keep you on the edge of your seat, you should definitely watch Jagaban series.</p>
|
10 |
-
<h2>Where to Watch Jagaban Episode 6 Online</h2>
|
11 |
-
<h3>The official YouTube channel of Holy Ghost Concept</h3>
|
12 |
-
<h4>How to access and subscribe to the channel</h4>
|
13 |
-
<p>The best and easiest way to watch Jagaban episode 6 online is to visit the official YouTube channel of Holy Ghost Concept. This is where they upload all their videos, including Jagaban series, Selina Tested series, Lightweight series, and more. You can access their channel by clicking [here] or by searching "Holy Ghost Concept" on YouTube. You can also subscribe to their channel by clicking the red "Subscribe" button below their banner. This way, you will get notified whenever they upload new videos.</p>
|
14 |
-
<h4>How to watch and download the video from YouTube</h4>
|
15 |
-
<p>Once you are on their channel, you can scroll down until you find Jagaban episode 6. You can also use the search bar on their channel page and type "Jagaban episode 6". Then, click on the video thumbnail to start watching it. You can adjust the quality and speed of the video according to your preference. You can also like, comment, share, or save the video for later viewing.</p>
|
16 |
-
<p>If you want to download the video from YouTube, you will need a third-party tool or app that can extract videos from YouTube. There are many options available online, but some may not work well or may contain viruses or malware. Therefore, you should be careful when choosing a video downloader tool or app. We <p>We recommend using one of the following video downloader tools or apps that are compatible with Windows 10 and YouTube:</p>
|
17 |
-
<ul>
|
18 |
-
<li><strong>4K Video Downloader</strong>: This is a low-priced tool that allows you to download videos from YouTube and other sites in various resolutions, formats, and qualities. You can also download playlists, channels, subtitles, and 3D and 360-degree videos. It has a simple and user-friendly interface and supports batch downloading. You can try it for free with some limitations or buy the premium version for $15.</li>
|
19 |
-
<li><strong>By Click Downloader</strong>: This is a well-priced software that enables you to download videos from YouTube and more than 40 other sites with one click. You can choose the quality, format, and name of the video, and also download subtitles, playlists, and live videos. It has a modern and intuitive interface and supports batch downloading. You can use it for free with some restrictions or upgrade to the premium version for $5.</li>
|
20 |
-
<li><strong>HitPaw Video Downloader and Converter</strong>: This is a freemium tool that works on Windows and MacOS and allows you to download videos from YouTube and other popular sites. You can also convert the videos to different formats and devices, and edit them with basic features. It has a sleek and easy-to-use interface and supports multiple downloads. You can use it for free with some limitations or buy the full version for $9.99.</li>
|
21 |
-
</ul>
|
22 |
-
<p>Once you have chosen your preferred video downloader tool or app, you can follow these general steps to download Jagaban episode 6 from YouTube:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Install and launch the video downloader tool or app on your computer.</li>
|
25 |
-
<li>Go to the YouTube video of Jagaban episode 6 that you want to download. You can find it by clicking [here] or by searching "Jagaban episode 6" on YouTube.</li>
|
26 |
-
<li>Copy the URL of the video from the address bar of your browser.</li>
|
27 |
-
<li>Paste the URL into the video downloader tool or app and click the "Download" or "Analyze" button.</li>
|
28 |
-
<li>Select the resolution, format, quality, and location of the video that you want to download.</li>
|
29 |
-
<li>Click the "Download" or "Start" button to begin the downloading process.</li>
|
30 |
-
<li>Wait for the video to be downloaded to your computer.</li>
|
31 |
-
</ol> <h3>Other video download websites that support Jagaban episode 6</h3>
|
32 |
-
<h4>A list of some popular and reliable video download websites</h4>
|
33 |
-
<p>If you don't want to use a video downloader tool or app, you can also use some online video download websites that support Jagaban episode 6. These websites allow you to download videos from YouTube and other sites without installing any software. However, they may have some drawbacks, such as limited options, slow speed, ads, pop-ups, or malware. Therefore, you should be careful when using these websites and only choose the ones that are trustworthy and secure.</p>
|
34 |
-
<p>Here are some of the popular and reliable video download websites that you can use to download Jagaban episode 6:</p>
|
35 |
-
<ul>
|
36 |
-
<li><strong>Y2mate</strong>: This is a free and fast website that lets you download videos from YouTube and other sites in various formats and qualities. You can also convert videos to audio files, edit videos online, and cut videos to your desired length. It has a simple and clean interface and supports multiple languages. You can access it by clicking [here] or by searching "Y2mate" on Google.</li>
|
37 |
-
<li><strong>SaveFrom.net</strong>: This is another free and easy website that enables you to download videos from YouTube and more than 40 other sites with one click. You can choose the quality and format of the video, and also download subtitles, playlists, and channels. It has a modern and user-friendly interface and supports multiple languages. You can visit it by clicking [here] or by searching "SaveFrom.net" on Google.</li>
|
38 |
-
<li><strong>BitDownloader</strong>: This is a free and powerful website that allows you to download videos from YouTube and hundreds of other sites in high quality and speed. You can also download videos in bulk, extract audio from videos, and download private videos. It has a sleek and easy-to-use interface and supports multiple languages. You can reach it by clicking [here] or by searching "BitDownloader" on Google.</li>
|
39 |
-
</ul>
|
40 |
-
<h4>How to use these websites to download the video</h4>
|
41 |
-
<p>Once you have chosen your preferred video download website, you can follow these general steps to download Jagaban episode 6 from YouTube:</p>
|
42 |
-
<p>* download jagaban episode 6 youtube<br />
|
43 |
-
* download jagaban episode 6 facebook<br />
|
44 |
-
* download jagaban episode 6 mp4<br />
|
45 |
-
* download jagaban episode 6 hd<br />
|
46 |
-
* download jagaban episode 6 free<br />
|
47 |
-
* download jagaban episode 6 online<br />
|
48 |
-
* download jagaban episode 6 full movie<br />
|
49 |
-
* download jagaban episode 6 english subtitles<br />
|
50 |
-
* download jagaban episode 6 selina tested<br />
|
51 |
-
* download jagaban episode 6 lightweight<br />
|
52 |
-
* download jagaban episode 6 revenge<br />
|
53 |
-
* download jagaban episode 6 anmctv<br />
|
54 |
-
* download jagaban episode 6 holy ghost concept<br />
|
55 |
-
* download jagaban episode 6 latest<br />
|
56 |
-
* download jagaban episode 6 nigerian movie<br />
|
57 |
-
* download jagaban episode 6 action movie<br />
|
58 |
-
* download jagaban episode 6 thriller movie<br />
|
59 |
-
* download jagaban episode 6 drama movie<br />
|
60 |
-
* download jagaban episode 6 comedy movie<br />
|
61 |
-
* download jagaban episode 6 crime movie<br />
|
62 |
-
* download jagaban episode 6 part 1<br />
|
63 |
-
* download jagaban episode 6 part 2<br />
|
64 |
-
* download jagaban episode 6 part 3<br />
|
65 |
-
* download jagaban episode 6 part 4<br />
|
66 |
-
* download jagaban episode 6 part 5<br />
|
67 |
-
* download jagaban episode 7 preview<br />
|
68 |
-
* download jagaban season 1 episode 6<br />
|
69 |
-
* download jagaban season 2 episode 6<br />
|
70 |
-
* download jagaban season 3 episode 6<br />
|
71 |
-
* download jagaban season finale episode 6<br />
|
72 |
-
* how to download jagaban episode 6<br />
|
73 |
-
* where to download jagaban episode 6<br />
|
74 |
-
* best site to download jagaban episode 6<br />
|
75 |
-
* fastest way to download jagaban episode 6<br />
|
76 |
-
* easiest way to download jagaban episode 6<br />
|
77 |
-
* watch and download jagaban episode 6<br />
|
78 |
-
* stream and download jagaban episode 6<br />
|
79 |
-
* torrent and download jagaban episode 6<br />
|
80 |
-
* magnet and download jagaban episode 6<br />
|
81 |
-
* direct link and download jagaban episode 6</p>
|
82 |
-
<ol>
|
83 |
-
<li>Go to the YouTube video of Jagaban episode 6 that you want to download. You can find it by clicking [here] or by searching "Jagaban episode 6" on YouTube.</li>
|
84 |
-
<li>Copy the URL of the video from the address bar of your browser.</li>
|
85 |
-
<li>Open a new tab on your browser and go to the video download website that you have selected.</li>
|
86 |
-
<li>Paste the URL of the video into the search box or input field of the website and click the "Download" or "Go" button.</li>
|
87 |
-
<li>Select the resolution, format, quality, and location of the video that you want to download.</li>
|
88 |
-
<li>Click the "Download" or "Save" button to start the downloading process.</li>
|
89 |
-
<li>Wait for the video to be downloaded to your computer.</li>
|
90 |
-
</ol> <h2>How to Enjoy Jagaban Episode 6 Offline</h2>
|
91 |
-
<h3>How to transfer the downloaded video to your device</h3>
|
92 |
-
<h4>How to use a USB cable or a cloud service to transfer the video</h4>
|
93 |
-
<p>After you have downloaded Jagaban episode 6 to your computer, you may want to transfer it to your device, such as your smartphone, tablet, or laptop, so you can watch it offline. There are two common ways to do this: using a USB cable or a cloud service.</p>
|
94 |
-
<p>If you want to use a USB cable, you will need a compatible cable that can connect your computer and your device. You will also need to enable the file transfer mode on your device and grant permission to access the device storage on your computer. Then, you can follow these general steps to transfer the video:</p>
|
95 |
-
<ol>
|
96 |
-
<li>Connect your device and your computer with the USB cable.</li>
|
97 |
-
<li>On your device, swipe down from the top of the screen and tap the notification that says "USB charging this device".</li>
|
98 |
-
<li>Select "File Transfer" or "Media Transfer Protocol (MTP)" from the options.</li>
|
99 |
-
<li>On your computer, open the File Explorer or Finder and locate your device name under "This PC" or "Devices".</li>
|
100 |
-
<li>Double-click on your device name and open the folder where you want to save the video, such as "Movies" or "Downloads".</li>
|
101 |
-
<li>Drag and drop the video file from your computer to the folder on your device.</li>
|
102 |
-
<li>Wait for the transfer to complete and then safely eject your device from your computer.</li>
|
103 |
-
</ol>
|
104 |
-
<p>If you want to use a cloud service, you will need an account and an app of a cloud service provider, such as Google Drive, Dropbox, or OneDrive. You will also need an internet connection on both your computer and your device. Then, you can follow these general steps to transfer the video:</p>
|
105 |
-
<ol>
|
106 |
-
<li>On your computer, open the app or website of the cloud service provider and sign in with your account.</li>
|
107 |
-
<li>Upload the video file from your computer to the cloud service.</li>
|
108 |
-
<li>On your device, open the app or website of the cloud service provider and sign in with the same account.</li>
|
109 |
-
<li>Download the video file from the cloud service to your device.</li>
|
110 |
-
<li>Wait for the download to finish and then open the video file on your device.</li>
|
111 |
-
</ol>
|
112 |
-
<h4>How to play the video on your device using a media player app</h4>
|
113 |
-
<p>Once you have transferred Jagaban episode 6 to your device, you can play it offline using a media player app. There are many media player apps available for different devices and platforms, but some may not support all video formats or qualities. Therefore, you should choose a media player app that is compatible with the video file that you have downloaded.</p>
|
114 |
-
<p>Here are some of the popular and reliable media player apps that you can use to play Jagaban episode 6 on your device:</p>
|
115 |
-
<ul>
|
116 |
-
<li><strong>VLC Media Player</strong>: This is a free and open-source app that can play almost any video format and quality. It also has many features and settings that allow you to customize your viewing experience. It is available for Windows, MacOS, Linux, Android, iOS, and more. You can download it by clicking [here] or by searching "VLC Media Player" on Google.</li>
|
117 |
-
<li><strong>MX Player</strong>: This is a powerful and versatile app that can play various video formats and qualities. It also supports subtitles, gestures, zooming, playback speed, and more. It is mainly designed for Android devices, but it also has versions for Windows and iOS. You can get it by clicking [here] or by searching "MX Player" on Google.</li>
|
118 |
-
<li><strong>KMPlayer</strong>: This is another free and feature-rich app that can play different video formats and qualities. It also supports subtitles, playlists, screen capture, editing, and more. It is compatible with Windows, MacOS, Android, iOS, and more. You can install it by clicking [here] or by searching "KMPlayer" on Google.</li>
|
119 |
-
</ul>
|
120 |
-
<p>Once you have chosen your preferred media player app, you can follow these general steps to play Jagaban episode 6 on your device:</p>
|
121 |
-
<ol>
|
122 |
-
<li>Install and launch the media player app on your device.</li>
|
123 |
-
<li>Browse or search for the video file that you have transferred to your device.</li>
|
124 |
-
<li>Tap or click on the video file to start playing it.</li>
|
125 |
-
<li>Adjust the volume, brightness, subtitles, playback speed, and other settings according to your preference.</li>
|
126 |
-
<li>Enjoy watching Jagaban episode 6 offline!</li>
|
127 |
-
</ol>
|
128 |
-
<h2>How to Share Your Thoughts and Feedback on Jagaban Episode 6</h2>
|
129 |
-
<h3>How to leave a comment or a review on the YouTube channel or the video download website</h3>
|
130 |
-
<p>After you have watched Jagaban episode 6 offline, you may want to share your thoughts and feedback on the episode with the creators and other viewers. You can do this by leaving a comment or a review on the YouTube channel or the video download website that you have used to download the episode.</p>
|
131 |
-
<p>If you want to leave a comment or a review on the YouTube channel, you will need a Google account and an internet connection. You will also need to visit the YouTube video of Jagaban episode 6 again. Then, you can follow these general steps to leave a comment or a review:</p>
|
132 |
-
<ol>
|
133 |
-
<li>Sign in to your Google account on YouTube.</li>
|
134 |
-
<li>Go to the YouTube video of Jagaban episode 6 that you have watched offline. You can find it by clicking [here] or by searching "Jagaban episode 6" on YouTube.</li>
|
135 |
-
<li>Scroll down to the bottom of the video page and find the comment section.</li>
|
136 |
-
<li>Type your comment or review in the text box and click the "Comment" button.</li>
|
137 |
-
<li>Wait for your comment or review to be posted and visible to others.</li>
|
138 |
-
</ol>
|
139 |
-
<p>If you want to leave a comment or a review on the video download website, you may or may not need an account and an internet connection. You will also need to visit the video download website that you have used to download the episode again. Then, you can follow these general steps to leave a comment or a review:</p>
|
140 |
-
<ol>
|
141 |
-
<li>Go to the video download website that you have used to download Jagaban episode 6. You can find it by clicking [here] or by searching "video download website" on Google.</li>
|
142 |
-
<li>Find the page or section where you can leave a comment or a review on the website. It may be under the video title, description, rating, or feedback.</li>
|
143 |
-
<li>Type your comment or review in the text box and click the "Submit" or "Post" button.</li>
|
144 |
-
<li>Wait for your comment or review to be published and visible to others.</li>
|
145 |
-
</ol>
|
146 |
-
<h3>How to join the online community of Jagaban fans on social media</h3>
|
147 |
-
<h4>How to find and follow the official social media accounts of Holy Ghost Concept and Jagaban series</h4>
|
148 |
-
<p>Another way to share your thoughts and feedback on Jagaban episode 6 is to join the online community of Jagaban fans on social media. You can interact with the creators and other fans of Jagaban series, get updates and news about the series, participate in polls and contests, and more. You can also show your support and appreciation for Holy Ghost Concept and Jagaban series by following their official social media accounts.</p>
|
149 |
-
<p>Here are some of the official social media accounts of Holy Ghost Concept and Jagaban series that you can find and follow:</p>
|
150 |
-
<ul>
|
151 |
-
<li><strong>Facebook</strong>: This is where they post their videos, photos, announcements, events, and more. You can like, comment, share, and react to their posts, as well as send them messages. You can access their Facebook page by clicking [here] or by searching "Holy Ghost Concept" on Facebook.</li>
|
152 |
-
<li><strong>Instagram</strong>: This is where they share their behind-the-scenes pictures, stories, reels, and more. You can like, comment, share, and save their posts, as well as send them direct messages. You can visit their Instagram profile by clicking [here] or by searching "@holyghostconcept" on Instagram.</li>
|
153 |
-
<li><strong>Twitter</strong>: This is where they tweet their updates, news, opinions, and more. You can like, retweet, reply, and quote their tweets, as well as send them direct messages. You can reach their Twitter account by clicking [here] or by searching "@holyghostconcept" on Twitter.</li>
|
154 |
-
</ul>
|
155 |
-
<h4>How to use hashtags and tags to join the conversation about Jagaban episode 6</h4>
|
156 |
-
<p>Besides following the official social media accounts of Holy Ghost Concept and Jagaban series, you can also use hashtags and tags to join the conversation about Jagaban episode 6. Hashtags are words or phrases that start with a # symbol and are used to categorize posts on social media. Tags are words or phrases that start with an @ symbol and are used to mention or notify someone on social media.</p>
|
157 |
-
<p>By using hashtags and tags related to Jagaban episode 6, you can make your posts more visible and relevant to other users who are interested in the same topic. You can also see what other users are saying about Jagaban episode 6 by searching for these hashtags and tags on social media. You can also tag or mention the official accounts of Holy Ghost Concept and Jagaban series, as well as the actors and actresses who star in the series, to show your appreciation and feedback.</p>
|
158 |
-
<p>Here are some of the hashtags and tags that you can use to join the conversation about Jagaban episode 6:</p>
|
159 |
-
<ul>
|
160 |
-
<li><strong>#JagabanEpisode6</strong>: This is the main hashtag that is used to talk about Jagaban episode 6. You can use it to share your thoughts, opinions, questions, and reactions about the episode. You can also search for it to see what other users are saying about the episode.</li>
|
161 |
-
<li><strong>#JagabanSeries</strong>: This is the general hashtag that is used to talk about Jagaban series as a whole. You can use it to express your love, support, and admiration for the series. You can also search for it to see what other users are saying about the series.</li>
|
162 |
-
<li><strong>#HolyGhostConcept</strong>: This is the hashtag that is used to talk about Holy Ghost Concept, the production company behind Jagaban series and other popular videos. You can use it to show your gratitude, respect, and feedback for their work. You can also search for it to see what other users are saying about Holy Ghost Concept.</li>
|
163 |
-
<li><strong>@holyghostconcept</strong>: This is the tag that you can use to mention or notify Holy Ghost Concept on social media. You can use it to ask them questions, give them suggestions, or request for more videos. You can also follow them to get updates and news from them.</li>
|
164 |
-
<li><strong>@selinatested</strong>: This is the tag that you can use to mention or notify Selina Tested, the hit series that features Selina, one of the main characters of Jagaban series. You can use it to show your appreciation, curiosity, and excitement for Selina Tested. You can also follow them to get more videos from them.</li>
|
165 |
-
<li><strong>@jagabanofficial</strong>: This is the tag that you can use to mention or notify Jagaban Official, the official account of Jagaban series on Instagram. You can use it to show your support, admiration, and feedback for Jagaban series. You can also follow them to get more pictures, stories, reels, and more from them.</li>
|
166 |
-
<li><strong>@davidjonesdavid</strong>: This is the tag that you can use to mention or notify David Jones David, the actor who plays Jagaban in Jagaban series. You can use it to show your respect, praise, and criticism for his performance. You can also follow him to get more updates and news from him.</li>
|
167 |
-
<li><strong>@princesssalt2</strong>: This is the tag that you can use to mention or notify Princess Salt, the actress who plays Selina in Jagaban series and Selina Tested. You can use it to show your respect, praise, and criticism for her performance. You can also follow her to get more updates and news from her.</li>
|
168 |
-
</ul>
|
169 |
-
<h2>Conclusion</h2>
|
170 |
-
<p>Jagaban episode 6 is a thrilling and entertaining episode of Jagaban series that you don't want to miss. It is available online on YouTube and other video download websites that you can use to watch it anytime and anywhere you want. You can also transfer it to your device and play it offline using a media player app. Moreover, you can share your thoughts and feedback on Jagaban episode 6 by joining the online community of Jagaban fans on social media using hashtags and tags.</p>
|
171 |
-
<p>We hope this article has helped you learn how to download Jagaban episode 6 online and enjoy it offline. If you have any questions or suggestions, please feel free to leave a comment below or contact us through our social media accounts. Thank you for reading and happy watching!</p>
|
172 |
-
<h2>FAQs</h2>
|
173 |
-
<p>Here are some of the frequently asked questions about Jagaban episode 6:</p>
|
174 |
-
<ol>
|
175 |
-
<li><strong>When was Jagaban episode 6 released?</strong></li>
|
176 |
-
<p>Jagaban episode 6 was released on June 18, 2023 on YouTube by Holy Ghost Concept.</p>
|
177 |
-
<li><strong>How long is Jagaban episode 6?</strong></li>
|
178 |
-
<p>Jagaban episode 6 is about 30 minutes long.</p>
|
179 |
-
<li><strong>What happens in Jagaban episode 6?</strong></li>
|
180 |
-
<p>Jagaban episode 6 continues the story of Jagaban and Selina as they face more enemies and challenges in their quest for revenge. In this episode, they encounter a new rival gang led by a mysterious leader who wants to take over their territory. They also have to deal with some internal conflicts and betrayals within their own gang. Meanwhile, they also discover some shocking secrets and revelations about their pasts.</p>
|
181 |
-
<li><strong>Where can I watch the previous episodes of Jagaban series?</strong></li>
|
182 |
-
<p>You can watch the previous episodes of Jagaban series on the official YouTube channel of Holy Ghost Concept. You can access their channel by clicking [here] or by searching "Holy Ghost Concept" on YouTube. You can also use the video download websites or tools that we have mentioned in this article to download the previous episodes and watch them offline.</p>
|
183 |
-
<li><strong>Will there be more episodes of Jagaban series?</strong></li>
|
184 |
-
<p>Yes, there will be more episodes of Jagaban series coming soon. Holy Ghost Concept has confirmed that they are working on the next episodes and they will release them as soon as possible. You can follow their social media accounts to get the latest updates and news about Jagaban series.</p>
|
185 |
-
</ol></p>
spaces/1phancelerku/anime-remove-background/Free Fire Advance Server on iPhone How to Get Activation Code and Play.md
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download and Access Free Fire Advance Server on iPhone?</h1>
|
3 |
-
<p>Free Fire is one of the most popular mobile games in the world, with millions of players enjoying its thrilling battle royale mode. But did you know that there is a way to experience the new features and updates of Free Fire before they are officially released? Yes, you heard it right. There is a special server called Free Fire Advance Server where you can test the latest content and give feedback to the developers.</p>
|
4 |
-
<p>However, accessing the Advance Server is not as easy as it sounds. You need to register on the official website and get an activation code to join the server. The server is also only available for a limited time and for Android devices only. So, if you want to try out the latest Free Fire content, you need to hurry and sign up for the Advance Server.</p>
|
5 |
-
<h2>free fire advance server download iphone</h2><br /><p><b><b>Download Zip</b> ✶ <a href="https://jinyurl.com/2uNLIS">https://jinyurl.com/2uNLIS</a></b></p><br /><br />
|
6 |
-
<p>In this article, I will guide you through the steps of registering, downloading, installing, and accessing the Free Fire Advance Server. I will also tell you about some of the benefits, features, and problems of the Advance Server. Let's get started!</p>
|
7 |
-
<h2>How to register for Free Fire Advance Server?</h2>
|
8 |
-
<p>The first step to access the Free Fire Advance Server is to register on the official website. Here are the steps you need to follow:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Open your web browser and go to <a href="https://ff-advance.ff.garena.com/">https://ff-advance.ff.garena.com/</a>. This is the official website of Free Fire Advance Server.</li>
|
11 |
-
<li>On the homepage, you will see two options to log in: Facebook and Google. Choose the one that is linked to your Free Fire account.</li>
|
12 |
-
<li>After logging in, you will see a registration form where you need to enter your name, email address, and phone number.</li>
|
13 |
-
<li>Click on "Join Now" to submit your registration.</li>
|
14 |
-
<li>Wait for Garena to review your application and send you an activation code via email.</li>
|
15 |
-
</ol>
|
16 |
-
<p>Note: Not everyone who registers will receive an activation code. Garena will only select a limited number of users based on their criteria. So, make sure you register as soon as possible to increase your chances of getting selected.</p>
|
17 |
-
<h2>How to download and install Free Fire Advance Server APK?</h2>
|
18 |
-
<p>The next step is to download and install the Free Fire Advance Server APK file on your Android device. Here are the steps you need to follow:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Once you receive an activation code via email, go back to <a href="https://ff-advance.ff.garena.com/">https://ff-advance.ff.garena.com/</a> and log in with your Facebook or Google account.</li>
|
21 |
-
<li>You will see a "Download APK" button on the website. Click on it to download the APK file.</li>
|
22 |
-
<li>The APK file size is around 700 MB, so make sure you have enough storage space on your device.</li>
|
23 |
-
<li>After downloading the APK file, locate it in your file manager and tap on it to install it.</li>
|
24 |
-
<li>You may need to enable "Install from unknown sources" in your device settings to allow the installation of the APK file.</li>
|
25 |
-
<li>Follow the on-screen instructions to complete the installation.</li>
|
26 |
-
</ol>
|
27 |
-
<p>Note: You need to have at least 2 GB of RAM and Android 4.4 or higher to run the Free Fire Advance Server APK.</p>
|
28 |
-
<p>How to get free fire advance server on iphone<br />
|
29 |
-
Free fire advance server ios download link<br />
|
30 |
-
Free fire advance server registration for iphone users<br />
|
31 |
-
Free fire advance server iphone app<br />
|
32 |
-
Free fire advance server download in ios 2022<br />
|
33 |
-
Free fire advance server iphone mai kaise download kare<br />
|
34 |
-
Free fire advance server ios release date<br />
|
35 |
-
Free fire advance server iphone gameplay<br />
|
36 |
-
Free fire advance server download for iphone 6<br />
|
37 |
-
Free fire advance server ios apk<br />
|
38 |
-
Free fire advance server iphone 7 download<br />
|
39 |
-
Free fire advance server ios beta<br />
|
40 |
-
Free fire advance server download for iphone x<br />
|
41 |
-
Free fire advance server ios update<br />
|
42 |
-
Free fire advance server iphone 8 plus<br />
|
43 |
-
Free fire advance server ios ob35<br />
|
44 |
-
Free fire advance server download for iphone 11<br />
|
45 |
-
Free fire advance server ios version<br />
|
46 |
-
Free fire advance server iphone xr<br />
|
47 |
-
Free fire advance server ios ob36<br />
|
48 |
-
Free fire advance server download for iphone 12<br />
|
49 |
-
Free fire advance server ios requirements<br />
|
50 |
-
Free fire advance server iphone xs max<br />
|
51 |
-
Free fire advance server ios ob37<br />
|
52 |
-
Free fire advance server download for iphone se<br />
|
53 |
-
Free fire advance server ios review<br />
|
54 |
-
Free fire advance server iphone 6s plus<br />
|
55 |
-
Free fire advance server ios ob38<br />
|
56 |
-
Free fire advance server download for iphone 13<br />
|
57 |
-
Free fire advance server ios features<br />
|
58 |
-
Free fire advance server iphone 7 plus download<br />
|
59 |
-
Free fire advance server ios ob39<br />
|
60 |
-
Free fire advance server download for iphone 5s<br />
|
61 |
-
Free fire advance server ios tips and tricks<br />
|
62 |
-
Free fire advance server iphone x download<br />
|
63 |
-
Free fire advance server ios ob40<br />
|
64 |
-
Free fire advance server download for iphone 4s<br />
|
65 |
-
Free fire advance server ios gameplay video<br />
|
66 |
-
Free fire advance server iphone 8 download<br />
|
67 |
-
Free fire advance server ios ob41<br />
|
68 |
-
Free fire advance server download for iphone xr max <br />
|
69 |
-
Free fire advance server ios bug report <br />
|
70 |
-
Free fire advance server iphone se 2020 <br />
|
71 |
-
Free fire advance server ios ob42 <br />
|
72 |
-
Free fire advance server download for iphone 12 pro max <br />
|
73 |
-
Free fire advance server ios rewards <br />
|
74 |
-
Free fire advance server iphone xs <br />
|
75 |
-
Free fire advance server ios ob43</p>
|
76 |
-
<h2>How to access Free Fire Advance Server?</h2>
|
77 |
-
<p>The final step is to access the Free Fire Advance Server and enjoy the new features and updates. Here are the steps you need to follow:</p>
|
78 |
-
<ol>
|
79 |
-
<li>Open the Free Fire Advance Server app on your device. You will see a different icon and name than the regular Free Fire app.</li>
|
80 |
-
<li>Enter your activation code that you received via email and tap on "Confirm".</li>
|
81 |
-
<li>You will be taken to the main menu of the game, where you can choose your game mode and start playing.</li>
|
82 |
-
<li>You can also access the "Report" button on the top right corner of the screen, where you can report any bugs or glitches you encounter in the game.</li>
|
83 |
-
<li>You can also access the "Feedback" button on the bottom right corner of the screen, where you can give your suggestions and opinions about the new features and updates.</li>
|
84 |
-
</ol>
|
85 |
-
<p>Note: You can only access the Free Fire Advance Server during the testing period, which is usually a few days before the official update. After that, the server will be closed and you will have to wait for the next testing period.</p>
|
86 |
-
<h2>What are the benefits of Free Fire Advance Server?</h2>
|
87 |
-
<p>There are many benefits of joining the Free Fire Advance Server, such as:</p>
|
88 |
-
<ul>
|
89 |
-
<li>You can experience the new features and updates of Free Fire before anyone else.</li>
|
90 |
-
<li>You can help the developers improve the game by reporting bugs and giving feedback.</li>
|
91 |
-
<li>You can earn free diamonds as a reward for your participation and contribution. The more bugs you report and feedback you give, the more diamonds you get.</li>
|
92 |
-
<li>You can have fun and challenge yourself with new content and gameplay.</li>
|
93 |
-
</ul>
|
94 |
-
<h2>What are the features of Free Fire Advance Server?</h2>
|
95 |
-
<p>The features of Free Fire Advance Server vary depending on the testing period and the upcoming update. However, some of the common features that you can expect to see are:</p>
|
96 |
-
<ul>
|
97 |
-
<li>New characters, pets, weapons, skins, emotes, and items.</li>
|
98 |
-
<li>New game modes, maps, events, and missions.</li>
|
99 |
-
<li>New graphics, animations, sounds, and effects.</li>
|
100 |
-
<li>New balance changes, bug fixes, and optimizations.</li>
|
101 |
-
</ul>
|
102 |
-
<p>For example, in the latest testing period of June 2023, some of the new features that were introduced in the Advance Server were:</p>
|
103 |
-
<ul>
|
104 |
-
<li>A new character named Remy, who has a passive skill called "Quick Reload" that reduces his reload time by 10%.</li>
|
105 |
-
<li>A new pet named Fifi, who has a passive skill called "Furry Friend" that increases his owner's movement speed by 5% when they are near him.</li>
|
106 |
-
<li>A new weapon called M82B Sniper Rifle, which has a special ability called "Armor Penetration" that ignores 30% of the enemy's armor when shooting.</li>
|
107 |
-
<li>A new game mode called "Bomb Squad", where two teams have to plant or defuse bombs in different locations within a time limit.</li>
|
108 |
-
<li>A new map called "Bermuda Remastered", which is a revamped version of the classic Bermuda map with new locations, structures, and details.</li>
|
109 |
-
</ul>
|
110 |
-
<h2>What are the problems of Free Fire Advance Server?</h2>
|
111 |
-
<p>While joining the Free Fire Advance Server can be exciting and rewarding, it also comes with some problems that you need to be aware of, such as:</p>
|
112 |
-
<ul>
|
113 |
-
-<li>The server is only available for Android devices. If you have an iPhone or any other device, you cannot access the Advance Server.</li>
-<li>The server is only open for a limited time. You cannot play on the Advance Server whenever you want. You have to wait for Garena to announce the testing period and open the server.</li>
-<li>The server is not stable. Since it is a testing server, you may encounter many bugs, glitches, crashes, and errors in the game. The game may also lag or freeze at times.</li>
-<li>The server is not compatible with your regular Free Fire account. You cannot use your existing account to play on the Advance Server. You have to create a new account and start from scratch. You also cannot transfer your progress or items from one server to another.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Free Fire Advance Server is a great way to enjoy the new features and updates of Free Fire before they are officially released. You can also help the developers improve the game by reporting bugs and giving feedback. You can also earn free diamonds as a reward for your participation and contribution.</p>
-<p>However, accessing the Advance Server is not as easy as it sounds. You need to register on the official website and get an activation code to join the server. The server is also only available for a limited time and for Android devices only. So, if you want to try out the latest Free Fire content, you need to hurry and sign up for the Advance Server.</p>
-<p>In this article, I have guided you through the steps of registering, downloading, installing, and accessing the Free Fire Advance Server. I have also told you about some of the benefits, features, and problems of the Advance Server. I hope you found this article helpful and informative.</p>
-<p>If you have any questions or comments, feel free to leave them below. I would love to hear from you. And if you liked this article, please share it with your friends and fellow Free Fire fans. Thank you for reading!</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Free Fire Advance Server:</p>
-<h3>Q: How can I get an activation code for Free Fire Advance Server?</h3>
-<p>A: You can get an activation code for Free Fire Advance Server by registering on the official website and waiting for Garena to send you an email with the code. However, not everyone who registers will receive an activation code. Garena will only select a limited number of users based on their criteria.</p>
-<h3>Q: How can I earn free diamonds on Free Fire Advance Server?</h3>
-<p>A: You can earn free diamonds on Free Fire Advance Server by reporting bugs and giving feedback to the developers. The more bugs you report and feedback you give, the more diamonds you get. You can also earn diamonds by completing certain tasks and missions on the server.</p>
-<h3>Q: How can I update Free Fire Advance Server?</h3>
-<p>A: You can update Free Fire Advance Server by downloading and installing the latest APK file from the official website. You will also receive a notification on the app when there is a new update available.</p>
-<h3>Q: How can I delete Free Fire Advance Server?</h3>
-<p>A: You can delete Free Fire Advance Server by uninstalling the app from your device. You can also remove your account from the server by logging out from the app and deleting your data from the website.</p>
-<h3>Q: How can I contact Free Fire Advance Server support?</h3>
-<p>A: You can contact Free Fire Advance Server support by sending an email to <a href="">[email protected]</a>. You can also visit their <a href="">Facebook page</a> or <a href="">Instagram account</a> for more information and updates.</p>
spaces/1toTree/lora_test/ppdiffusers/pipelines/alt_diffusion/modeling_roberta_series.py
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
from dataclasses import dataclass
|
17 |
-
from typing import Optional, Tuple
|
18 |
-
|
19 |
-
import paddle
|
20 |
-
from paddle import nn
|
21 |
-
|
22 |
-
from paddlenlp.transformers import RobertaConfig as XLMRobertaConfig
|
23 |
-
from paddlenlp.transformers import RobertaModel as XLMRobertaModel
|
24 |
-
from paddlenlp.transformers import RobertaPretrainedModel
|
25 |
-
from paddlenlp.transformers.model_outputs import ModelOutput
|
26 |
-
|
27 |
-
|
28 |
-
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
|
29 |
-
"""
|
30 |
-
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
|
31 |
-
are ignored. This is modified from fairseq's `utils.make_positions`.
|
32 |
-
|
33 |
-
Args:
|
34 |
-
x: paddle.Tensor x:
|
35 |
-
Returns: paddle.Tensor
|
36 |
-
|
37 |
-
"""
|
38 |
-
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
|
39 |
-
mask = (input_ids != padding_idx).cast("int64")
|
40 |
-
incremental_indices = (paddle.cumsum(mask, axis=1) + past_key_values_length) * mask
|
41 |
-
return incremental_indices + padding_idx
|
42 |
-
|
43 |
-
|
44 |
-
@dataclass
|
45 |
-
class TransformationModelOutput(ModelOutput):
|
46 |
-
"""
|
47 |
-
Base class for text model's outputs that also contains a pooling of the last hidden states.
|
48 |
-
Args:
|
49 |
-
text_embeds (`paddle.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
|
50 |
-
The text embeddings obtained by applying the projection layer to the pooler_output.
|
51 |
-
last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
52 |
-
Sequence of hidden-states at the output of the last layer of the model.
|
53 |
-
hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
54 |
-
Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
55 |
-
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
56 |
-
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
57 |
-
attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
58 |
-
Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
59 |
-
sequence_length)`.
|
60 |
-
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
61 |
-
heads.
|
62 |
-
"""
|
63 |
-
|
64 |
-
projection_state: Optional[paddle.Tensor] = None
|
65 |
-
last_hidden_state: paddle.Tensor = None
|
66 |
-
hidden_states: Optional[Tuple[paddle.Tensor]] = None
|
67 |
-
attentions: Optional[Tuple[paddle.Tensor]] = None
|
68 |
-
|
69 |
-
|
70 |
-
class RobertaSeriesConfig(XLMRobertaConfig):
|
71 |
-
model_type = "roberta"
|
72 |
-
|
73 |
-
def __init__(
|
74 |
-
self,
|
75 |
-
pad_token_id=1,
|
76 |
-
bos_token_id=0,
|
77 |
-
eos_token_id=2,
|
78 |
-
project_dim=512,
|
79 |
-
pooler_fn="cls",
|
80 |
-
learn_encoder=False,
|
81 |
-
use_attention_mask=True,
|
82 |
-
**kwargs,
|
83 |
-
):
|
84 |
-
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
85 |
-
self.project_dim = project_dim
|
86 |
-
self.pooler_fn = pooler_fn
|
87 |
-
self.learn_encoder = learn_encoder
|
88 |
-
self.use_attention_mask = use_attention_mask
|
89 |
-
|
90 |
-
|
91 |
-
class RobertaSeriesModelWithTransformation(RobertaPretrainedModel):
|
92 |
-
_keys_to_ignore_on_load_unexpected = [r"pooler"]
|
93 |
-
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
|
94 |
-
base_model_prefix = "roberta"
|
95 |
-
config_class = RobertaSeriesConfig
|
96 |
-
|
97 |
-
def __init__(self, config: RobertaSeriesConfig):
|
98 |
-
super().__init__(config)
|
99 |
-
self.roberta = XLMRobertaModel(config)
|
100 |
-
self.transformation = nn.Linear(config.hidden_size, config.project_dim)
|
101 |
-
self.apply(self.init_weights)
|
102 |
-
|
103 |
-
def forward(
|
104 |
-
self,
|
105 |
-
input_ids: Optional[paddle.Tensor] = None,
|
106 |
-
attention_mask: Optional[paddle.Tensor] = None,
|
107 |
-
token_type_ids: Optional[paddle.Tensor] = None,
|
108 |
-
position_ids: Optional[paddle.Tensor] = None,
|
109 |
-
output_attentions: Optional[bool] = None,
|
110 |
-
return_dict: Optional[bool] = None,
|
111 |
-
output_hidden_states: Optional[bool] = None,
|
112 |
-
):
|
113 |
-
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
114 |
-
|
115 |
-
if position_ids is None:
|
116 |
-
position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
|
117 |
-
outputs = self.base_model(
|
118 |
-
input_ids=input_ids,
|
119 |
-
attention_mask=attention_mask,
|
120 |
-
token_type_ids=token_type_ids,
|
121 |
-
position_ids=position_ids,
|
122 |
-
output_attentions=output_attentions,
|
123 |
-
output_hidden_states=output_hidden_states,
|
124 |
-
return_dict=return_dict,
|
125 |
-
)
|
126 |
-
|
127 |
-
projection_state = self.transformation(outputs.last_hidden_state)
|
128 |
-
|
129 |
-
return TransformationModelOutput(
|
130 |
-
projection_state=projection_state,
|
131 |
-
last_hidden_state=outputs.last_hidden_state,
|
132 |
-
hidden_states=outputs.hidden_states,
|
133 |
-
attentions=outputs.attentions,
|
134 |
-
)
|
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets.py
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
import layers
|
6 |
-
from . import spec_utils
|
7 |
-
|
8 |
-
|
9 |
-
class BaseASPPNet(nn.Module):
|
10 |
-
def __init__(self, nin, ch, dilations=(4, 8, 16)):
|
11 |
-
super(BaseASPPNet, self).__init__()
|
12 |
-
self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
|
13 |
-
self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
|
14 |
-
self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
|
15 |
-
self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
|
16 |
-
|
17 |
-
self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
|
18 |
-
|
19 |
-
self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
|
20 |
-
self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
|
21 |
-
self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
|
22 |
-
self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
|
23 |
-
|
24 |
-
def __call__(self, x):
|
25 |
-
h, e1 = self.enc1(x)
|
26 |
-
h, e2 = self.enc2(h)
|
27 |
-
h, e3 = self.enc3(h)
|
28 |
-
h, e4 = self.enc4(h)
|
29 |
-
|
30 |
-
h = self.aspp(h)
|
31 |
-
|
32 |
-
h = self.dec4(h, e4)
|
33 |
-
h = self.dec3(h, e3)
|
34 |
-
h = self.dec2(h, e2)
|
35 |
-
h = self.dec1(h, e1)
|
36 |
-
|
37 |
-
return h
|
38 |
-
|
39 |
-
|
40 |
-
class CascadedASPPNet(nn.Module):
|
41 |
-
def __init__(self, n_fft):
|
42 |
-
super(CascadedASPPNet, self).__init__()
|
43 |
-
self.stg1_low_band_net = BaseASPPNet(2, 16)
|
44 |
-
self.stg1_high_band_net = BaseASPPNet(2, 16)
|
45 |
-
|
46 |
-
self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
|
47 |
-
self.stg2_full_band_net = BaseASPPNet(8, 16)
|
48 |
-
|
49 |
-
self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
|
50 |
-
self.stg3_full_band_net = BaseASPPNet(16, 32)
|
51 |
-
|
52 |
-
self.out = nn.Conv2d(32, 2, 1, bias=False)
|
53 |
-
self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
|
54 |
-
self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
|
55 |
-
|
56 |
-
self.max_bin = n_fft // 2
|
57 |
-
self.output_bin = n_fft // 2 + 1
|
58 |
-
|
59 |
-
self.offset = 128
|
60 |
-
|
61 |
-
def forward(self, x, aggressiveness=None):
|
62 |
-
mix = x.detach()
|
63 |
-
x = x.clone()
|
64 |
-
|
65 |
-
x = x[:, :, : self.max_bin]
|
66 |
-
|
67 |
-
bandw = x.size()[2] // 2
|
68 |
-
aux1 = torch.cat(
|
69 |
-
[
|
70 |
-
self.stg1_low_band_net(x[:, :, :bandw]),
|
71 |
-
self.stg1_high_band_net(x[:, :, bandw:]),
|
72 |
-
],
|
73 |
-
dim=2,
|
74 |
-
)
|
75 |
-
|
76 |
-
h = torch.cat([x, aux1], dim=1)
|
77 |
-
aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
|
78 |
-
|
79 |
-
h = torch.cat([x, aux1, aux2], dim=1)
|
80 |
-
h = self.stg3_full_band_net(self.stg3_bridge(h))
|
81 |
-
|
82 |
-
mask = torch.sigmoid(self.out(h))
|
83 |
-
mask = F.pad(
|
84 |
-
input=mask,
|
85 |
-
pad=(0, 0, 0, self.output_bin - mask.size()[2]),
|
86 |
-
mode="replicate",
|
87 |
-
)
|
88 |
-
|
89 |
-
if self.training:
|
90 |
-
aux1 = torch.sigmoid(self.aux1_out(aux1))
|
91 |
-
aux1 = F.pad(
|
92 |
-
input=aux1,
|
93 |
-
pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
|
94 |
-
mode="replicate",
|
95 |
-
)
|
96 |
-
aux2 = torch.sigmoid(self.aux2_out(aux2))
|
97 |
-
aux2 = F.pad(
|
98 |
-
input=aux2,
|
99 |
-
pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
|
100 |
-
mode="replicate",
|
101 |
-
)
|
102 |
-
return mask * mix, aux1 * mix, aux2 * mix
|
103 |
-
else:
|
104 |
-
if aggressiveness:
|
105 |
-
mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
|
106 |
-
mask[:, :, : aggressiveness["split_bin"]],
|
107 |
-
1 + aggressiveness["value"] / 3,
|
108 |
-
)
|
109 |
-
mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
|
110 |
-
mask[:, :, aggressiveness["split_bin"] :],
|
111 |
-
1 + aggressiveness["value"],
|
112 |
-
)
|
113 |
-
|
114 |
-
return mask * mix
|
115 |
-
|
116 |
-
def predict(self, x_mag, aggressiveness=None):
|
117 |
-
h = self.forward(x_mag, aggressiveness)
|
118 |
-
|
119 |
-
if self.offset > 0:
|
120 |
-
h = h[:, :, :, self.offset : -self.offset]
|
121 |
-
assert h.size()[3] > 0
|
122 |
-
|
123 |
-
return h
|
spaces/AI-Zero-to-Hero/07-SL-Chatbot-Blenderbot/app.py
DELETED
@@ -1,28 +0,0 @@
-import streamlit as st
-#from streamlit_chat import message as st_message
-from streamlit_chat import message as st_message
-from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
-
-st.title("Chatbot Blenderbot Streamlit")
-
-if "history" not in st.session_state:
-    st.session_state.history = []
-
-def get_models():
-    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
-    model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
-    return tokenizer, model
-
-def generate_answer():
-    tokenizer, model = get_models()
-    user_message = st.session_state.input_text
-    inputs = tokenizer(st.session_state.input_text, return_tensors="pt")
-    result = model.generate(**inputs)
-    message_bot = tokenizer.decode(result[0], skip_special_tokens=True) # .replace("<s>", "").replace("</s>", "")
-    st.session_state.history.append({"message": user_message, "is_user": True})
-    st.session_state.history.append({"message": message_bot, "is_user": False})
-
-st.text_input("Response", key="input_text", on_change=generate_answer)
-
-for chat in st.session_state.history:
-    st_message(**chat)
spaces/AIConsultant/MusicGen/audiocraft/utils/ui.py
DELETED
@@ -1,34 +0,0 @@
-from pathlib import Path
-
-import gradio as gr
-import torch
-
-refresh_symbol = '\U0001f504' # 🔄
-
-class ToolButton(gr.Button, gr.components.IOComponent):
-    """Small button with single emoji as text, fits inside gradio forms"""
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-    def get_block_name(self):
-        return "button"
-
-
-def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class):
-    def refresh():
-        refresh_method()
-        args = refreshed_args() if callable(refreshed_args) else refreshed_args
-
-        for k, v in args.items():
-            setattr(refresh_component, k, v)
-
-        return gr.update(**(args or {}))
-
-    refresh_button = ToolButton(value=refresh_symbol, elem_classes=elem_class, scale=1, size="sm", container=False)
-    refresh_button.click(
-        fn=refresh,
-        inputs=[],
-        outputs=[refresh_component]
-    )
-    return refresh_button
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/tokenize_caption.py
DELETED
@@ -1,86 +0,0 @@
-import json
-from tqdm import tqdm
-import re
-import fire
-
-
-def tokenize_caption(input_json: str,
-                     keep_punctuation: bool = False,
-                     host_address: str = None,
-                     character_level: bool = False,
-                     zh: bool = True,
-                     output_json: str = None):
-    """Build vocabulary from csv file with a given threshold to drop all counts < threshold
-
-    Args:
-        input_json(string): Preprossessed json file. Structure like this:
-            {
-              'audios': [
-                {
-                  'audio_id': 'xxx',
-                  'captions': [
-                    {
-                      'caption': 'xxx',
-                      'cap_id': 'xxx'
-                    }
-                  ]
-                },
-                ...
-              ]
-            }
-        threshold (int): Threshold to drop all words with counts < threshold
-        keep_punctuation (bool): Includes or excludes punctuation.
-
-    Returns:
-        vocab (Vocab): Object with the processed vocabulary
-    """
-    data = json.load(open(input_json, "r"))["audios"]
-
-    if zh:
-        from nltk.parse.corenlp import CoreNLPParser
-        from zhon.hanzi import punctuation
-        parser = CoreNLPParser(host_address)
-        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
-            for cap_idx in range(len(data[audio_idx]["captions"])):
-                caption = data[audio_idx]["captions"][cap_idx]["caption"]
-                # Remove all punctuations
-                if not keep_punctuation:
-                    caption = re.sub("[{}]".format(punctuation), "", caption)
-                if character_level:
-                    tokens = list(caption)
-                else:
-                    tokens = list(parser.tokenize(caption))
-                data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
-    else:
-        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
-        captions = {}
-        for audio_idx in range(len(data)):
-            audio_id = data[audio_idx]["audio_id"]
-            captions[audio_id] = []
-            for cap_idx in range(len(data[audio_idx]["captions"])):
-                caption = data[audio_idx]["captions"][cap_idx]["caption"]
-                captions[audio_id].append({
-                    "audio_id": audio_id,
-                    "id": cap_idx,
-                    "caption": caption
-                })
-        tokenizer = PTBTokenizer()
-        captions = tokenizer.tokenize(captions)
-        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
-            audio_id = data[audio_idx]["audio_id"]
-            for cap_idx in range(len(data[audio_idx]["captions"])):
-                tokens = captions[audio_id][cap_idx]
-                data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
-
-    if output_json:
-        json.dump(
-            { "audios": data }, open(output_json, "w"),
-            indent=4, ensure_ascii=not zh)
-    else:
-        json.dump(
-            { "audios": data }, open(input_json, "w"),
-            indent=4, ensure_ascii=not zh)
-
-
-if __name__ == "__main__":
-    fire.Fire(tokenize_caption)
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/word2vec/create_word_embedding.py
DELETED
@@ -1,67 +0,0 @@
-# coding=utf-8
-#!/usr/bin/env python3
-
-import numpy as np
-import pandas as pd
-import torch
-import gensim
-from gensim.models import Word2Vec
-from tqdm import tqdm
-import fire
-
-import sys
-import os
-sys.path.append(os.getcwd())
-from utils.build_vocab import Vocabulary
-
-def create_embedding(vocab_file: str,
-                     embed_size: int,
-                     output: str,
-                     caption_file: str = None,
-                     pretrained_weights_path: str = None,
-                     **word2vec_kwargs):
-    vocabulary = torch.load(vocab_file, map_location="cpu")
-
-    if pretrained_weights_path:
-        model = gensim.models.KeyedVectors.load_word2vec_format(
-            fname=pretrained_weights_path,
-            binary=True,
-        )
-        if model.vector_size != embed_size:
-            assert embed_size < model.vector_size, f"only reduce dimension, cannot add dimesion {model.vector_size} to {embed_size}"
-            from sklearn.decomposition import PCA
-            pca = PCA(n_components=embed_size)
-            model.vectors = pca.fit_transform(model.vectors)
-    else:
-        caption_df = pd.read_json(caption_file)
-        caption_df["tokens"] = caption_df["tokens"].apply(lambda x: ["<start>"] + [token for token in x] + ["<end>"])
-        sentences = list(caption_df["tokens"].values)
-        epochs = word2vec_kwargs.get("epochs", 10)
-        if "epochs" in word2vec_kwargs:
-            del word2vec_kwargs["epochs"]
-        model = Word2Vec(size=embed_size, min_count=1, **word2vec_kwargs)
-        model.build_vocab(sentences=sentences)
-        model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
-
-    word_embeddings = np.random.randn(len(vocabulary), embed_size)
-
-    if isinstance(model, gensim.models.word2vec.Word2Vec):
-        model = model.wv
-    with tqdm(total=len(vocabulary), ascii=True) as pbar:
-        for word, idx in vocabulary.word2idx.items():
-            try:
-                word_embeddings[idx] = model.get_vector(word)
-            except KeyError:
-                print(f"word {word} not found in word2vec model, it is random initialized!")
-            pbar.update()
-
-    np.save(output, word_embeddings)
-
-    print("Finish writing word2vec embeddings to " + output)
-
-
-if __name__ == "__main__":
-    fire.Fire(create_embedding)
-
-
-
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/fs2_orig.py
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
from text_to_speech.modules.commons.layers import Embedding
|
4 |
-
from text_to_speech.modules.commons.nar_tts_modules import EnergyPredictor, PitchPredictor
|
5 |
-
from text_to_speech.modules.tts.commons.align_ops import expand_states
|
6 |
-
from text_to_speech.modules.tts.fs import FastSpeech
|
7 |
-
from text_to_speech.utils.audio.cwt import cwt2f0, get_lf0_cwt
|
8 |
-
from text_to_speech.utils.audio.pitch.utils import denorm_f0, f0_to_coarse, norm_f0
|
9 |
-
import numpy as np
|
10 |
-
|
11 |
-
|
12 |
-
class FastSpeech2Orig(FastSpeech):
|
13 |
-
def __init__(self, dict_size, hparams, out_dims=None):
|
14 |
-
super().__init__(dict_size, hparams, out_dims)
|
15 |
-
predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
|
16 |
-
if hparams['use_energy_embed']:
|
17 |
-
self.energy_embed = Embedding(300, self.hidden_size, 0)
|
18 |
-
self.energy_predictor = EnergyPredictor(
|
19 |
-
self.hidden_size, n_chans=predictor_hidden,
|
20 |
-
n_layers=hparams['predictor_layers'], dropout_rate=hparams['predictor_dropout'], odim=2,
|
21 |
-
kernel_size=hparams['predictor_kernel'])
|
22 |
-
if hparams['pitch_type'] == 'cwt' and hparams['use_pitch_embed']:
|
23 |
-
self.pitch_predictor = PitchPredictor(
|
24 |
-
self.hidden_size, n_chans=predictor_hidden,
|
25 |
-
n_layers=hparams['predictor_layers'], dropout_rate=hparams['predictor_dropout'], odim=11,
|
26 |
-
kernel_size=hparams['predictor_kernel'])
|
27 |
-
self.cwt_stats_layers = nn.Sequential(
|
28 |
-
nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(),
|
29 |
-
nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, 2))
|
30 |
-
|
31 |
-
def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None,
|
32 |
-
f0=None, uv=None, energy=None, infer=False, **kwargs):
|
33 |
-
ret = {}
|
34 |
-
encoder_out = self.encoder(txt_tokens) # [B, T, C]
|
35 |
-
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
|
36 |
-
style_embed = self.forward_style_embed(spk_embed, spk_id)
|
37 |
-
|
38 |
-
# add dur
|
39 |
-
dur_inp = (encoder_out + style_embed) * src_nonpadding
|
40 |
-
mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret)
|
41 |
-
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
|
42 |
-
decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph)
|
43 |
-
|
44 |
-
# add pitch and energy embed
|
45 |
-
if self.hparams['use_pitch_embed']:
|
46 |
-
pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
|
47 |
-
decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out)
|
48 |
-
|
49 |
-
# add pitch and energy embed
|
50 |
-
if self.hparams['use_energy_embed']:
|
51 |
-
energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding
|
52 |
-
decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret)
|
53 |
-
|
54 |
-
# decoder input
|
55 |
-
ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding
|
56 |
-
if self.hparams['dec_inp_add_noise']:
|
57 |
-
B, T, _ = decoder_inp.shape
|
58 |
-
z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device)
|
59 |
-
ret['adv_z'] = z
|
60 |
-
decoder_inp = torch.cat([decoder_inp, z], -1)
|
61 |
-
decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding
|
62 |
-
ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
|
63 |
-
return ret
|
64 |
-
|
65 |
-
def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
|
66 |
-
if self.hparams['pitch_type'] == 'cwt':
|
67 |
-
decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
|
68 |
-
pitch_padding = mel2ph == 0
|
69 |
-
ret['cwt'] = cwt_out = self.pitch_predictor(decoder_inp)
|
70 |
-
stats_out = self.cwt_stats_layers(decoder_inp.mean(1)) # [B, 2]
|
71 |
-
mean = ret['f0_mean'] = stats_out[:, 0]
|
72 |
-
std = ret['f0_std'] = stats_out[:, 1]
|
73 |
-
cwt_spec = cwt_out[:, :, :10]
|
74 |
-
if f0 is None:
|
75 |
-
std = std * self.hparams['cwt_std_scale']
|
76 |
-
f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
|
77 |
-
if self.hparams['use_uv']:
|
78 |
-
assert cwt_out.shape[-1] == 11
|
79 |
-
uv = cwt_out[:, :, -1] > 0
|
80 |
-
ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv if self.hparams['use_uv'] else None,
|
81 |
-
pitch_padding=pitch_padding)
|
82 |
-
pitch = f0_to_coarse(f0_denorm) # start from 0
|
83 |
-
pitch_embed = self.pitch_embed(pitch)
|
84 |
-
return pitch_embed
|
85 |
-
else:
|
86 |
-
return super(FastSpeech2Orig, self).forward_pitch(decoder_inp, f0, uv, mel2ph, ret, encoder_out)
|
87 |
-
|
88 |
-
def forward_energy(self, decoder_inp, energy, ret):
|
89 |
-
decoder_inp = decoder_inp.detach() + self.hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
|
90 |
-
ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
|
91 |
-
energy_embed_inp = energy_pred if energy is None else energy
|
92 |
-
energy_embed_inp = torch.clamp(energy_embed_inp * 256 // 4, min=0, max=255).long()
|
93 |
-
energy_embed = self.energy_embed(energy_embed_inp)
|
94 |
-
return energy_embed
|
95 |
-
|
96 |
-
def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
|
97 |
-
_, cwt_scales = get_lf0_cwt(np.ones(10))
|
98 |
-
f0 = cwt2f0(cwt_spec, mean, std, cwt_scales)
|
99 |
-
f0 = torch.cat(
|
100 |
-
[f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
|
101 |
-
f0_norm = norm_f0(f0, None)
|
102 |
-
return f0_norm
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb32-60e_deepfashion2_short_sleeved_shirt_256x192.py
DELETED
@@ -1,172 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../../../_base_/default_runtime.py',
|
3 |
-
'../../../_base_/datasets/deepfashion2.py'
|
4 |
-
]
|
5 |
-
|
6 |
-
default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
|
7 |
-
|
8 |
-
resume = False # 断点恢复
|
9 |
-
load_from = None # 模型权重加载
|
10 |
-
train_cfg = dict(by_epoch=True, max_epochs=60, val_interval=10) # 训练轮数,测试间隔
|
11 |
-
param_scheduler = [
|
12 |
-
dict( # warmup策略
|
13 |
-
type='LinearLR',
|
14 |
-
begin=0,
|
15 |
-
end=500,
|
16 |
-
start_factor=0.001,
|
17 |
-
by_epoch=False),
|
18 |
-
dict( # scheduler
|
19 |
-
type='MultiStepLR',
|
20 |
-
begin=0,
|
21 |
-
end=60,
|
22 |
-
milestones=[20, 40],
|
23 |
-
gamma=0.1,
|
24 |
-
by_epoch=True)
|
25 |
-
]
|
26 |
-
optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005)) # 优化器和学习率
|
27 |
-
auto_scale_lr = dict(base_batch_size=512) # 根据batch_size自动缩放学习率
|
28 |
-
|
29 |
-
backend_args = dict(backend='local') # 数据加载后端设置,默认从本地硬盘加载
|
30 |
-
dataset_type = 'DeepFashion2Dataset' # 数据集类名 DeepFashionDataset
|
31 |
-
data_mode = 'topdown' # 算法结构类型,用于指定标注信息加载策略
|
32 |
-
data_root = 'data/deepfashion2/' # 数据存放路径
|
33 |
-
# 定义数据编解码器,用于生成target和对pred进行解码,同时包含了输入图片和输出heatmap尺寸等信息
|
34 |
-
codec = dict(
|
35 |
-
type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
|
36 |
-
|
37 |
-
train_pipeline = [
|
38 |
-
dict(type='LoadImage'),
|
39 |
-
dict(type='GetBBoxCenterScale'),
|
40 |
-
dict(type='RandomFlip', direction='horizontal'),
|
41 |
-
dict(
|
42 |
-
type='RandomBBoxTransform',
|
43 |
-
shift_prob=0,
|
44 |
-
rotate_factor=60,
|
45 |
-
scale_factor=(0.75, 1.25)),
|
46 |
-
dict(type='TopdownAffine', input_size=codec['input_size']),
|
47 |
-
dict(type='GenerateTarget', encoder=codec),
|
48 |
-
dict(type='PackPoseInputs')
|
49 |
-
]
|
50 |
-
val_pipeline = [ # 测试时数据增强
|
51 |
-
dict(type='LoadImage', backend_args=backend_args), # 加载图片
|
52 |
-
dict(type='GetBBoxCenterScale'), # 根据bbox获取center和scale
|
53 |
-
dict(type='TopdownAffine', input_size=codec['input_size']), # 根据变换矩阵更新目标数据
|
54 |
-
dict(type='PackPoseInputs') # 对target进行打包用于训练
|
55 |
-
]
|
56 |
-
train_dataloader = dict( # 训练数据加载
|
57 |
-
batch_size=32, # 批次大小
|
58 |
-
num_workers=6, # 数据加载进程数
|
59 |
-
persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销
|
60 |
-
sampler=dict(type='DefaultSampler', shuffle=True), # 采样策略,打乱数据
|
61 |
-
dataset=dict(
|
62 |
-
type=dataset_type, # 数据集类名
|
63 |
-
data_root=data_root, # 数据集路径
|
64 |
-
data_mode=data_mode, # 算法类型
|
65 |
-
ann_file='train/deepfashion2_short_sleeved_shirt.json', # 标注文件路径
|
66 |
-
data_prefix=dict(img='train/image/'), # 图像路径
|
67 |
-
pipeline=train_pipeline # 数据流水线
|
68 |
-
))
|
69 |
-
val_dataloader = dict(
|
70 |
-
batch_size=32,
|
71 |
-
num_workers=4,
|
72 |
-
persistent_workers=True, # 在不活跃时维持进程不终止,避免反复启动进程的开销
|
73 |
-
drop_last=False,
|
74 |
-
sampler=dict(type='DefaultSampler', shuffle=False), # 采样策略,不进行打乱
|
75 |
-
dataset=dict(
|
76 |
-
type=dataset_type, # 数据集类名
|
77 |
-
data_root=data_root, # 数据集路径
|
78 |
-
data_mode=data_mode, # 算法类型
|
79 |
-
ann_file='validation/deepfashion2_short_sleeved_shirt.json', # 标注文件路径
|
80 |
-
data_prefix=dict(img='validation/image/'), # 图像路径
|
81 |
-
test_mode=True, # 测试模式开关
|
82 |
-
pipeline=val_pipeline # 数据流水线
|
83 |
-
))
|
84 |
-
test_dataloader = val_dataloader # 默认情况下不区分验证集和测试集,用户根据需要来自行定义
|
85 |
-
|
86 |
-
channel_cfg = dict(
|
87 |
-
num_output_channels=294,
|
88 |
-
dataset_joints=294,
|
89 |
-
dataset_channel=[
|
90 |
-
[
|
91 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
|
92 |
-
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
|
93 |
-
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
|
94 |
-
53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
|
95 |
-
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
|
96 |
-
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
|
97 |
-
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
|
98 |
-
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
|
99 |
-
129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
|
100 |
-
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
|
101 |
-
155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
|
102 |
-
168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
|
103 |
-
181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
|
104 |
-
194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
|
105 |
-
207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
106 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
|
107 |
-
233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
|
108 |
-
246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
|
109 |
-
259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
|
110 |
-
272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
|
111 |
-
285, 286, 287, 288, 289, 290, 291, 292, 293
|
112 |
-
],
|
113 |
-
],
|
114 |
-
inference_channel=[
|
115 |
-
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
116 |
-
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
|
117 |
-
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
|
118 |
-
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
|
119 |
-
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
|
120 |
-
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
|
121 |
-
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
|
122 |
-
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
|
123 |
-
136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
|
124 |
-
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
|
125 |
-
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
|
126 |
-
178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
|
127 |
-
192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
|
128 |
-
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
|
129 |
-
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
|
130 |
-
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
|
131 |
-
248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
|
132 |
-
262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
|
133 |
-
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
|
134 |
-
290, 291, 292, 293
|
135 |
-
])
|
136 |
-
|
137 |
-
model = dict(
|
138 |
-
type='TopdownPoseEstimator', # 模型结构决定了算法流程
|
139 |
-
data_preprocessor=dict( # 数据归一化和通道顺序调整,作为模型的一部分
|
140 |
-
type='PoseDataPreprocessor',
|
141 |
-
mean=[123.675, 116.28, 103.53],
|
142 |
-
std=[58.395, 57.12, 57.375],
|
143 |
-
bgr_to_rgb=True),
|
144 |
-
backbone=dict(
|
145 |
-
type='ResNet',
|
146 |
-
depth=50,
|
147 |
-
init_cfg=dict(
|
148 |
-
type='Pretrained', # 预训练参数,只加载backbone权重用于迁移学习
|
149 |
-
checkpoint='torchvision://resnet50')),
|
150 |
-
head=dict( # 模型头部
|
151 |
-
type='HeatmapHead',
|
152 |
-
in_channels=2048,
|
153 |
-
out_channels=channel_cfg['num_output_channels'],
|
154 |
-
# deconv_out_channels=None,
|
155 |
-
loss=dict(type='KeypointMSELoss', use_target_weight=True), # 损失函数
|
156 |
-
decoder=codec), # 解码器,将heatmap解码成坐标值
|
157 |
-
test_cfg=dict(
|
158 |
-
flip_test=True, # 开启测试时水平翻转集成
|
159 |
-
flip_mode='heatmap', # 对heatmap进行翻转
|
160 |
-
shift_heatmap=True, # 对翻转后的结果进行平移提高精度
|
161 |
-
))
|
162 |
-
|
163 |
-
val_evaluator = [
|
164 |
-
dict(type='PCKAccuracy', thr=0.2),
|
165 |
-
dict(type='AUC'),
|
166 |
-
dict(type='EPE'),
|
167 |
-
]
|
168 |
-
test_evaluator = val_evaluator # 默认情况下不区分验证集和测试集,用户根据需要来自行定义
|
169 |
-
|
170 |
-
visualizer = dict(
|
171 |
-
vis_backends=[dict(type='LocalVisBackend'),
|
172 |
-
dict(type='WandbVisBackend')])
|
spaces/Abhilashvj/planogram-compliance/CONTRIBUTING.md
DELETED
@@ -1,94 +0,0 @@
-## Contributing to YOLOv5 🚀
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
-
-- Reporting a bug
-- Discussing the current state of the code
-- Submitting a fix
-- Proposing a new feature
-- Becoming a maintainer
-
-YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
-helping push the frontiers of what's possible in AI 😃!
-
-## Submitting a Pull Request (PR) 🛠️
-
-Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
-
-### 1. Select File to Update
-
-Select `requirements.txt` to update by clicking on it in GitHub.
-<p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>
-
-### 2. Click 'Edit this file'
-
-Button is in top-right corner.
-<p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>
-
-### 3. Make Changes
-
-Change `matplotlib` version from `3.2.2` to `3.3`.
-<p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>
-
-### 4. Preview Changes and Submit PR
-
-Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
-for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
-changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
-<p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>
-
-### PR recommendations
-
-To allow your work to be integrated as seamlessly as possible, we advise you to:
-
-- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an
-automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may
-be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature'
-with the name of your local branch:
-
-```bash
-git remote add upstream https://github.com/ultralytics/yolov5.git
-git fetch upstream
-git checkout feature # <----- replace 'feature' with local branch name
-git merge upstream/master
-git push -u origin -f
-```
-
-- ✅ Verify all Continuous Integration (CI) **checks are passing**.
-- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
-but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee
-
-## Submitting a Bug Report 🐛
-
-If you spot a problem with YOLOv5 please submit a Bug Report!
-
-For us to start investigating a possibel problem we need to be able to reproduce it ourselves first. We've created a few
-short guidelines below to help users provide what we need in order to get started.
-
-When asking a question, people will be better able to provide help if you provide **code** that they can easily
-understand and use to **reproduce** the problem. This is referred to by community members as creating
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
-the problem should be:
-
-* ✅ **Minimal** – Use as little code as possible that still produces the same problem
-* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
-* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
-
-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
-should be:
-
-* ✅ **Current** – Verify that your code is up-to-date with current
-GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
-copy to ensure your problem has not already been resolved by previous commits.
-* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
-repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
-
-If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **
-Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
-a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
-understand and diagnose your problem.
-
-## License
-
-By contributing, you agree that your contributions will be licensed under
-the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/base.py
DELETED
@@ -1,181 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union, Optional
|
3 |
-
|
4 |
-
from agentverse.agents.base import BaseAgent
|
5 |
-
from agentverse.utils import AGENT_TYPES
|
6 |
-
from agentverse.environments.tasksolving_env.rules.decision_maker import (
|
7 |
-
BaseDecisionMaker,
|
8 |
-
decision_maker_registry,
|
9 |
-
)
|
10 |
-
from agentverse.environments.tasksolving_env.rules.evaluator import (
|
11 |
-
BaseEvaluator,
|
12 |
-
evaluator_registry,
|
13 |
-
)
|
14 |
-
from agentverse.environments.tasksolving_env.rules.executor import (
|
15 |
-
BaseExecutor,
|
16 |
-
executor_registry,
|
17 |
-
)
|
18 |
-
from agentverse.environments.tasksolving_env.rules.role_assigner import (
|
19 |
-
BaseRoleAssigner,
|
20 |
-
role_assigner_registry,
|
21 |
-
)
|
22 |
-
from agentverse.environments import BaseRule
|
23 |
-
|
24 |
-
if TYPE_CHECKING:
|
25 |
-
from agentverse.message import SolverMessage, ExecutorMessage
|
26 |
-
|
27 |
-
|
28 |
-
class TasksolvingRule(BaseRule):
|
29 |
-
role_assigner: BaseRoleAssigner
|
30 |
-
decision_maker: BaseDecisionMaker
|
31 |
-
executor: BaseExecutor
|
32 |
-
evaluator: BaseEvaluator
|
33 |
-
|
34 |
-
role_assign_only_once: bool = False
|
35 |
-
add_execution_result_to_critic: bool = False
|
36 |
-
add_execution_result_to_solver: bool = False
|
37 |
-
|
38 |
-
def __init__(
|
39 |
-
self,
|
40 |
-
role_assigner_config,
|
41 |
-
decision_maker_config,
|
42 |
-
executor_config,
|
43 |
-
evaluator_config,
|
44 |
-
*args,
|
45 |
-
**kwargs,
|
46 |
-
):
|
47 |
-
def build_components(config: Dict, registry):
|
48 |
-
component_type = config.pop("type")
|
49 |
-
component = registry.build(component_type, **config)
|
50 |
-
return component
|
51 |
-
|
52 |
-
role_assigner = build_components(
|
53 |
-
role_assigner_config,
|
54 |
-
role_assigner_registry,
|
55 |
-
)
|
56 |
-
decision_maker = build_components(
|
57 |
-
decision_maker_config,
|
58 |
-
decision_maker_registry,
|
59 |
-
)
|
60 |
-
executor = build_components(executor_config, executor_registry)
|
61 |
-
evaluator = build_components(evaluator_config, evaluator_registry)
|
62 |
-
super().__init__(
|
63 |
-
role_assigner=role_assigner,
|
64 |
-
decision_maker=decision_maker,
|
65 |
-
executor=executor,
|
66 |
-
evaluator=evaluator,
|
67 |
-
*args,
|
68 |
-
**kwargs,
|
69 |
-
)
|
70 |
-
|
71 |
-
def role_assign(
|
72 |
-
self,
|
73 |
-
task_description: str,
|
74 |
-
agents: List[BaseAgent],
|
75 |
-
cnt_turn: int,
|
76 |
-
advice: str = "",
|
77 |
-
) -> List[BaseAgent]:
|
78 |
-
"""Assign roles to agents"""
|
79 |
-
if self.role_assign_only_once and cnt_turn > 0:
|
80 |
-
agents = [agents[AGENT_TYPES.SOLVER]] + agents[AGENT_TYPES.CRITIC]
|
81 |
-
else:
|
82 |
-
agents = self.role_assigner.step(
|
83 |
-
role_assigner=agents[AGENT_TYPES.ROLE_ASSIGNMENT],
|
84 |
-
group_members=[agents[AGENT_TYPES.SOLVER]] + agents[AGENT_TYPES.CRITIC],
|
85 |
-
advice=advice,
|
86 |
-
task_description=task_description,
|
87 |
-
)
|
88 |
-
if self.role_assign_only_once and cnt_turn == 0:
|
89 |
-
agents[AGENT_TYPES.SOLVER] = agents[0]
|
90 |
-
agents[AGENT_TYPES.CRITIC] = agents[1:]
|
91 |
-
return agents
|
92 |
-
|
93 |
-
async def decision_making(
|
94 |
-
self,
|
95 |
-
task_description: str,
|
96 |
-
agents: List[BaseAgent],
|
97 |
-
previous_plan: str,
|
98 |
-
advice: str = "No advice yet.",
|
99 |
-
) -> List[SolverMessage]:
|
100 |
-
# TODO: plan should be string or a special type of object?
|
101 |
-
|
102 |
-
# dynamic
|
103 |
-
if "dynamic" in self.decision_maker.name:
|
104 |
-
plan = await self.decision_maker.astep(
|
105 |
-
agents=[agents[AGENT_TYPES.SOLVER], *agents[AGENT_TYPES.CRITIC]],
|
106 |
-
manager=agents[AGENT_TYPES.MANAGER],
|
107 |
-
task_description=task_description,
|
108 |
-
previous_plan=previous_plan,
|
109 |
-
advice=advice,
|
110 |
-
)
|
111 |
-
else:
|
112 |
-
plan = await self.decision_maker.astep(
|
113 |
-
agents=[agents[AGENT_TYPES.SOLVER], *agents[AGENT_TYPES.CRITIC]],
|
114 |
-
task_description=task_description,
|
115 |
-
previous_plan=previous_plan,
|
116 |
-
advice=advice,
|
117 |
-
)
|
118 |
-
return plan
|
119 |
-
|
120 |
-
async def execute(
|
121 |
-
self,
|
122 |
-
task_description: str,
|
123 |
-
agents: List[BaseAgent],
|
124 |
-
final_solution: List[SolverMessage],
|
125 |
-
) -> Any:
|
126 |
-
"""execution stage.
|
127 |
-
Use the executor to finish the task.
|
128 |
-
"""
|
129 |
-
|
130 |
-
results = await self.executor.astep(
|
131 |
-
agents[AGENT_TYPES.EXECUTION], task_description, final_solution
|
132 |
-
)
|
133 |
-
if self.add_execution_result_to_critic:
|
134 |
-
for agent in agents[AGENT_TYPES.CRITIC]:
|
135 |
-
agent.add_message_to_memory(results)
|
136 |
-
if self.add_execution_result_to_solver:
|
137 |
-
agents[AGENT_TYPES.SOLVER].add_message_to_memory(results)
|
138 |
-
return results
|
139 |
-
|
140 |
-
def evaluate(
|
141 |
-
self,
|
142 |
-
task_description: str,
|
143 |
-
agents: List[BaseAgent],
|
144 |
-
solution: List[SolverMessage],
|
145 |
-
result: List[ExecutorMessage],
|
146 |
-
) -> Tuple[List[int], str]:
|
147 |
-
"""evaluation stage."""
|
148 |
-
# if self.human_eval:
|
149 |
-
# print("This round, LLM gave the following result:")
|
150 |
-
# print(result)
|
151 |
-
# comprehensiveness = input("Please evaluate the comprehensiveness>> ")
|
152 |
-
# detailedness = input("Please evaluate the detailedness>> ")
|
153 |
-
# feasibility = input("Please evaluate the feasibility>> ")
|
154 |
-
# novelty = input("Please evaluate the novelty>> ")
|
155 |
-
# advice = input("Please give some advice>>")
|
156 |
-
# try:
|
157 |
-
# comprehensiveness = int(comprehensiveness)
|
158 |
-
# detailedness = int(detailedness)
|
159 |
-
# feasibility = int(feasibility)
|
160 |
-
# novelty = int(novelty)
|
161 |
-
# except ValueError:
|
162 |
-
# logger.error("Bad response from human evaluator!")
|
163 |
-
# return ([comprehensiveness, detailedness, feasibility, novelty], advice)
|
164 |
-
# else:
|
165 |
-
evaluation = self.evaluator.step(
|
166 |
-
agent=agents[AGENT_TYPES.EVALUATION],
|
167 |
-
solution=solution,
|
168 |
-
result=result,
|
169 |
-
task_description=task_description,
|
170 |
-
all_role_description=[
|
171 |
-
agents[AGENT_TYPES.SOLVER].role_description,
|
172 |
-
*[agent.role_description for agent in agents[AGENT_TYPES.CRITIC]],
|
173 |
-
],
|
174 |
-
)
|
175 |
-
return evaluation.score, evaluation.advice
|
176 |
-
|
177 |
-
def reset(self) -> None:
|
178 |
-
self.role_assigner.reset()
|
179 |
-
self.decision_maker.reset()
|
180 |
-
self.executor.reset()
|
181 |
-
self.evaluator.reset()
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/audio/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
-import Audio from './Audio';
-import Base from '../base/Base';
-
-export default function Factory(
-    config?: Base.IConfig
-): Audio;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/AddChildMethods.js
DELETED
@@ -1,46 +0,0 @@
-import FixWidthSizer from '../fixwidthsizer/FixWidthSizer.js';
-import IsArray from '../../../plugins/utils/object/IsArray.js';
-
-const SizerAdd = FixWidthSizer.prototype.add;
-
-var Add = function (gameObject) {
-    SizerAdd.call(this, gameObject);
-    this.buttonGroup.add(gameObject);
-    return this;
-};
-
-export default {
-    addButton(gameObject) {
-        if (IsArray(gameObject)) {
-            var gameObjects = gameObject;
-            for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
-                Add.call(this, gameObjects[i]);
-            }
-        } else {
-            Add.call(this, gameObject);
-        }
-        return this;
-    },
-
-    addButtons(gameObjects) {
-        if (IsArray(gameObjects[0])) {
-            // 2d array
-            var lines = gameObjects, line;
-            for (var lineIdx = 0, lastLineIdx = (lines.length - 1); lineIdx <= lastLineIdx; lineIdx++) {
-                line = lines[lineIdx];
-                for (var i = 0, cnt = line.length; i < cnt; i++) {
-                    Add.call(this, line[i]);
-                }
-                if (lineIdx > lastLineIdx) {
-                    SizerAdd.addNewLine(this);
-                }
-            }
-        } else {
-            // 1d array
-            for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
-                Add.call(this, gameObjects[i]);
-            }
-        }
-        return this;
-    }
-}
spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/ops/__init__.py
DELETED
@@ -1,9 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-#
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-# empty
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/style_module/style_module.py
DELETED
@@ -1,300 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
import random
|
5 |
-
|
6 |
-
|
7 |
-
def calc_mean_std(x, eps=1e-8):
|
8 |
-
"""
|
9 |
-
calculating channel-wise instance mean and standard variance
|
10 |
-
x: shape of (N,C,*)
|
11 |
-
"""
|
12 |
-
mean = torch.mean(x.flatten(2), dim=-1, keepdim=True) # size of (N, C, 1)
|
13 |
-
std = torch.std(x.flatten(2), dim=-1, keepdim=True) + eps # size of (N, C, 1)
|
14 |
-
|
15 |
-
return mean, std
|
16 |
-
|
17 |
-
|
18 |
-
def cal_adain_style_loss(x, y):
|
19 |
-
"""
|
20 |
-
style loss in one layer
|
21 |
-
|
22 |
-
Args:
|
23 |
-
x, y: feature maps of size [N, C, H, W]
|
24 |
-
"""
|
25 |
-
x_mean, x_std = calc_mean_std(x)
|
26 |
-
y_mean, y_std = calc_mean_std(y)
|
27 |
-
|
28 |
-
return nn.functional.mse_loss(x_mean, y_mean) \
|
29 |
-
+ nn.functional.mse_loss(x_std, y_std)
|
30 |
-
|
31 |
-
|
32 |
-
def cal_mse_content_loss(x, y):
|
33 |
-
return nn.functional.mse_loss(x, y)
|
34 |
-
|
35 |
-
|
36 |
-
class LearnableIN(nn.Module):
|
37 |
-
'''
|
38 |
-
Input: (N, C, L) or (C, L)
|
39 |
-
'''
|
40 |
-
|
41 |
-
def __init__(self, dim=256):
|
42 |
-
super().__init__()
|
43 |
-
self.IN = torch.nn.InstanceNorm1d(dim, momentum=1e-4, track_running_stats=True)
|
44 |
-
|
45 |
-
def forward(self, x):
|
46 |
-
if x.size()[-1] <= 1:
|
47 |
-
return x
|
48 |
-
return self.IN(x)
|
49 |
-
|
50 |
-
|
51 |
-
class SimpleLinearStylizer(nn.Module):
|
52 |
-
def __init__(self, input_dim=256, embed_dim=32, n_layers=3) -> None:
|
53 |
-
super().__init__()
|
54 |
-
self.input_dim = input_dim
|
55 |
-
self.embed_dim = embed_dim
|
56 |
-
|
57 |
-
self.IN = LearnableIN(input_dim)
|
58 |
-
|
59 |
-
self.q_embed = nn.Conv1d(input_dim, embed_dim, 1)
|
60 |
-
self.k_embed = nn.Conv1d(input_dim, embed_dim, 1)
|
61 |
-
self.v_embed = nn.Conv1d(input_dim, embed_dim, 1)
|
62 |
-
|
63 |
-
self.unzipper = nn.Conv1d(embed_dim, input_dim, 1, bias=0)
|
64 |
-
|
65 |
-
s_net = []
|
66 |
-
for i in range(n_layers - 1):
|
67 |
-
out_dim = max(embed_dim, input_dim // 2)
|
68 |
-
s_net.append(
|
69 |
-
nn.Sequential(
|
70 |
-
nn.Conv1d(input_dim, out_dim, 1),
|
71 |
-
nn.ReLU(inplace=True),
|
72 |
-
)
|
73 |
-
)
|
74 |
-
input_dim = out_dim
|
75 |
-
s_net.append(nn.Conv1d(input_dim, embed_dim, 1))
|
76 |
-
self.s_net = nn.Sequential(*s_net)
|
77 |
-
|
78 |
-
self.s_fc = nn.Linear(embed_dim ** 2, embed_dim ** 2)
|
79 |
-
|
80 |
-
def _vectorized_covariance(self, x):
|
81 |
-
cov = torch.bmm(x, x.transpose(2, 1)) / x.size(-1)
|
82 |
-
cov = cov.flatten(1)
|
83 |
-
return cov
|
84 |
-
|
85 |
-
def get_content_matrix(self, c):
|
86 |
-
'''
|
87 |
-
Args:
|
88 |
-
c: content feature [N,input_dim,S]
|
89 |
-
Return:
|
90 |
-
mat: [N,S,embed_dim,embed_dim]
|
91 |
-
'''
|
92 |
-
normalized_c = self.IN(c)
|
93 |
-
# normalized_c = torch.nn.functional.instance_norm(c)
|
94 |
-
q_embed = self.q_embed(normalized_c)
|
95 |
-
k_embed = self.k_embed(normalized_c)
|
96 |
-
|
97 |
-
c_cov = q_embed.transpose(1, 2).unsqueeze(3) * k_embed.transpose(1, 2).unsqueeze(2) # [N,S,embed_dim,embed_dim]
|
98 |
-
attn = torch.softmax(c_cov, -1) # [N,S,embed_dim,embed_dim]
|
99 |
-
|
100 |
-
return attn, normalized_c
|
101 |
-
|
102 |
-
def get_style_mean_std_matrix(self, s):
|
103 |
-
'''
|
104 |
-
Args:
|
105 |
-
s: style feature [N,input_dim,S]
|
106 |
-
|
107 |
-
Return:
|
108 |
-
mat: [N,embed_dim,embed_dim]
|
109 |
-
'''
|
110 |
-
s_mean = s.mean(-1, keepdim=True)
|
111 |
-
s_std = s.std(-1, keepdim=True)
|
112 |
-
s = s - s_mean
|
113 |
-
|
114 |
-
s_embed = self.s_net(s)
|
115 |
-
s_cov = self._vectorized_covariance(s_embed)
|
116 |
-
s_mat = self.s_fc(s_cov)
|
117 |
-
s_mat = s_mat.reshape(-1, self.embed_dim, self.embed_dim)
|
118 |
-
|
119 |
-
return s_mean, s_std, s_mat
|
120 |
-
|
121 |
-
def transform_content_3D(self, c):
|
122 |
-
'''
|
123 |
-
Args:
|
124 |
-
c: content feature [N,input_dim,S]
|
125 |
-
Return:
|
126 |
-
transformed_c: [N,embed_dim,S]
|
127 |
-
'''
|
128 |
-
attn, normalized_c = self.get_content_matrix(c) # [N,S,embed_dim,embed_dim]
|
129 |
-
c = self.v_embed(normalized_c) # [N,embed_dim,S]
|
130 |
-
c = c.transpose(1, 2).unsqueeze(3) # [N,S,embed_dim,1]
|
131 |
-
c = torch.matmul(attn, c).squeeze(3) # [N,S,embed_dim]
|
132 |
-
|
133 |
-
return c.transpose(1, 2)
|
134 |
-
|
135 |
-
def transfer_style_2D(self, s_mean_std_mat, c, acc_map):
|
136 |
-
'''
|
137 |
-
Agrs:
|
138 |
-
c: content feature map after volume rendering [N,embed_dim,S]
|
139 |
-
s_mat: style matrix [N,embed_dim,embed_dim]
|
140 |
-
acc_map: [S]
|
141 |
-
|
142 |
-
s_mean = [N,input_dim,1]
|
143 |
-
s_std = [N,input_dim,1]
|
144 |
-
'''
|
145 |
-
s_mean, s_std, s_mat = s_mean_std_mat
|
146 |
-
|
147 |
-
cs = torch.bmm(s_mat, c) # [N,embed_dim,S]
|
148 |
-
cs = self.unzipper(cs) # [N,input_dim,S]
|
149 |
-
|
150 |
-
cs = cs * s_std + s_mean * acc_map[None, None, ...]
|
151 |
-
|
152 |
-
return cs
|
153 |
-
|
154 |
-
|
155 |
-
class AdaAttN(nn.Module):
|
156 |
-
""" Attention-weighted AdaIN (Liu et al., ICCV 21) """
|
157 |
-
|
158 |
-
def __init__(self, qk_dim, v_dim):
|
159 |
-
"""
|
160 |
-
Args:
|
161 |
-
qk_dim (int): query and key size.
|
162 |
-
v_dim (int): value size.
|
163 |
-
"""
|
164 |
-
super(AdaAttN, self).__init__()
|
165 |
-
|
166 |
-
self.q_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
167 |
-
self.k_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
168 |
-
self.s_embed = nn.Conv1d(v_dim, v_dim, 1)
|
169 |
-
|
170 |
-
def forward(self, q, k):
|
171 |
-
"""
|
172 |
-
Args:
|
173 |
-
q (float tensor, (bs, qk, *)): query (content) features.
|
174 |
-
k (float tensor, (bs, qk, *)): key (style) features.
|
175 |
-
c (float tensor, (bs, v, *)): content value features.
|
176 |
-
s (float tensor, (bs, v, *)): style value features.
|
177 |
-
|
178 |
-
Returns:
|
179 |
-
cs (float tensor, (bs, v, *)): stylized content features.
|
180 |
-
"""
|
181 |
-
c, s = q, k
|
182 |
-
|
183 |
-
shape = c.shape
|
184 |
-
q, k = q.flatten(2), k.flatten(2)
|
185 |
-
c, s = c.flatten(2), s.flatten(2)
|
186 |
-
|
187 |
-
# QKV attention with projected content and style features
|
188 |
-
q = self.q_embed(F.instance_norm(q)).transpose(2, 1) # (bs, n, qk)
|
189 |
-
k = self.k_embed(F.instance_norm(k)) # (bs, qk, m)
|
190 |
-
s = self.s_embed(s).transpose(2, 1) # (bs, m, v)
|
191 |
-
attn = F.softmax(torch.bmm(q, k), -1) # (bs, n, m)
|
192 |
-
|
193 |
-
# attention-weighted channel-wise statistics
|
194 |
-
mean = torch.bmm(attn, s) # (bs, n, v)
|
195 |
-
var = F.relu(torch.bmm(attn, s ** 2) - mean ** 2) # (bs, n, v)
|
196 |
-
mean = mean.transpose(2, 1) # (bs, v, n)
|
197 |
-
std = torch.sqrt(var).transpose(2, 1) # (bs, v, n)
|
198 |
-
|
199 |
-
cs = F.instance_norm(c) * std + mean # (bs, v, n)
|
200 |
-
cs = cs.reshape(shape)
|
201 |
-
return cs
|
202 |
-
|
203 |
-
|
204 |
-
class AdaAttN_new_IN(nn.Module):
|
205 |
-
""" Attention-weighted AdaIN (Liu et al., ICCV 21) """
|
206 |
-
|
207 |
-
def __init__(self, qk_dim, v_dim):
|
208 |
-
"""
|
209 |
-
Args:
|
210 |
-
qk_dim (int): query and key size.
|
211 |
-
v_dim (int): value size.
|
212 |
-
"""
|
213 |
-
super(AdaAttN_new_IN, self).__init__()
|
214 |
-
|
215 |
-
self.q_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
216 |
-
self.k_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
217 |
-
self.s_embed = nn.Conv1d(v_dim, v_dim, 1)
|
218 |
-
self.IN = LearnableIN(qk_dim)
|
219 |
-
|
220 |
-
def forward(self, q, k):
|
221 |
-
"""
|
222 |
-
Args:
|
223 |
-
q (float tensor, (bs, qk, *)): query (content) features.
|
224 |
-
k (float tensor, (bs, qk, *)): key (style) features.
|
225 |
-
c (float tensor, (bs, v, *)): content value features.
|
226 |
-
s (float tensor, (bs, v, *)): style value features.
|
227 |
-
|
228 |
-
Returns:
|
229 |
-
cs (float tensor, (bs, v, *)): stylized content features.
|
230 |
-
"""
|
231 |
-
c, s = q, k
|
232 |
-
|
233 |
-
shape = c.shape
|
234 |
-
q, k = q.flatten(2), k.flatten(2)
|
235 |
-
c, s = c.flatten(2), s.flatten(2)
|
236 |
-
|
237 |
-
# QKV attention with projected content and style features
|
238 |
-
q = self.q_embed(self.IN(q)).transpose(2, 1) # (bs, n, qk)
|
239 |
-
k = self.k_embed(F.instance_norm(k)) # (bs, qk, m)
|
240 |
-
s = self.s_embed(s).transpose(2, 1) # (bs, m, v)
|
241 |
-
attn = F.softmax(torch.bmm(q, k), -1) # (bs, n, m)
|
242 |
-
|
243 |
-
# attention-weighted channel-wise statistics
|
244 |
-
mean = torch.bmm(attn, s) # (bs, n, v)
|
245 |
-
var = F.relu(torch.bmm(attn, s ** 2) - mean ** 2) # (bs, n, v)
|
246 |
-
mean = mean.transpose(2, 1) # (bs, v, n)
|
247 |
-
std = torch.sqrt(var).transpose(2, 1) # (bs, v, n)
|
248 |
-
|
249 |
-
cs = self.IN(c) * std + mean # (bs, v, n)
|
250 |
-
cs = cs.reshape(shape)
|
251 |
-
return cs
|
252 |
-
|
253 |
-
|
254 |
-
class AdaAttN_woin(nn.Module):
|
255 |
-
""" Attention-weighted AdaIN (Liu et al., ICCV 21) """
|
256 |
-
|
257 |
-
def __init__(self, qk_dim, v_dim):
|
258 |
-
"""
|
259 |
-
Args:
|
260 |
-
qk_dim (int): query and key size.
|
261 |
-
v_dim (int): value size.
|
262 |
-
"""
|
263 |
-
super().__init__()
|
264 |
-
|
265 |
-
self.q_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
266 |
-
self.k_embed = nn.Conv1d(qk_dim, qk_dim, 1)
|
267 |
-
self.s_embed = nn.Conv1d(v_dim, v_dim, 1)
|
268 |
-
|
269 |
-
def forward(self, q, k):
|
270 |
-
"""
|
271 |
-
Args:
|
272 |
-
q (float tensor, (bs, qk, *)): query (content) features.
|
273 |
-
k (float tensor, (bs, qk, *)): key (style) features.
|
274 |
-
c (float tensor, (bs, v, *)): content value features.
|
275 |
-
s (float tensor, (bs, v, *)): style value features.
|
276 |
-
|
277 |
-
Returns:
|
278 |
-
cs (float tensor, (bs, v, *)): stylized content features.
|
279 |
-
"""
|
280 |
-
c, s = q, k
|
281 |
-
|
282 |
-
shape = c.shape
|
283 |
-
q, k = q.flatten(2), k.flatten(2)
|
284 |
-
c, s = c.flatten(2), s.flatten(2)
|
285 |
-
|
286 |
-
# QKV attention with projected content and style features
|
287 |
-
q = self.q_embed(q).transpose(2, 1) # (bs, n, qk)
|
288 |
-
k = self.k_embed(k) # (bs, qk, m)
|
289 |
-
s = self.s_embed(s).transpose(2, 1) # (bs, m, v)
|
290 |
-
attn = F.softmax(torch.bmm(q, k), -1) # (bs, n, m)
|
291 |
-
|
292 |
-
# attention-weighted channel-wise statistics
|
293 |
-
mean = torch.bmm(attn, s) # (bs, n, v)
|
294 |
-
var = F.relu(torch.bmm(attn, s ** 2) - mean ** 2) # (bs, n, v)
|
295 |
-
mean = mean.transpose(2, 1) # (bs, v, n)
|
296 |
-
std = torch.sqrt(var).transpose(2, 1) # (bs, v, n)
|
297 |
-
|
298 |
-
cs = c * std + mean # (bs, v, n)
|
299 |
-
cs = cs.reshape(shape)
|
300 |
-
return cs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/asymmetricautoencoderkl.md
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
# AsymmetricAutoencoderKL
|
2 |
-
|
3 |
-
Improved larger variational autoencoder (VAE) model with KL loss for inpainting task: [Designing a Better Asymmetric VQGAN for StableDiffusion](https://arxiv.org/abs/2306.04632) by Zixin Zhu, Xuelu Feng, Dongdong Chen, Jianmin Bao, Le Wang, Yinpeng Chen, Lu Yuan, Gang Hua.
|
4 |
-
|
5 |
-
The abstract from the paper is:
|
6 |
-
|
7 |
-
*StableDiffusion is a revolutionary text-to-image generator that is causing a stir in the world of image generation and editing. Unlike traditional methods that learn a diffusion model in pixel space, StableDiffusion learns a diffusion model in the latent space via a VQGAN, ensuring both efficiency and quality. It not only supports image generation tasks, but also enables image editing for real images, such as image inpainting and local editing. However, we have observed that the vanilla VQGAN used in StableDiffusion leads to significant information loss, causing distortion artifacts even in non-edited image regions. To this end, we propose a new asymmetric VQGAN with two simple designs. Firstly, in addition to the input from the encoder, the decoder contains a conditional branch that incorporates information from task-specific priors, such as the unmasked image region in inpainting. Secondly, the decoder is much heavier than the encoder, allowing for more detailed recovery while only slightly increasing the total inference cost. The training cost of our asymmetric VQGAN is cheap, and we only need to retrain a new asymmetric decoder while keeping the vanilla VQGAN encoder and StableDiffusion unchanged. Our asymmetric VQGAN can be widely used in StableDiffusion-based inpainting and local editing methods. Extensive experiments demonstrate that it can significantly improve the inpainting and editing performance, while maintaining the original text-to-image capability. The code is available at https://github.com/buxiangzhiren/Asymmetric_VQGAN*
|
8 |
-
|
9 |
-
Evaluation results can be found in section 4.1 of the original paper.
|
10 |
-
|
11 |
-
## Available checkpoints
|
12 |
-
|
13 |
-
* [https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5](https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-1-5)
|
14 |
-
* [https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-2](https://huggingface.co/cross-attention/asymmetric-autoencoder-kl-x-2)
|
15 |
-
|
16 |
-
## Example Usage
|
17 |
-
|
18 |
-
```python
|
19 |
-
from io import BytesIO
|
20 |
-
from PIL import Image
|
21 |
-
import requests
|
22 |
-
from diffusers import AsymmetricAutoencoderKL, StableDiffusionInpaintPipeline
|
23 |
-
|
24 |
-
|
25 |
-
def download_image(url: str) -> Image.Image:
|
26 |
-
response = requests.get(url)
|
27 |
-
return Image.open(BytesIO(response.content)).convert("RGB")
|
28 |
-
|
29 |
-
|
30 |
-
prompt = "a photo of a person"
|
31 |
-
img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
|
32 |
-
mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
|
33 |
-
|
34 |
-
image = download_image(img_url).resize((256, 256))
|
35 |
-
mask_image = download_image(mask_url).resize((256, 256))
|
36 |
-
|
37 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
|
38 |
-
pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
|
39 |
-
pipe.to("cuda")
|
40 |
-
|
41 |
-
image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0]
|
42 |
-
image.save("image.jpeg")
|
43 |
-
```
|
44 |
-
|
45 |
-
## AsymmetricAutoencoderKL
|
46 |
-
|
47 |
-
[[autodoc]] models.autoencoder_asym_kl.AsymmetricAutoencoderKL
|
48 |
-
|
49 |
-
## AutoencoderKLOutput
|
50 |
-
|
51 |
-
[[autodoc]] models.autoencoder_kl.AutoencoderKLOutput
|
52 |
-
|
53 |
-
## DecoderOutput
|
54 |
-
|
55 |
-
[[autodoc]] models.vae.DecoderOutput
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py
DELETED
@@ -1,319 +0,0 @@
|
|
1 |
-
# Copyright 2023 Open AI and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
from dataclasses import dataclass
|
16 |
-
from typing import List, Optional, Union
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import PIL
|
20 |
-
import torch
|
21 |
-
from transformers import CLIPImageProcessor, CLIPVisionModel
|
22 |
-
|
23 |
-
from ...models import PriorTransformer
|
24 |
-
from ...schedulers import HeunDiscreteScheduler
|
25 |
-
from ...utils import (
|
26 |
-
BaseOutput,
|
27 |
-
logging,
|
28 |
-
randn_tensor,
|
29 |
-
replace_example_docstring,
|
30 |
-
)
|
31 |
-
from ..pipeline_utils import DiffusionPipeline
|
32 |
-
from .renderer import ShapERenderer
|
33 |
-
|
34 |
-
|
35 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
36 |
-
|
37 |
-
EXAMPLE_DOC_STRING = """
|
38 |
-
Examples:
|
39 |
-
```py
|
40 |
-
>>> from PIL import Image
|
41 |
-
>>> import torch
|
42 |
-
>>> from diffusers import DiffusionPipeline
|
43 |
-
>>> from diffusers.utils import export_to_gif, load_image
|
44 |
-
|
45 |
-
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
46 |
-
|
47 |
-
>>> repo = "openai/shap-e-img2img"
|
48 |
-
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
|
49 |
-
>>> pipe = pipe.to(device)
|
50 |
-
|
51 |
-
>>> guidance_scale = 3.0
|
52 |
-
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
|
53 |
-
>>> image = load_image(image_url).convert("RGB")
|
54 |
-
|
55 |
-
>>> images = pipe(
|
56 |
-
... image,
|
57 |
-
... guidance_scale=guidance_scale,
|
58 |
-
... num_inference_steps=64,
|
59 |
-
... frame_size=256,
|
60 |
-
... ).images
|
61 |
-
|
62 |
-
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
|
63 |
-
```
|
64 |
-
"""
|
65 |
-
|
66 |
-
|
67 |
-
@dataclass
|
68 |
-
class ShapEPipelineOutput(BaseOutput):
|
69 |
-
"""
|
70 |
-
Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`].
|
71 |
-
|
72 |
-
Args:
|
73 |
-
images (`torch.FloatTensor`)
|
74 |
-
A list of images for 3D rendering.
|
75 |
-
"""
|
76 |
-
|
77 |
-
images: Union[PIL.Image.Image, np.ndarray]
|
78 |
-
|
79 |
-
|
80 |
-
class ShapEImg2ImgPipeline(DiffusionPipeline):
|
81 |
-
"""
|
82 |
-
Pipeline for generating latent representation of a 3D asset and rendering with NeRF method with Shap-E from an
|
83 |
-
image.
|
84 |
-
|
85 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
86 |
-
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
87 |
-
|
88 |
-
Args:
|
89 |
-
prior ([`PriorTransformer`]):
|
90 |
-
The canonincal unCLIP prior to approximate the image embedding from the text embedding.
|
91 |
-
image_encoder ([`CLIPVisionModel`]):
|
92 |
-
Frozen image-encoder.
|
93 |
-
image_processor (`CLIPImageProcessor`):
|
94 |
-
A [`~transformers.CLIPImageProcessor`] to process images.
|
95 |
-
scheduler ([`HeunDiscreteScheduler`]):
|
96 |
-
A scheduler to be used in combination with `prior` to generate image embedding.
|
97 |
-
shap_e_renderer ([`ShapERenderer`]):
|
98 |
-
Shap-E renderer projects the generated latents into parameters of a MLP that's used to create 3D objects
|
99 |
-
with the NeRF rendering method.
|
100 |
-
"""
|
101 |
-
|
102 |
-
def __init__(
|
103 |
-
self,
|
104 |
-
prior: PriorTransformer,
|
105 |
-
image_encoder: CLIPVisionModel,
|
106 |
-
image_processor: CLIPImageProcessor,
|
107 |
-
scheduler: HeunDiscreteScheduler,
|
108 |
-
shap_e_renderer: ShapERenderer,
|
109 |
-
):
|
110 |
-
super().__init__()
|
111 |
-
|
112 |
-
self.register_modules(
|
113 |
-
prior=prior,
|
114 |
-
image_encoder=image_encoder,
|
115 |
-
image_processor=image_processor,
|
116 |
-
scheduler=scheduler,
|
117 |
-
shap_e_renderer=shap_e_renderer,
|
118 |
-
)
|
119 |
-
|
120 |
-
# Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
|
121 |
-
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
|
122 |
-
if latents is None:
|
123 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
124 |
-
else:
|
125 |
-
if latents.shape != shape:
|
126 |
-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
127 |
-
latents = latents.to(device)
|
128 |
-
|
129 |
-
latents = latents * scheduler.init_noise_sigma
|
130 |
-
return latents
|
131 |
-
|
132 |
-
def _encode_image(
|
133 |
-
self,
|
134 |
-
image,
|
135 |
-
device,
|
136 |
-
num_images_per_prompt,
|
137 |
-
do_classifier_free_guidance,
|
138 |
-
):
|
139 |
-
if isinstance(image, List) and isinstance(image[0], torch.Tensor):
|
140 |
-
image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
|
141 |
-
|
142 |
-
if not isinstance(image, torch.Tensor):
|
143 |
-
image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
|
144 |
-
|
145 |
-
image = image.to(dtype=self.image_encoder.dtype, device=device)
|
146 |
-
|
147 |
-
image_embeds = self.image_encoder(image)["last_hidden_state"]
|
148 |
-
image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
|
149 |
-
|
150 |
-
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
151 |
-
|
152 |
-
if do_classifier_free_guidance:
|
153 |
-
negative_image_embeds = torch.zeros_like(image_embeds)
|
154 |
-
|
155 |
-
# For classifier free guidance, we need to do two forward passes.
|
156 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
157 |
-
# to avoid doing two forward passes
|
158 |
-
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
159 |
-
|
160 |
-
return image_embeds
|
161 |
-
|
162 |
-
@torch.no_grad()
|
163 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
164 |
-
def __call__(
|
165 |
-
self,
|
166 |
-
image: Union[PIL.Image.Image, List[PIL.Image.Image]],
|
167 |
-
num_images_per_prompt: int = 1,
|
168 |
-
num_inference_steps: int = 25,
|
169 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
170 |
-
latents: Optional[torch.FloatTensor] = None,
|
171 |
-
guidance_scale: float = 4.0,
|
172 |
-
frame_size: int = 64,
|
173 |
-
output_type: Optional[str] = "pil", # pil, np, latent, mesh
|
174 |
-
return_dict: bool = True,
|
175 |
-
):
|
176 |
-
"""
|
177 |
-
The call function to the pipeline for generation.
|
178 |
-
|
179 |
-
Args:
|
180 |
-
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
|
181 |
-
`Image` or tensor representing an image batch to be used as the starting point. Can also accept image
|
182 |
-
latents as `image`, if passing latents directly, it will not be encoded again.
|
183 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
184 |
-
The number of images to generate per prompt.
|
185 |
-
num_inference_steps (`int`, *optional*, defaults to 100):
|
186 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
187 |
-
expense of slower inference.
|
188 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
189 |
-
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
190 |
-
generation deterministic.
|
191 |
-
latents (`torch.FloatTensor`, *optional*):
|
192 |
-
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
193 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
194 |
-
tensor is generated by sampling using the supplied random `generator`.
|
195 |
-
guidance_scale (`float`, *optional*, defaults to 4.0):
|
196 |
-
A higher guidance scale value encourages the model to generate images closely linked to the text
|
197 |
-
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
198 |
-
frame_size (`int`, *optional*, default to 64):
|
199 |
-
The width and height of each image frame of the generated 3D output.
|
200 |
-
output_type (`str`, *optional*, defaults to `"pt"`):
|
201 |
-
(`np.array`),`"latent"` (`torch.Tensor`), mesh ([`MeshDecoderOutput`]).
|
202 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
203 |
-
Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain
|
204 |
-
tuple.
|
205 |
-
|
206 |
-
Examples:
|
207 |
-
|
208 |
-
Returns:
|
209 |
-
[`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`:
|
210 |
-
If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned,
|
211 |
-
otherwise a `tuple` is returned where the first element is a list with the generated images.
|
212 |
-
"""
|
213 |
-
|
214 |
-
if isinstance(image, PIL.Image.Image):
|
215 |
-
batch_size = 1
|
216 |
-
elif isinstance(image, torch.Tensor):
|
217 |
-
batch_size = image.shape[0]
|
218 |
-
elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
|
219 |
-
batch_size = len(image)
|
220 |
-
else:
|
221 |
-
raise ValueError(
|
222 |
-
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
|
223 |
-
)
|
224 |
-
|
225 |
-
device = self._execution_device
|
226 |
-
|
227 |
-
batch_size = batch_size * num_images_per_prompt
|
228 |
-
|
229 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
230 |
-
image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
|
231 |
-
|
232 |
-
# prior
|
233 |
-
|
234 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
235 |
-
timesteps = self.scheduler.timesteps
|
236 |
-
|
237 |
-
num_embeddings = self.prior.config.num_embeddings
|
238 |
-
embedding_dim = self.prior.config.embedding_dim
|
239 |
-
|
240 |
-
latents = self.prepare_latents(
|
241 |
-
(batch_size, num_embeddings * embedding_dim),
|
242 |
-
image_embeds.dtype,
|
243 |
-
device,
|
244 |
-
generator,
|
245 |
-
latents,
|
246 |
-
self.scheduler,
|
247 |
-
)
|
248 |
-
|
249 |
-
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
|
250 |
-
latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
|
251 |
-
|
252 |
-
for i, t in enumerate(self.progress_bar(timesteps)):
|
253 |
-
# expand the latents if we are doing classifier free guidance
|
254 |
-
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
255 |
-
scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
256 |
-
|
257 |
-
noise_pred = self.prior(
|
258 |
-
scaled_model_input,
|
259 |
-
timestep=t,
|
260 |
-
proj_embedding=image_embeds,
|
261 |
-
).predicted_image_embedding
|
262 |
-
|
263 |
-
# remove the variance
|
264 |
-
noise_pred, _ = noise_pred.split(
|
265 |
-
scaled_model_input.shape[2], dim=2
|
266 |
-
) # batch_size, num_embeddings, embedding_dim
|
267 |
-
|
268 |
-
if do_classifier_free_guidance:
|
269 |
-
noise_pred_uncond, noise_pred = noise_pred.chunk(2)
|
270 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
|
271 |
-
|
272 |
-
latents = self.scheduler.step(
|
273 |
-
noise_pred,
|
274 |
-
timestep=t,
|
275 |
-
sample=latents,
|
276 |
-
).prev_sample
|
277 |
-
|
278 |
-
if output_type not in ["np", "pil", "latent", "mesh"]:
|
279 |
-
raise ValueError(
|
280 |
-
f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}"
|
281 |
-
)
|
282 |
-
|
283 |
-
if output_type == "latent":
|
284 |
-
return ShapEPipelineOutput(images=latents)
|
285 |
-
|
286 |
-
images = []
|
287 |
-
if output_type == "mesh":
|
288 |
-
for i, latent in enumerate(latents):
|
289 |
-
mesh = self.shap_e_renderer.decode_to_mesh(
|
290 |
-
latent[None, :],
|
291 |
-
device,
|
292 |
-
)
|
293 |
-
images.append(mesh)
|
294 |
-
|
295 |
-
else:
|
296 |
-
# np, pil
|
297 |
-
for i, latent in enumerate(latents):
|
298 |
-
image = self.shap_e_renderer.decode_to_image(
|
299 |
-
latent[None, :],
|
300 |
-
device,
|
301 |
-
size=frame_size,
|
302 |
-
)
|
303 |
-
images.append(image)
|
304 |
-
|
305 |
-
images = torch.stack(images)
|
306 |
-
|
307 |
-
images = images.cpu().numpy()
|
308 |
-
|
309 |
-
if output_type == "pil":
|
310 |
-
images = [self.numpy_to_pil(image) for image in images]
|
311 |
-
|
312 |
-
# Offload last model to CPU
|
313 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
314 |
-
self.final_offload_hook.offload()
|
315 |
-
|
316 |
-
if not return_dict:
|
317 |
-
return (images,)
|
318 |
-
|
319 |
-
return ShapEPipelineOutput(images=images)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
|
3 |
-
'../_base_/datasets/coco_instance.py',
|
4 |
-
'../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py'
|
2 |
-
|
3 |
-
# learning policy
|
4 |
-
lr_config = dict(step=[28, 34])
|
5 |
-
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/base_roi_head.py
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
from abc import ABCMeta, abstractmethod
|
2 |
-
|
3 |
-
import torch.nn as nn
|
4 |
-
|
5 |
-
from ..builder import build_shared_head
|
6 |
-
|
7 |
-
|
8 |
-
class BaseRoIHead(nn.Module, metaclass=ABCMeta):
|
9 |
-
"""Base class for RoIHeads."""
|
10 |
-
|
11 |
-
def __init__(self,
|
12 |
-
bbox_roi_extractor=None,
|
13 |
-
bbox_head=None,
|
14 |
-
mask_roi_extractor=None,
|
15 |
-
mask_head=None,
|
16 |
-
shared_head=None,
|
17 |
-
train_cfg=None,
|
18 |
-
test_cfg=None):
|
19 |
-
super(BaseRoIHead, self).__init__()
|
20 |
-
self.train_cfg = train_cfg
|
21 |
-
self.test_cfg = test_cfg
|
22 |
-
if shared_head is not None:
|
23 |
-
self.shared_head = build_shared_head(shared_head)
|
24 |
-
|
25 |
-
if bbox_head is not None:
|
26 |
-
self.init_bbox_head(bbox_roi_extractor, bbox_head)
|
27 |
-
|
28 |
-
if mask_head is not None:
|
29 |
-
self.init_mask_head(mask_roi_extractor, mask_head)
|
30 |
-
|
31 |
-
self.init_assigner_sampler()
|
32 |
-
|
33 |
-
@property
|
34 |
-
def with_bbox(self):
|
35 |
-
"""bool: whether the RoI head contains a `bbox_head`"""
|
36 |
-
return hasattr(self, 'bbox_head') and self.bbox_head is not None
|
37 |
-
|
38 |
-
@property
|
39 |
-
def with_mask(self):
|
40 |
-
"""bool: whether the RoI head contains a `mask_head`"""
|
41 |
-
return hasattr(self, 'mask_head') and self.mask_head is not None
|
42 |
-
|
43 |
-
@property
|
44 |
-
def with_shared_head(self):
|
45 |
-
"""bool: whether the RoI head contains a `shared_head`"""
|
46 |
-
return hasattr(self, 'shared_head') and self.shared_head is not None
|
47 |
-
|
48 |
-
@abstractmethod
|
49 |
-
def init_weights(self, pretrained):
|
50 |
-
"""Initialize the weights in head.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
pretrained (str, optional): Path to pre-trained weights.
|
54 |
-
Defaults to None.
|
55 |
-
"""
|
56 |
-
pass
|
57 |
-
|
58 |
-
@abstractmethod
|
59 |
-
def init_bbox_head(self):
|
60 |
-
"""Initialize ``bbox_head``"""
|
61 |
-
pass
|
62 |
-
|
63 |
-
@abstractmethod
|
64 |
-
def init_mask_head(self):
|
65 |
-
"""Initialize ``mask_head``"""
|
66 |
-
pass
|
67 |
-
|
68 |
-
@abstractmethod
|
69 |
-
def init_assigner_sampler(self):
|
70 |
-
"""Initialize assigner and sampler."""
|
71 |
-
pass
|
72 |
-
|
73 |
-
@abstractmethod
|
74 |
-
def forward_train(self,
|
75 |
-
x,
|
76 |
-
img_meta,
|
77 |
-
proposal_list,
|
78 |
-
gt_bboxes,
|
79 |
-
gt_labels,
|
80 |
-
gt_bboxes_ignore=None,
|
81 |
-
gt_masks=None,
|
82 |
-
**kwargs):
|
83 |
-
"""Forward function during training."""
|
84 |
-
|
85 |
-
async def async_simple_test(self, x, img_meta, **kwargs):
|
86 |
-
"""Asynchronized test function."""
|
87 |
-
raise NotImplementedError
|
88 |
-
|
89 |
-
def simple_test(self,
|
90 |
-
x,
|
91 |
-
proposal_list,
|
92 |
-
img_meta,
|
93 |
-
proposals=None,
|
94 |
-
rescale=False,
|
95 |
-
**kwargs):
|
96 |
-
"""Test without augmentation."""
|
97 |
-
|
98 |
-
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
|
99 |
-
"""Test with augmentations.
|
100 |
-
|
101 |
-
If rescale is False, then returned bboxes and masks will fit the scale
|
102 |
-
of imgs[0].
|
103 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/encnet_r50-d8.py',
|
3 |
-
'../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_80k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(align_corners=True),
|
8 |
-
auxiliary_head=dict(align_corners=True),
|
9 |
-
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='mmcls://mobilenet_v2',
|
4 |
-
backbone=dict(
|
5 |
-
_delete_=True,
|
6 |
-
type='MobileNetV2',
|
7 |
-
widen_factor=1.,
|
8 |
-
strides=(1, 2, 2, 1, 1, 1, 1),
|
9 |
-
dilations=(1, 1, 1, 2, 2, 4, 4),
|
10 |
-
out_indices=(1, 2, 4, 6)),
|
11 |
-
decode_head=dict(in_channels=320, c1_in_channels=24),
|
12 |
-
auxiliary_head=dict(in_channels=96))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/prompts.py
DELETED
@@ -1,51 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
|
3 |
-
import yaml
|
4 |
-
|
5 |
-
from modules import utils
|
6 |
-
from modules.text_generation import get_encoded_length
|
7 |
-
|
8 |
-
|
9 |
-
def load_prompt(fname):
|
10 |
-
if fname in ['None', '']:
|
11 |
-
return ''
|
12 |
-
else:
|
13 |
-
file_path = Path(f'prompts/{fname}.txt')
|
14 |
-
if not file_path.exists():
|
15 |
-
return ''
|
16 |
-
|
17 |
-
with open(file_path, 'r', encoding='utf-8') as f:
|
18 |
-
text = f.read()
|
19 |
-
if text[-1] == '\n':
|
20 |
-
text = text[:-1]
|
21 |
-
|
22 |
-
return text
|
23 |
-
|
24 |
-
|
25 |
-
def load_instruction_prompt_simple(fname):
|
26 |
-
file_path = Path(f'instruction-templates/{fname}.yaml')
|
27 |
-
if not file_path.exists():
|
28 |
-
return ''
|
29 |
-
|
30 |
-
with open(file_path, 'r', encoding='utf-8') as f:
|
31 |
-
data = yaml.safe_load(f)
|
32 |
-
output = ''
|
33 |
-
if 'context' in data:
|
34 |
-
output += data['context']
|
35 |
-
|
36 |
-
replacements = {
|
37 |
-
'<|user|>': data['user'],
|
38 |
-
'<|bot|>': data['bot'],
|
39 |
-
'<|user-message|>': 'Input',
|
40 |
-
}
|
41 |
-
|
42 |
-
output += utils.replace_all(data['turn_template'].split('<|bot-message|>')[0], replacements)
|
43 |
-
return output.rstrip(' ')
|
44 |
-
|
45 |
-
|
46 |
-
def count_tokens(text):
|
47 |
-
try:
|
48 |
-
tokens = get_encoded_length(text)
|
49 |
-
return str(tokens)
|
50 |
-
except:
|
51 |
-
return '0'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/corner_pool.py
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.autograd import Function
|
5 |
-
|
6 |
-
from ..utils import ext_loader
|
7 |
-
|
8 |
-
ext_module = ext_loader.load_ext('_ext', [
|
9 |
-
'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward',
|
10 |
-
'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward',
|
11 |
-
'right_pool_forward', 'right_pool_backward'
|
12 |
-
])
|
13 |
-
|
14 |
-
_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3}
|
15 |
-
|
16 |
-
|
17 |
-
class TopPoolFunction(Function):
|
18 |
-
|
19 |
-
@staticmethod
|
20 |
-
def symbolic(g, input):
|
21 |
-
output = g.op(
|
22 |
-
'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top']))
|
23 |
-
return output
|
24 |
-
|
25 |
-
@staticmethod
|
26 |
-
def forward(ctx, input):
|
27 |
-
output = ext_module.top_pool_forward(input)
|
28 |
-
ctx.save_for_backward(input)
|
29 |
-
return output
|
30 |
-
|
31 |
-
@staticmethod
|
32 |
-
def backward(ctx, grad_output):
|
33 |
-
input, = ctx.saved_tensors
|
34 |
-
output = ext_module.top_pool_backward(input, grad_output)
|
35 |
-
return output
|
36 |
-
|
37 |
-
|
38 |
-
class BottomPoolFunction(Function):
|
39 |
-
|
40 |
-
@staticmethod
|
41 |
-
def symbolic(g, input):
|
42 |
-
output = g.op(
|
43 |
-
'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom']))
|
44 |
-
return output
|
45 |
-
|
46 |
-
@staticmethod
|
47 |
-
def forward(ctx, input):
|
48 |
-
output = ext_module.bottom_pool_forward(input)
|
49 |
-
ctx.save_for_backward(input)
|
50 |
-
return output
|
51 |
-
|
52 |
-
@staticmethod
|
53 |
-
def backward(ctx, grad_output):
|
54 |
-
input, = ctx.saved_tensors
|
55 |
-
output = ext_module.bottom_pool_backward(input, grad_output)
|
56 |
-
return output
|
57 |
-
|
58 |
-
|
59 |
-
class LeftPoolFunction(Function):
|
60 |
-
|
61 |
-
@staticmethod
|
62 |
-
def symbolic(g, input):
|
63 |
-
output = g.op(
|
64 |
-
'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left']))
|
65 |
-
return output
|
66 |
-
|
67 |
-
@staticmethod
|
68 |
-
def forward(ctx, input):
|
69 |
-
output = ext_module.left_pool_forward(input)
|
70 |
-
ctx.save_for_backward(input)
|
71 |
-
return output
|
72 |
-
|
73 |
-
@staticmethod
|
74 |
-
def backward(ctx, grad_output):
|
75 |
-
input, = ctx.saved_tensors
|
76 |
-
output = ext_module.left_pool_backward(input, grad_output)
|
77 |
-
return output
|
78 |
-
|
79 |
-
|
80 |
-
class RightPoolFunction(Function):
|
81 |
-
|
82 |
-
@staticmethod
|
83 |
-
def symbolic(g, input):
|
84 |
-
output = g.op(
|
85 |
-
'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right']))
|
86 |
-
return output
|
87 |
-
|
88 |
-
@staticmethod
|
89 |
-
def forward(ctx, input):
|
90 |
-
output = ext_module.right_pool_forward(input)
|
91 |
-
ctx.save_for_backward(input)
|
92 |
-
return output
|
93 |
-
|
94 |
-
@staticmethod
|
95 |
-
def backward(ctx, grad_output):
|
96 |
-
input, = ctx.saved_tensors
|
97 |
-
output = ext_module.right_pool_backward(input, grad_output)
|
98 |
-
return output
|
99 |
-
|
100 |
-
|
101 |
-
class CornerPool(nn.Module):
|
102 |
-
"""Corner Pooling.
|
103 |
-
|
104 |
-
Corner Pooling is a new type of pooling layer that helps a
|
105 |
-
convolutional network better localize corners of bounding boxes.
|
106 |
-
|
107 |
-
Please refer to https://arxiv.org/abs/1808.01244 for more details.
|
108 |
-
Code is modified from https://github.com/princeton-vl/CornerNet-Lite.
|
109 |
-
|
110 |
-
Args:
|
111 |
-
mode(str): Pooling orientation for the pooling layer
|
112 |
-
|
113 |
-
- 'bottom': Bottom Pooling
|
114 |
-
- 'left': Left Pooling
|
115 |
-
- 'right': Right Pooling
|
116 |
-
- 'top': Top Pooling
|
117 |
-
|
118 |
-
Returns:
|
119 |
-
Feature map after pooling.
|
120 |
-
"""
|
121 |
-
|
122 |
-
pool_functions = {
|
123 |
-
'bottom': BottomPoolFunction,
|
124 |
-
'left': LeftPoolFunction,
|
125 |
-
'right': RightPoolFunction,
|
126 |
-
'top': TopPoolFunction,
|
127 |
-
}
|
128 |
-
|
129 |
-
cummax_dim_flip = {
|
130 |
-
'bottom': (2, False),
|
131 |
-
'left': (3, True),
|
132 |
-
'right': (3, False),
|
133 |
-
'top': (2, True),
|
134 |
-
}
|
135 |
-
|
136 |
-
def __init__(self, mode):
|
137 |
-
super(CornerPool, self).__init__()
|
138 |
-
assert mode in self.pool_functions
|
139 |
-
self.mode = mode
|
140 |
-
self.corner_pool = self.pool_functions[mode]
|
141 |
-
|
142 |
-
def forward(self, x):
|
143 |
-
if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0':
|
144 |
-
if torch.onnx.is_in_onnx_export():
|
145 |
-
assert torch.__version__ >= '1.7.0', \
|
146 |
-
'When `cummax` serves as an intermediate component whose '\
|
147 |
-
'outputs is used as inputs for another modules, it\'s '\
|
148 |
-
'expected that pytorch version must be >= 1.7.0, '\
|
149 |
-
'otherwise Error appears like: `RuntimeError: tuple '\
|
150 |
-
'appears in op that does not forward tuples, unsupported '\
|
151 |
-
'kind: prim::PythonOp`.'
|
152 |
-
|
153 |
-
dim, flip = self.cummax_dim_flip[self.mode]
|
154 |
-
if flip:
|
155 |
-
x = x.flip(dim)
|
156 |
-
pool_tensor, _ = torch.cummax(x, dim=dim)
|
157 |
-
if flip:
|
158 |
-
pool_tensor = pool_tensor.flip(dim)
|
159 |
-
return pool_tensor
|
160 |
-
else:
|
161 |
-
return self.corner_pool.apply(x)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AppleQAQ/anime-remove-background/app.py
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import huggingface_hub
|
3 |
-
import onnxruntime as rt
|
4 |
-
import numpy as np
|
5 |
-
import cv2
|
6 |
-
|
7 |
-
|
8 |
-
def get_mask(img, s=1024):
|
9 |
-
img = (img / 255).astype(np.float32)
|
10 |
-
h, w = h0, w0 = img.shape[:-1]
|
11 |
-
h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
|
12 |
-
ph, pw = s - h, s - w
|
13 |
-
img_input = np.zeros([s, s, 3], dtype=np.float32)
|
14 |
-
img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
|
15 |
-
img_input = np.transpose(img_input, (2, 0, 1))
|
16 |
-
img_input = img_input[np.newaxis, :]
|
17 |
-
mask = rmbg_model.run(None, {'img': img_input})[0][0]
|
18 |
-
mask = np.transpose(mask, (1, 2, 0))
|
19 |
-
mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
|
20 |
-
mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
|
21 |
-
return mask
|
22 |
-
|
23 |
-
|
24 |
-
def rmbg_fn(img):
|
25 |
-
mask = get_mask(img)
|
26 |
-
img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
|
27 |
-
mask = (mask * 255).astype(np.uint8)
|
28 |
-
img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
|
29 |
-
mask = mask.repeat(3, axis=2)
|
30 |
-
return mask, img
|
31 |
-
|
32 |
-
|
33 |
-
if __name__ == "__main__":
|
34 |
-
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
|
35 |
-
model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
|
36 |
-
rmbg_model = rt.InferenceSession(model_path, providers=providers)
|
37 |
-
app = gr.Blocks()
|
38 |
-
with app:
|
39 |
-
gr.Markdown("# Anime Remove Background\n\n"
|
40 |
-
"\n\n"
|
41 |
-
"demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
|
42 |
-
with gr.Row():
|
43 |
-
with gr.Column():
|
44 |
-
input_img = gr.Image(label="input image")
|
45 |
-
examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
|
46 |
-
examples = gr.Dataset(components=[input_img], samples=examples_data)
|
47 |
-
run_btn = gr.Button(variant="primary")
|
48 |
-
output_mask = gr.Image(label="mask")
|
49 |
-
output_img = gr.Image(label="result", image_mode="RGBA")
|
50 |
-
examples.click(lambda x: x[0], [examples], [input_img])
|
51 |
-
run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
|
52 |
-
app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ariharasudhan/XAI_Class-Activation-Maps/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Class Activation Maps
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.14.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artples/llama-2-7b-chat/run-app.sh
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
nodemon -w app.py -x python app.py
|
|
|
|
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
|
|
|
spaces/AxelBell/EasyOCR_text_recognition/data.py
DELETED
@@ -1,110 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import gradio as gr
|
3 |
-
import easyocr
|
4 |
-
import numpy as np
|
5 |
-
import json
|
6 |
-
from collections import OrderedDict
|
7 |
-
from pprint import pformat
|
8 |
-
|
9 |
-
try:
|
10 |
-
import tomllib
|
11 |
-
except ModuleNotFoundError:
|
12 |
-
import tomli as tomllib
|
13 |
-
|
14 |
-
|
15 |
-
def read_file(path: str) -> str:
|
16 |
-
with open(path, "rb") as f:
|
17 |
-
return f.read().decode("utf-8")
|
18 |
-
|
19 |
-
|
20 |
-
def draw_label(
|
21 |
-
image,
|
22 |
-
center,
|
23 |
-
text,
|
24 |
-
font=cv2.FONT_HERSHEY_SIMPLEX,
|
25 |
-
font_scale=1,
|
26 |
-
text_color=(255, 0, 0),
|
27 |
-
text_thickness=2,
|
28 |
-
circle_color=(0, 0, 0),
|
29 |
-
circle_thickness=-1,
|
30 |
-
):
|
31 |
-
(text_width, text_height), _ = cv2.getTextSize(
|
32 |
-
str(text), font, font_scale, text_thickness
|
33 |
-
)
|
34 |
-
text_x = center[0] - text_width // 2
|
35 |
-
text_y = center[1] + text_height // 2
|
36 |
-
radius = (text_width + 8) // 2
|
37 |
-
cv2.circle(image, center, radius, circle_color, circle_thickness)
|
38 |
-
cv2.putText(
|
39 |
-
image, str(text), (text_x, text_y), font, font_scale, text_color, text_thickness
|
40 |
-
)
|
41 |
-
cv2.circle(image, center, radius + 1, text_color, 1)
|
42 |
-
|
43 |
-
|
44 |
-
class Data:
|
45 |
-
@staticmethod
|
46 |
-
def block_factory(block_metadata: list[dict]):
|
47 |
-
_block_list = [
|
48 |
-
(bm["name"], getattr(gr, bm["type"])(render=False, **bm["param"]))
|
49 |
-
for bm in block_metadata
|
50 |
-
]
|
51 |
-
return OrderedDict(_block_list)
|
52 |
-
|
53 |
-
def __init__(
|
54 |
-
self, toml_file: str, batch_size: int = 1, workers: int = 0, canvas_size=2560
|
55 |
-
):
|
56 |
-
with open(toml_file, "rb") as f:
|
57 |
-
_data = tomllib.load(f, parse_float=float)
|
58 |
-
self.assets = {k: read_file(v) for k, v in _data["assets"]}
|
59 |
-
self.inputs = Data.block_factory(_data["blocklist"]["inputs"])
|
60 |
-
self.default = [getattr(b, "value", None) for b in self.inputs.values()]
|
61 |
-
self.outputs = Data.block_factory(_data["blocklist"]["outputs"])
|
62 |
-
self.examples = _data["examples"]
|
63 |
-
self.batch_size = batch_size
|
64 |
-
self.workers = workers
|
65 |
-
self.canvas_size = canvas_size
|
66 |
-
|
67 |
-
def render(self, *args):
|
68 |
-
for bn in args:
|
69 |
-
block = self.inputs.get(bn, self.outputs.get(bn))
|
70 |
-
if block:
|
71 |
-
block.render()
|
72 |
-
|
73 |
-
@property
|
74 |
-
def inputs_list(self):
|
75 |
-
return list(self.inputs.values())
|
76 |
-
|
77 |
-
@property
|
78 |
-
def outputs_list(self):
|
79 |
-
return list(self.outputs.values())
|
80 |
-
|
81 |
-
def process_image(self, lang: list[str], *args):
|
82 |
-
reader = easyocr.Reader(lang, False)
|
83 |
-
kwargs = dict(zip(list(self.inputs.keys())[1:], args))
|
84 |
-
kwargs.update(
|
85 |
-
{
|
86 |
-
"batch_size": self.batch_size,
|
87 |
-
"workers": self.workers,
|
88 |
-
"canvas_size": self.canvas_size,
|
89 |
-
}
|
90 |
-
)
|
91 |
-
raw = reader.readtext(**kwargs)
|
92 |
-
result = raw.copy()
|
93 |
-
raw = pformat(raw)
|
94 |
-
if not kwargs["detail"]:
|
95 |
-
return kwargs["image"], [[idx, i, ""] for idx, i in enumerate(result)], raw
|
96 |
-
|
97 |
-
if kwargs["paragraph"]:
|
98 |
-
result = [[i[0], i[1], 0] for i in result]
|
99 |
-
if kwargs["output_format"] == "json":
|
100 |
-
result = [json.loads(i, parse_float=float, parse_int=int) for i in result]
|
101 |
-
if kwargs["output_format"] == "dict" or kwargs["output_format"] == "json":
|
102 |
-
result = [[i["boxes"], i["text"], i["confident"]] for i in result]
|
103 |
-
|
104 |
-
cnt = [np.array(i[0]).astype(int) for i in result]
|
105 |
-
cv2.drawContours(kwargs["image"], cnt, -1, (0, 255, 0), 3, cv2.LINE_AA)
|
106 |
-
for count, c in enumerate(cnt):
|
107 |
-
draw_label(kwargs["image"], c[0], count)
|
108 |
-
data = [[idx, i[1], f"{int(i[2]*100)}%"] for idx, i in enumerate(result)]
|
109 |
-
|
110 |
-
return kwargs["image"], data, raw
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Axolotlily/TextGen/app.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
2 |
-
import torch
|
3 |
-
import gradio as gr
|
4 |
-
|
5 |
-
tokenizer = AutoTokenizer.from_pretrained("gpt2")
|
6 |
-
model = AutoModelForCausalLM.from_pretrained("gpt2")
|
7 |
-
|
8 |
-
def text_generation(input_text, seed):
|
9 |
-
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
10 |
-
torch.manual_seed(seed) # Max value: 18446744073709551615
|
11 |
-
outputs = model.generate(input_ids, do_sample=True, max_length=500)
|
12 |
-
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
13 |
-
return generated_text
|
14 |
-
|
15 |
-
title = "Text Generator Demo GPT2"
|
16 |
-
description = "Text Generator Application by ecarbo"
|
17 |
-
|
18 |
-
gr.Interface(
|
19 |
-
text_generation,
|
20 |
-
[gr.inputs.Textbox(lines=2, label="Enter input text"), gr.inputs.Number(default=10, label="Enter seed number")],
|
21 |
-
[gr.outputs.Textbox(type="auto", label="Text Generated")],
|
22 |
-
title=title,
|
23 |
-
description=description,
|
24 |
-
theme="huggingface"
|
25 |
-
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/demucs/pretrained.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
# author: adefossez
|
7 |
-
|
8 |
-
import logging
|
9 |
-
|
10 |
-
from diffq import DiffQuantizer
|
11 |
-
import torch.hub
|
12 |
-
|
13 |
-
from .model import Demucs
|
14 |
-
from .tasnet import ConvTasNet
|
15 |
-
from .utils import set_state
|
16 |
-
|
17 |
-
logger = logging.getLogger(__name__)
|
18 |
-
ROOT = "https://dl.fbaipublicfiles.com/demucs/v3.0/"
|
19 |
-
|
20 |
-
PRETRAINED_MODELS = {
|
21 |
-
'demucs': 'e07c671f',
|
22 |
-
'demucs48_hq': '28a1282c',
|
23 |
-
'demucs_extra': '3646af93',
|
24 |
-
'demucs_quantized': '07afea75',
|
25 |
-
'tasnet': 'beb46fac',
|
26 |
-
'tasnet_extra': 'df3777b2',
|
27 |
-
'demucs_unittest': '09ebc15f',
|
28 |
-
}
|
29 |
-
|
30 |
-
SOURCES = ["drums", "bass", "other", "vocals"]
|
31 |
-
|
32 |
-
|
33 |
-
def get_url(name):
|
34 |
-
sig = PRETRAINED_MODELS[name]
|
35 |
-
return ROOT + name + "-" + sig[:8] + ".th"
|
36 |
-
|
37 |
-
|
38 |
-
def is_pretrained(name):
|
39 |
-
return name in PRETRAINED_MODELS
|
40 |
-
|
41 |
-
|
42 |
-
def load_pretrained(name):
|
43 |
-
if name == "demucs":
|
44 |
-
return demucs(pretrained=True)
|
45 |
-
elif name == "demucs48_hq":
|
46 |
-
return demucs(pretrained=True, hq=True, channels=48)
|
47 |
-
elif name == "demucs_extra":
|
48 |
-
return demucs(pretrained=True, extra=True)
|
49 |
-
elif name == "demucs_quantized":
|
50 |
-
return demucs(pretrained=True, quantized=True)
|
51 |
-
elif name == "demucs_unittest":
|
52 |
-
return demucs_unittest(pretrained=True)
|
53 |
-
elif name == "tasnet":
|
54 |
-
return tasnet(pretrained=True)
|
55 |
-
elif name == "tasnet_extra":
|
56 |
-
return tasnet(pretrained=True, extra=True)
|
57 |
-
else:
|
58 |
-
raise ValueError(f"Invalid pretrained name {name}")
|
59 |
-
|
60 |
-
|
61 |
-
def _load_state(name, model, quantizer=None):
|
62 |
-
url = get_url(name)
|
63 |
-
state = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True)
|
64 |
-
set_state(model, quantizer, state)
|
65 |
-
if quantizer:
|
66 |
-
quantizer.detach()
|
67 |
-
|
68 |
-
|
69 |
-
def demucs_unittest(pretrained=True):
|
70 |
-
model = Demucs(channels=4, sources=SOURCES)
|
71 |
-
if pretrained:
|
72 |
-
_load_state('demucs_unittest', model)
|
73 |
-
return model
|
74 |
-
|
75 |
-
|
76 |
-
def demucs(pretrained=True, extra=False, quantized=False, hq=False, channels=64):
|
77 |
-
if not pretrained and (extra or quantized or hq):
|
78 |
-
raise ValueError("if extra or quantized is True, pretrained must be True.")
|
79 |
-
model = Demucs(sources=SOURCES, channels=channels)
|
80 |
-
if pretrained:
|
81 |
-
name = 'demucs'
|
82 |
-
if channels != 64:
|
83 |
-
name += str(channels)
|
84 |
-
quantizer = None
|
85 |
-
if sum([extra, quantized, hq]) > 1:
|
86 |
-
raise ValueError("Only one of extra, quantized, hq, can be True.")
|
87 |
-
if quantized:
|
88 |
-
quantizer = DiffQuantizer(model, group_size=8, min_size=1)
|
89 |
-
name += '_quantized'
|
90 |
-
if extra:
|
91 |
-
name += '_extra'
|
92 |
-
if hq:
|
93 |
-
name += '_hq'
|
94 |
-
_load_state(name, model, quantizer)
|
95 |
-
return model
|
96 |
-
|
97 |
-
|
98 |
-
def tasnet(pretrained=True, extra=False):
|
99 |
-
if not pretrained and extra:
|
100 |
-
raise ValueError("if extra is True, pretrained must be True.")
|
101 |
-
model = ConvTasNet(X=10, sources=SOURCES)
|
102 |
-
if pretrained:
|
103 |
-
name = 'tasnet'
|
104 |
-
if extra:
|
105 |
-
name = 'tasnet_extra'
|
106 |
-
_load_state(name, model)
|
107 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/spec_utils.py
DELETED
@@ -1,667 +0,0 @@
|
|
1 |
-
import os, librosa
|
2 |
-
import numpy as np
|
3 |
-
import soundfile as sf
|
4 |
-
from tqdm import tqdm
|
5 |
-
import json, math, hashlib
|
6 |
-
|
7 |
-
|
8 |
-
def crop_center(h1, h2):
|
9 |
-
h1_shape = h1.size()
|
10 |
-
h2_shape = h2.size()
|
11 |
-
|
12 |
-
if h1_shape[3] == h2_shape[3]:
|
13 |
-
return h1
|
14 |
-
elif h1_shape[3] < h2_shape[3]:
|
15 |
-
raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
|
16 |
-
|
17 |
-
# s_freq = (h2_shape[2] - h1_shape[2]) // 2
|
18 |
-
# e_freq = s_freq + h1_shape[2]
|
19 |
-
s_time = (h1_shape[3] - h2_shape[3]) // 2
|
20 |
-
e_time = s_time + h2_shape[3]
|
21 |
-
h1 = h1[:, :, :, s_time:e_time]
|
22 |
-
|
23 |
-
return h1
|
24 |
-
|
25 |
-
|
26 |
-
def wave_to_spectrogram(
|
27 |
-
wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
|
28 |
-
):
|
29 |
-
if reverse:
|
30 |
-
wave_left = np.flip(np.asfortranarray(wave[0]))
|
31 |
-
wave_right = np.flip(np.asfortranarray(wave[1]))
|
32 |
-
elif mid_side:
|
33 |
-
wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
|
34 |
-
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
|
35 |
-
elif mid_side_b2:
|
36 |
-
wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
|
37 |
-
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
|
38 |
-
else:
|
39 |
-
wave_left = np.asfortranarray(wave[0])
|
40 |
-
wave_right = np.asfortranarray(wave[1])
|
41 |
-
|
42 |
-
spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
|
43 |
-
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
|
44 |
-
|
45 |
-
spec = np.asfortranarray([spec_left, spec_right])
|
46 |
-
|
47 |
-
return spec
|
48 |
-
|
49 |
-
|
50 |
-
def wave_to_spectrogram_mt(
|
51 |
-
wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
|
52 |
-
):
|
53 |
-
import threading
|
54 |
-
|
55 |
-
if reverse:
|
56 |
-
wave_left = np.flip(np.asfortranarray(wave[0]))
|
57 |
-
wave_right = np.flip(np.asfortranarray(wave[1]))
|
58 |
-
elif mid_side:
|
59 |
-
wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
|
60 |
-
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
|
61 |
-
elif mid_side_b2:
|
62 |
-
wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
|
63 |
-
wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
|
64 |
-
else:
|
65 |
-
wave_left = np.asfortranarray(wave[0])
|
66 |
-
wave_right = np.asfortranarray(wave[1])
|
67 |
-
|
68 |
-
def run_thread(**kwargs):
|
69 |
-
global spec_left
|
70 |
-
spec_left = librosa.stft(**kwargs)
|
71 |
-
|
72 |
-
thread = threading.Thread(
|
73 |
-
target=run_thread,
|
74 |
-
kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
|
75 |
-
)
|
76 |
-
thread.start()
|
77 |
-
spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
|
78 |
-
thread.join()
|
79 |
-
|
80 |
-
spec = np.asfortranarray([spec_left, spec_right])
|
81 |
-
|
82 |
-
return spec
|
83 |
-
|
84 |
-
|
85 |
-
def combine_spectrograms(specs, mp):
|
86 |
-
l = min([specs[i].shape[2] for i in specs])
|
87 |
-
spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
|
88 |
-
offset = 0
|
89 |
-
bands_n = len(mp.param["band"])
|
90 |
-
|
91 |
-
for d in range(1, bands_n + 1):
|
92 |
-
h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
|
93 |
-
spec_c[:, offset : offset + h, :l] = specs[d][
|
94 |
-
:, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
|
95 |
-
]
|
96 |
-
offset += h
|
97 |
-
|
98 |
-
if offset > mp.param["bins"]:
|
99 |
-
raise ValueError("Too much bins")
|
100 |
-
|
101 |
-
# lowpass fiter
|
102 |
-
if (
|
103 |
-
mp.param["pre_filter_start"] > 0
|
104 |
-
): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
|
105 |
-
if bands_n == 1:
|
106 |
-
spec_c = fft_lp_filter(
|
107 |
-
spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
|
108 |
-
)
|
109 |
-
else:
|
110 |
-
gp = 1
|
111 |
-
for b in range(
|
112 |
-
mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
|
113 |
-
):
|
114 |
-
g = math.pow(
|
115 |
-
10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
|
116 |
-
)
|
117 |
-
gp = g
|
118 |
-
spec_c[:, b, :] *= g
|
119 |
-
|
120 |
-
return np.asfortranarray(spec_c)
|
121 |
-
|
122 |
-
|
123 |
-
def spectrogram_to_image(spec, mode="magnitude"):
|
124 |
-
if mode == "magnitude":
|
125 |
-
if np.iscomplexobj(spec):
|
126 |
-
y = np.abs(spec)
|
127 |
-
else:
|
128 |
-
y = spec
|
129 |
-
y = np.log10(y**2 + 1e-8)
|
130 |
-
elif mode == "phase":
|
131 |
-
if np.iscomplexobj(spec):
|
132 |
-
y = np.angle(spec)
|
133 |
-
else:
|
134 |
-
y = spec
|
135 |
-
|
136 |
-
y -= y.min()
|
137 |
-
y *= 255 / y.max()
|
138 |
-
img = np.uint8(y)
|
139 |
-
|
140 |
-
if y.ndim == 3:
|
141 |
-
img = img.transpose(1, 2, 0)
|
142 |
-
img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
|
143 |
-
|
144 |
-
return img
|
145 |
-
|
146 |
-
|
147 |
-
def reduce_vocal_aggressively(X, y, softmask):
|
148 |
-
v = X - y
|
149 |
-
y_mag_tmp = np.abs(y)
|
150 |
-
v_mag_tmp = np.abs(v)
|
151 |
-
|
152 |
-
v_mask = v_mag_tmp > y_mag_tmp
|
153 |
-
y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
|
154 |
-
|
155 |
-
return y_mag * np.exp(1.0j * np.angle(y))
|
156 |
-
|
157 |
-
|
158 |
-
def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
|
159 |
-
if min_range < fade_size * 2:
|
160 |
-
raise ValueError("min_range must be >= fade_area * 2")
|
161 |
-
|
162 |
-
mag = mag.copy()
|
163 |
-
|
164 |
-
idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
|
165 |
-
starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
|
166 |
-
ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
|
167 |
-
uninformative = np.where(ends - starts > min_range)[0]
|
168 |
-
if len(uninformative) > 0:
|
169 |
-
starts = starts[uninformative]
|
170 |
-
ends = ends[uninformative]
|
171 |
-
old_e = None
|
172 |
-
for s, e in zip(starts, ends):
|
173 |
-
if old_e is not None and s - old_e < fade_size:
|
174 |
-
s = old_e - fade_size * 2
|
175 |
-
|
176 |
-
if s != 0:
|
177 |
-
weight = np.linspace(0, 1, fade_size)
|
178 |
-
mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
|
179 |
-
else:
|
180 |
-
s -= fade_size
|
181 |
-
|
182 |
-
if e != mag.shape[2]:
|
183 |
-
weight = np.linspace(1, 0, fade_size)
|
184 |
-
mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
|
185 |
-
else:
|
186 |
-
e += fade_size
|
187 |
-
|
188 |
-
mag[:, :, s + fade_size : e - fade_size] += ref[
|
189 |
-
:, :, s + fade_size : e - fade_size
|
190 |
-
]
|
191 |
-
old_e = e
|
192 |
-
|
193 |
-
return mag
|
194 |
-
|
195 |
-
|
196 |
-
def align_wave_head_and_tail(a, b):
|
197 |
-
l = min([a[0].size, b[0].size])
|
198 |
-
|
199 |
-
return a[:l, :l], b[:l, :l]
|
200 |
-
|
201 |
-
|
202 |
-
def cache_or_load(mix_path, inst_path, mp):
|
203 |
-
mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
|
204 |
-
inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
|
205 |
-
|
206 |
-
cache_dir = "mph{}".format(
|
207 |
-
hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
|
208 |
-
)
|
209 |
-
mix_cache_dir = os.path.join("cache", cache_dir)
|
210 |
-
inst_cache_dir = os.path.join("cache", cache_dir)
|
211 |
-
|
212 |
-
os.makedirs(mix_cache_dir, exist_ok=True)
|
213 |
-
os.makedirs(inst_cache_dir, exist_ok=True)
|
214 |
-
|
215 |
-
mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
|
216 |
-
inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
|
217 |
-
|
218 |
-
if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
|
219 |
-
X_spec_m = np.load(mix_cache_path)
|
220 |
-
y_spec_m = np.load(inst_cache_path)
|
221 |
-
else:
|
222 |
-
X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
|
223 |
-
|
224 |
-
for d in range(len(mp.param["band"]), 0, -1):
|
225 |
-
bp = mp.param["band"][d]
|
226 |
-
|
227 |
-
if d == len(mp.param["band"]): # high-end band
|
228 |
-
X_wave[d], _ = librosa.load(
|
229 |
-
mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
|
230 |
-
)
|
231 |
-
y_wave[d], _ = librosa.load(
|
232 |
-
inst_path,
|
233 |
-
bp["sr"],
|
234 |
-
False,
|
235 |
-
dtype=np.float32,
|
236 |
-
res_type=bp["res_type"],
|
237 |
-
)
|
238 |
-
else: # lower bands
|
239 |
-
X_wave[d] = librosa.resample(
|
240 |
-
X_wave[d + 1],
|
241 |
-
mp.param["band"][d + 1]["sr"],
|
242 |
-
bp["sr"],
|
243 |
-
res_type=bp["res_type"],
|
244 |
-
)
|
245 |
-
y_wave[d] = librosa.resample(
|
246 |
-
y_wave[d + 1],
|
247 |
-
mp.param["band"][d + 1]["sr"],
|
248 |
-
bp["sr"],
|
249 |
-
res_type=bp["res_type"],
|
250 |
-
)
|
251 |
-
|
252 |
-
X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
|
253 |
-
|
254 |
-
X_spec_s[d] = wave_to_spectrogram(
|
255 |
-
X_wave[d],
|
256 |
-
bp["hl"],
|
257 |
-
bp["n_fft"],
|
258 |
-
mp.param["mid_side"],
|
259 |
-
mp.param["mid_side_b2"],
|
260 |
-
mp.param["reverse"],
|
261 |
-
)
|
262 |
-
y_spec_s[d] = wave_to_spectrogram(
|
263 |
-
y_wave[d],
|
264 |
-
bp["hl"],
|
265 |
-
bp["n_fft"],
|
266 |
-
mp.param["mid_side"],
|
267 |
-
mp.param["mid_side_b2"],
|
268 |
-
mp.param["reverse"],
|
269 |
-
)
|
270 |
-
|
271 |
-
del X_wave, y_wave
|
272 |
-
|
273 |
-
X_spec_m = combine_spectrograms(X_spec_s, mp)
|
274 |
-
y_spec_m = combine_spectrograms(y_spec_s, mp)
|
275 |
-
|
276 |
-
if X_spec_m.shape != y_spec_m.shape:
|
277 |
-
raise ValueError("The combined spectrograms are different: " + mix_path)
|
278 |
-
|
279 |
-
_, ext = os.path.splitext(mix_path)
|
280 |
-
|
281 |
-
np.save(mix_cache_path, X_spec_m)
|
282 |
-
np.save(inst_cache_path, y_spec_m)
|
283 |
-
|
284 |
-
return X_spec_m, y_spec_m
|
285 |
-
|
286 |
-
|
287 |
-
def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
|
288 |
-
spec_left = np.asfortranarray(spec[0])
|
289 |
-
spec_right = np.asfortranarray(spec[1])
|
290 |
-
|
291 |
-
wave_left = librosa.istft(spec_left, hop_length=hop_length)
|
292 |
-
wave_right = librosa.istft(spec_right, hop_length=hop_length)
|
293 |
-
|
294 |
-
if reverse:
|
295 |
-
return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
|
296 |
-
elif mid_side:
|
297 |
-
return np.asfortranarray(
|
298 |
-
[np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
|
299 |
-
)
|
300 |
-
elif mid_side_b2:
|
301 |
-
return np.asfortranarray(
|
302 |
-
[
|
303 |
-
np.add(wave_right / 1.25, 0.4 * wave_left),
|
304 |
-
np.subtract(wave_left / 1.25, 0.4 * wave_right),
|
305 |
-
]
|
306 |
-
)
|
307 |
-
else:
|
308 |
-
return np.asfortranarray([wave_left, wave_right])
|
309 |
-
|
310 |
-
|
311 |
-
def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
|
312 |
-
import threading
|
313 |
-
|
314 |
-
spec_left = np.asfortranarray(spec[0])
|
315 |
-
spec_right = np.asfortranarray(spec[1])
|
316 |
-
|
317 |
-
def run_thread(**kwargs):
|
318 |
-
global wave_left
|
319 |
-
wave_left = librosa.istft(**kwargs)
|
320 |
-
|
321 |
-
thread = threading.Thread(
|
322 |
-
target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
|
323 |
-
)
|
324 |
-
thread.start()
|
325 |
-
wave_right = librosa.istft(spec_right, hop_length=hop_length)
|
326 |
-
thread.join()
|
327 |
-
|
328 |
-
if reverse:
|
329 |
-
return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
|
330 |
-
elif mid_side:
|
331 |
-
return np.asfortranarray(
|
332 |
-
[np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
|
333 |
-
)
|
334 |
-
elif mid_side_b2:
|
335 |
-
return np.asfortranarray(
|
336 |
-
[
|
337 |
-
np.add(wave_right / 1.25, 0.4 * wave_left),
|
338 |
-
np.subtract(wave_left / 1.25, 0.4 * wave_right),
|
339 |
-
]
|
340 |
-
)
|
341 |
-
else:
|
342 |
-
return np.asfortranarray([wave_left, wave_right])
|
343 |
-
|
344 |
-
|
345 |
-
def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
|
346 |
-
wave_band = {}
|
347 |
-
bands_n = len(mp.param["band"])
|
348 |
-
offset = 0
|
349 |
-
|
350 |
-
for d in range(1, bands_n + 1):
|
351 |
-
bp = mp.param["band"][d]
|
352 |
-
spec_s = np.ndarray(
|
353 |
-
shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
|
354 |
-
)
|
355 |
-
h = bp["crop_stop"] - bp["crop_start"]
|
356 |
-
spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
|
357 |
-
:, offset : offset + h, :
|
358 |
-
]
|
359 |
-
|
360 |
-
offset += h
|
361 |
-
if d == bands_n: # higher
|
362 |
-
if extra_bins_h: # if --high_end_process bypass
|
363 |
-
max_bin = bp["n_fft"] // 2
|
364 |
-
spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
|
365 |
-
:, :extra_bins_h, :
|
366 |
-
]
|
367 |
-
if bp["hpf_start"] > 0:
|
368 |
-
spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
|
369 |
-
if bands_n == 1:
|
370 |
-
wave = spectrogram_to_wave(
|
371 |
-
spec_s,
|
372 |
-
bp["hl"],
|
373 |
-
mp.param["mid_side"],
|
374 |
-
mp.param["mid_side_b2"],
|
375 |
-
mp.param["reverse"],
|
376 |
-
)
|
377 |
-
else:
|
378 |
-
wave = np.add(
|
379 |
-
wave,
|
380 |
-
spectrogram_to_wave(
|
381 |
-
spec_s,
|
382 |
-
bp["hl"],
|
383 |
-
mp.param["mid_side"],
|
384 |
-
mp.param["mid_side_b2"],
|
385 |
-
mp.param["reverse"],
|
386 |
-
),
|
387 |
-
)
|
388 |
-
else:
|
389 |
-
sr = mp.param["band"][d + 1]["sr"]
|
390 |
-
if d == 1: # lower
|
391 |
-
spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
|
392 |
-
wave = librosa.resample(
|
393 |
-
spectrogram_to_wave(
|
394 |
-
spec_s,
|
395 |
-
bp["hl"],
|
396 |
-
mp.param["mid_side"],
|
397 |
-
mp.param["mid_side_b2"],
|
398 |
-
mp.param["reverse"],
|
399 |
-
),
|
400 |
-
bp["sr"],
|
401 |
-
sr,
|
402 |
-
res_type="sinc_fastest",
|
403 |
-
)
|
404 |
-
else: # mid
|
405 |
-
spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
|
406 |
-
spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
|
407 |
-
wave2 = np.add(
|
408 |
-
wave,
|
409 |
-
spectrogram_to_wave(
|
410 |
-
spec_s,
|
411 |
-
bp["hl"],
|
412 |
-
mp.param["mid_side"],
|
413 |
-
mp.param["mid_side_b2"],
|
414 |
-
mp.param["reverse"],
|
415 |
-
),
|
416 |
-
)
|
417 |
-
# wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
|
418 |
-
wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
|
419 |
-
|
420 |
-
return wave.T
|
421 |
-
|
422 |
-
|
423 |
-
def fft_lp_filter(spec, bin_start, bin_stop):
|
424 |
-
g = 1.0
|
425 |
-
for b in range(bin_start, bin_stop):
|
426 |
-
g -= 1 / (bin_stop - bin_start)
|
427 |
-
spec[:, b, :] = g * spec[:, b, :]
|
428 |
-
|
429 |
-
spec[:, bin_stop:, :] *= 0
|
430 |
-
|
431 |
-
return spec
|
432 |
-
|
433 |
-
|
434 |
-
def fft_hp_filter(spec, bin_start, bin_stop):
|
435 |
-
g = 1.0
|
436 |
-
for b in range(bin_start, bin_stop, -1):
|
437 |
-
g -= 1 / (bin_start - bin_stop)
|
438 |
-
spec[:, b, :] = g * spec[:, b, :]
|
439 |
-
|
440 |
-
spec[:, 0 : bin_stop + 1, :] *= 0
|
441 |
-
|
442 |
-
return spec
|
443 |
-
|
444 |
-
|
445 |
-
def mirroring(a, spec_m, input_high_end, mp):
|
446 |
-
if "mirroring" == a:
|
447 |
-
mirror = np.flip(
|
448 |
-
np.abs(
|
449 |
-
spec_m[
|
450 |
-
:,
|
451 |
-
mp.param["pre_filter_start"]
|
452 |
-
- 10
|
453 |
-
- input_high_end.shape[1] : mp.param["pre_filter_start"]
|
454 |
-
- 10,
|
455 |
-
:,
|
456 |
-
]
|
457 |
-
),
|
458 |
-
1,
|
459 |
-
)
|
460 |
-
mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
|
461 |
-
|
462 |
-
return np.where(
|
463 |
-
np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
|
464 |
-
)
|
465 |
-
|
466 |
-
if "mirroring2" == a:
|
467 |
-
mirror = np.flip(
|
468 |
-
np.abs(
|
469 |
-
spec_m[
|
470 |
-
:,
|
471 |
-
mp.param["pre_filter_start"]
|
472 |
-
- 10
|
473 |
-
- input_high_end.shape[1] : mp.param["pre_filter_start"]
|
474 |
-
- 10,
|
475 |
-
:,
|
476 |
-
]
|
477 |
-
),
|
478 |
-
1,
|
479 |
-
)
|
480 |
-
mi = np.multiply(mirror, input_high_end * 1.7)
|
481 |
-
|
482 |
-
return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
|
483 |
-
|
484 |
-
|
485 |
-
def ensembling(a, specs):
|
486 |
-
for i in range(1, len(specs)):
|
487 |
-
if i == 1:
|
488 |
-
spec = specs[0]
|
489 |
-
|
490 |
-
ln = min([spec.shape[2], specs[i].shape[2]])
|
491 |
-
spec = spec[:, :, :ln]
|
492 |
-
specs[i] = specs[i][:, :, :ln]
|
493 |
-
|
494 |
-
if "min_mag" == a:
|
495 |
-
spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
|
496 |
-
if "max_mag" == a:
|
497 |
-
spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
|
498 |
-
|
499 |
-
return spec
|
500 |
-
|
501 |
-
|
502 |
-
def stft(wave, nfft, hl):
|
503 |
-
wave_left = np.asfortranarray(wave[0])
|
504 |
-
wave_right = np.asfortranarray(wave[1])
|
505 |
-
spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
|
506 |
-
spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
|
507 |
-
spec = np.asfortranarray([spec_left, spec_right])
|
508 |
-
|
509 |
-
return spec
|
510 |
-
|
511 |
-
|
512 |
-
def istft(spec, hl):
|
513 |
-
spec_left = np.asfortranarray(spec[0])
|
514 |
-
spec_right = np.asfortranarray(spec[1])
|
515 |
-
|
516 |
-
wave_left = librosa.istft(spec_left, hop_length=hl)
|
517 |
-
wave_right = librosa.istft(spec_right, hop_length=hl)
|
518 |
-
wave = np.asfortranarray([wave_left, wave_right])
|
519 |
-
|
520 |
-
|
521 |
-
if __name__ == "__main__":
|
522 |
-
import cv2
|
523 |
-
import sys
|
524 |
-
import time
|
525 |
-
import argparse
|
526 |
-
from model_param_init import ModelParameters
|
527 |
-
|
528 |
-
p = argparse.ArgumentParser()
|
529 |
-
p.add_argument(
|
530 |
-
"--algorithm",
|
531 |
-
"-a",
|
532 |
-
type=str,
|
533 |
-
choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
|
534 |
-
default="min_mag",
|
535 |
-
)
|
536 |
-
p.add_argument(
|
537 |
-
"--model_params",
|
538 |
-
"-m",
|
539 |
-
type=str,
|
540 |
-
default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
|
541 |
-
)
|
542 |
-
p.add_argument("--output_name", "-o", type=str, default="output")
|
543 |
-
p.add_argument("--vocals_only", "-v", action="store_true")
|
544 |
-
p.add_argument("input", nargs="+")
|
545 |
-
args = p.parse_args()
|
546 |
-
|
547 |
-
start_time = time.time()
|
548 |
-
|
549 |
-
if args.algorithm.startswith("invert") and len(args.input) != 2:
|
550 |
-
raise ValueError("There should be two input files.")
|
551 |
-
|
552 |
-
if not args.algorithm.startswith("invert") and len(args.input) < 2:
|
553 |
-
raise ValueError("There must be at least two input files.")
|
554 |
-
|
555 |
-
wave, specs = {}, {}
|
556 |
-
mp = ModelParameters(args.model_params)
|
557 |
-
|
558 |
-
for i in range(len(args.input)):
|
559 |
-
spec = {}
|
560 |
-
|
561 |
-
for d in range(len(mp.param["band"]), 0, -1):
|
562 |
-
bp = mp.param["band"][d]
|
563 |
-
|
564 |
-
if d == len(mp.param["band"]): # high-end band
|
565 |
-
wave[d], _ = librosa.load(
|
566 |
-
args.input[i],
|
567 |
-
bp["sr"],
|
568 |
-
False,
|
569 |
-
dtype=np.float32,
|
570 |
-
res_type=bp["res_type"],
|
571 |
-
)
|
572 |
-
|
573 |
-
if len(wave[d].shape) == 1: # mono to stereo
|
574 |
-
wave[d] = np.array([wave[d], wave[d]])
|
575 |
-
else: # lower bands
|
576 |
-
wave[d] = librosa.resample(
|
577 |
-
wave[d + 1],
|
578 |
-
mp.param["band"][d + 1]["sr"],
|
579 |
-
bp["sr"],
|
580 |
-
res_type=bp["res_type"],
|
581 |
-
)
|
582 |
-
|
583 |
-
spec[d] = wave_to_spectrogram(
|
584 |
-
wave[d],
|
585 |
-
bp["hl"],
|
586 |
-
bp["n_fft"],
|
587 |
-
mp.param["mid_side"],
|
588 |
-
mp.param["mid_side_b2"],
|
589 |
-
mp.param["reverse"],
|
590 |
-
)
|
591 |
-
|
592 |
-
specs[i] = combine_spectrograms(spec, mp)
|
593 |
-
|
594 |
-
del wave
|
595 |
-
|
596 |
-
if args.algorithm == "deep":
|
597 |
-
d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1])
|
598 |
-
v_spec = d_spec - specs[1]
|
599 |
-
sf.write(
|
600 |
-
os.path.join("{}.wav".format(args.output_name)),
|
601 |
-
cmb_spectrogram_to_wave(v_spec, mp),
|
602 |
-
mp.param["sr"],
|
603 |
-
)
|
604 |
-
|
605 |
-
if args.algorithm.startswith("invert"):
|
606 |
-
ln = min([specs[0].shape[2], specs[1].shape[2]])
|
607 |
-
specs[0] = specs[0][:, :, :ln]
|
608 |
-
specs[1] = specs[1][:, :, :ln]
|
609 |
-
|
610 |
-
if "invert_p" == args.algorithm:
|
611 |
-
X_mag = np.abs(specs[0])
|
612 |
-
y_mag = np.abs(specs[1])
|
613 |
-
max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
|
614 |
-
v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
|
615 |
-
else:
|
616 |
-
specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
|
617 |
-
v_spec = specs[0] - specs[1]
|
618 |
-
|
619 |
-
if not args.vocals_only:
|
620 |
-
X_mag = np.abs(specs[0])
|
621 |
-
y_mag = np.abs(specs[1])
|
622 |
-
v_mag = np.abs(v_spec)
|
623 |
-
|
624 |
-
X_image = spectrogram_to_image(X_mag)
|
625 |
-
y_image = spectrogram_to_image(y_mag)
|
626 |
-
v_image = spectrogram_to_image(v_mag)
|
627 |
-
|
628 |
-
cv2.imwrite("{}_X.png".format(args.output_name), X_image)
|
629 |
-
cv2.imwrite("{}_y.png".format(args.output_name), y_image)
|
630 |
-
cv2.imwrite("{}_v.png".format(args.output_name), v_image)
|
631 |
-
|
632 |
-
sf.write(
|
633 |
-
"{}_X.wav".format(args.output_name),
|
634 |
-
cmb_spectrogram_to_wave(specs[0], mp),
|
635 |
-
mp.param["sr"],
|
636 |
-
)
|
637 |
-
sf.write(
|
638 |
-
"{}_y.wav".format(args.output_name),
|
639 |
-
cmb_spectrogram_to_wave(specs[1], mp),
|
640 |
-
mp.param["sr"],
|
641 |
-
)
|
642 |
-
|
643 |
-
sf.write(
|
644 |
-
"{}_v.wav".format(args.output_name),
|
645 |
-
cmb_spectrogram_to_wave(v_spec, mp),
|
646 |
-
mp.param["sr"],
|
647 |
-
)
|
648 |
-
else:
|
649 |
-
if not args.algorithm == "deep":
|
650 |
-
sf.write(
|
651 |
-
os.path.join("ensembled", "{}.wav".format(args.output_name)),
|
652 |
-
cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
|
653 |
-
mp.param["sr"],
|
654 |
-
)
|
655 |
-
|
656 |
-
if args.algorithm == "align":
|
657 |
-
trackalignment = [
|
658 |
-
{
|
659 |
-
"file1": '"{}"'.format(args.input[0]),
|
660 |
-
"file2": '"{}"'.format(args.input[1]),
|
661 |
-
}
|
662 |
-
]
|
663 |
-
|
664 |
-
for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
|
665 |
-
os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
|
666 |
-
|
667 |
-
# print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Amanda El Aventurero Descargar Apk 2023.md
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Amanda el aventurero Descargar APK 2023: Cómo jugar el espeluznante juego de terror de dibujos animados en su dispositivo Android</h1>
|
3 |
-
<p>Si estás buscando un juego de terror único y aterrador para jugar en tu dispositivo Android, es posible que quieras echar un vistazo a Amanda the Adventurer. Este juego es un juego de terror al estilo de dibujos animados de imágenes encontradas que te hará cuestionar tu cordura mientras ves e interactúas con una serie de cintas de VHS con un programa infantil aparentemente inocente. En este artículo, te diremos todo lo que necesitas saber sobre Amanda the Adventurer, por qué deberías descargar su archivo APK, cómo instalarlo en tu dispositivo y cómo jugarlo. </p>
|
4 |
-
<h2>amanda el aventurero descargar apk 2023</h2><br /><p><b><b>Download Zip</b> ✑ ✑ ✑ <a href="https://bltlly.com/2v6MIY">https://bltlly.com/2v6MIY</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Amanda la aventurera? </h2>
|
6 |
-
<h3>Una breve introducción al juego y su premisa</h3>
|
7 |
-
<p>Amanda the Adventurer es un juego de terror desarrollado por MANGLEDmaw Games y publicado por DreadXP. Fue lanzado en Steam en abril de 2023 y recibió críticas abrumadoramente positivas de jugadores y críticos por igual. El juego sigue a Riley Park, quien hereda la casa de su tía Kate después de su muerte. Mientras explora el ático, Riley encuentra una pila de cintas de VHS junto a un viejo televisor. Las cintas parecen ser episodios de una caricatura infantil de principios de la década de 2000 que nunca habían visto antes, protagonizada por una niña llamada Amanda y su leal pero tímida mejor amiga, Wooly the Sheep. Sin embargo, mientras Riley mira las cintas, se dan cuenta de que algo está muy mal con Amanda y Wooly, y que parecen estar comunicándose directamente a través del televisor. ¿Qué secretos se esconden en estas cintas? ¿Y qué quiere Amanda de Riley? </p>
|
8 |
-
<h3>Las principales características y elementos de juego del juego</h3>
|
9 |
-
<p>Amanda the Adventurer es una corta pero intrincada experiencia de terror para un solo jugador que combina cintas animadas con rompecabezas estilo sala de escape. El juego tiene varias características que lo hacen destacar de otros juegos de terror, como:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Clásico, CGI estilo 90 que recuerda una era más simple de animación</li>
|
12 |
-
|
13 |
-
<li>Desafiante, escapar de rompecabezas de estilo sala de usar pistas ocultas dentro de las cintas</li>
|
14 |
-
<li>Múltiples finales dependiendo de sus opciones y acciones</li>
|
15 |
-
<li>Una historia oscura y retorcida que te mantendrá al límite hasta el final</li>
|
16 |
-
</ul>
|
17 |
-
<h2>¿Por qué descargar Amanda el aventurero APK 2023? </h2>
|
18 |
-
<h3>Los beneficios de descargar el archivo APK en lugar de usar Google Play Store</h3>
|
19 |
-
<p>Un archivo APK es un archivo de paquete utilizado para distribuir e instalar aplicaciones en dispositivos Android. Puede descargar archivos APK de varios sitios web en lugar de usar Google Play Store por varias razones, como:</p>
|
20 |
-
<ul>
|
21 |
-
<li> Puede acceder a aplicaciones que no están disponibles o compatibles con su dispositivo o región</li>
|
22 |
-
<li> Puede obtener aplicaciones que se actualizan más rápido o tienen más características que sus versiones oficiales</li>
|
23 |
-
<li>Puede ahorrar espacio de almacenamiento instalando solo las partes de una aplicación que necesita</ <h3>Los riesgos y precauciones de descargar archivos APK de fuentes desconocidas</h3>
|
24 |
-
<p>Sin embargo, la descarga de archivos APK de fuentes desconocidas también viene con algunos riesgos y desventajas, como:</p>
|
25 |
-
<ul>
|
26 |
-
<li>Puede descargar malware o virus que pueden dañar su dispositivo o robar sus datos</li>
|
27 |
-
<li>Usted puede violar los términos y condiciones del desarrollador de aplicaciones o Google Play Store</li>
|
28 |
-
<li>Es posible que no reciba actualizaciones o soporte del desarrollador de aplicaciones o Google Play Store</li>
|
29 |
-
<li>Es posible que encuentre problemas de compatibilidad o rendimiento con su dispositivo u otras aplicaciones</li>
|
30 |
-
</ul>
|
31 |
-
<p>Por lo tanto, siempre debe tener cuidado y precaución al descargar archivos APK de fuentes desconocidas. Aquí hay algunos consejos para ayudarle a evitar cualquier problema:</p>
|
32 |
-
<p></p>
|
33 |
-
<ul>
|
34 |
-
<li> Solo descargar archivos APK de sitios web de confianza y de buena reputación que tienen críticas y valoraciones positivas</li>
|
35 |
-
<li>Escanear el archivo APK con un antivirus o escáner de malware antes de instalarlo en su dispositivo</li>
|
36 |
-
<li>Compruebe los permisos y el acceso que las solicitudes de archivos APK y asegúrese de que son razonables y necesarias para la aplicación</li>
|
37 |
-
|
38 |
-
</ul>
|
39 |
-
<h2>¿Cómo descargar e instalar Amanda el Aventurero APK 2023? </h2>
|
40 |
-
<h3>Los pasos para descargar el archivo APK de un sitio web de confianza</h3>
|
41 |
-
<p>Si desea descargar Amanda el aventurero APK 2023, puede seguir estos sencillos pasos:</p>
|
42 |
-
<ol>
|
43 |
-
<li>Ir a un sitio web de confianza que ofrece Amanda el Aventurero APK 2023, tales como [APKPure] o [APKMirror]</li>
|
44 |
-
<li>Busca a Amanda la aventurera en la barra de búsqueda del sitio web o navega por las categorías hasta que lo encuentres</li>
|
45 |
-
<li>Haga clic en el botón de descarga y espere a que el archivo APK se descargue en su dispositivo</li>
|
46 |
-
<li>Busque el archivo APK en el administrador de archivos de su dispositivo o carpeta de descargas y toque en él para abrirlo</li>
|
47 |
-
</ol>
|
48 |
-
<h3>Los pasos para habilitar fuentes desconocidas e instalar el archivo APK en su dispositivo Android</h3>
|
49 |
-
<p>Antes de que pueda instalar Amanda el Aventurero APK 2023 en su dispositivo Android, es necesario habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, siga estos pasos:</p>
|
50 |
-
<ol>
|
51 |
-
<li>Ir a la configuración de su dispositivo y buscar opciones de seguridad o privacidad</li>
|
52 |
-
<li>Encontrar la opción que dice fuentes desconocidas o instalar aplicaciones desconocidas y alternar en</li>
|
53 |
-
<li> Aparecerá un mensaje de advertencia pidiéndole que confirme su elección. Pulse en OK o Permitir que proceda</li>
|
54 |
-
<li> Ahora puede volver al archivo APK que ha descargado y toque en él de nuevo para iniciar el proceso de instalación</li>
|
55 |
-
<li>Siga las instrucciones en la pantalla y espere a que la instalación termine</li>
|
56 |
-
<li> Ahora puede iniciar Amanda el aventurero desde el cajón de la aplicación o la pantalla de inicio y disfrutar del juego</li>
|
57 |
-
</ol>
|
58 |
-
<h2>¿Cómo se juega Amanda el aventurero en su dispositivo Android? </h2>
|
59 |
-
<h3>Los controles básicos y la interfaz del juego</h3>
|
60 |
-
|
61 |
-
<ul>
|
62 |
-
<li>Para ver una cinta, toque en ella en el menú del ático y luego toque en jugar. Puede pausar, rebobinar, avanzar rápidamente o detener la cinta usando los botones de la pantalla del televisor. </li>
|
63 |
-
<li>Para interactuar con objetos en las cintas, toque en ellos cuando brillan. También puede arrastrarlos o combinarlos con otros objetos. </li>
|
64 |
-
<li>Para acceder a su inventario, toque en el icono de la mochila en la parte inferior de la pantalla. Puede ver qué artículos ha recogido y utilizarlos cuando sea necesario. </li>
|
65 |
-
<li>Para acceder a su diario, toque en el icono del cuaderno en la parte inferior de la pantalla. Puedes ver qué pistas has encontrado y qué puzzles necesitas resolver. </li>
|
66 |
-
<li>Para acceder a la configuración, toque en el icono de engranaje en la esquina superior derecha de la pantalla. Puede ajustar su sonido, gráficos, idioma y otras opciones. </li>
|
67 |
-
</ul>
|
68 |
-
<h3>Los consejos y trucos para sobrevivir y resolver los puzzles en el juego</h3>
|
69 |
-
<p>Amanda el aventurero es un juego desafiante que pondrá a prueba tu ingenio y nervios. Aquí hay algunos consejos y trucos para ayudarle a sobrevivir y resolver los puzzles en el juego:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Presta atención a todo lo que Amanda y Wooly dicen y hacen. Pueden darte pistas sobre qué hacer a continuación. </li>
|
72 |
-
<li>Explora cada escena a fondo y busca objetos ocultos o detalles que puedan ser útiles. </li>
|
73 |
-
<li>Usa tu inventario sabiamente y prueba diferentes combinaciones de objetos para ver si funcionan juntos. </li>
|
74 |
-
<li>Usa tu diario con frecuencia y revisa las pistas y rompecabezas que has encontrado. Podrías notar algo nuevo o recordar algo importante. </li>
|
75 |
-
<li>No tengas miedo de experimentar y probar cosas diferentes. Nunca sabes lo que puede pasar o lo que puedes descubrir. </li>
|
76 |
-
<li>Tenga cuidado y precaución cuando se trata de Amanda y Wooly. Pueden parecer amigables e inofensivos, pero también pueden ser peligrosos e impredecibles. </li>
|
77 |
-
</ul>
|
78 |
-
<h2>Conclusión</h2>
|
79 |
-
|
80 |
-
<p>¿Estás listo para unirte a Amanda la aventurera en su viaje? Descarga el juego ahora y ver por ti mismo! </p>
|
81 |
-
<h2>Preguntas frecuentes</h2>
|
82 |
-
<h3>¿Cuáles son los requisitos del sistema para Amanda the Adventurer? </h3>
|
83 |
-
<p>Amanda the Adventurer es un juego relativamente ligero que no requiere muchos recursos para funcionar. Sin embargo, todavía necesita tener un dispositivo Android compatible que cumpla con los siguientes requisitos mínimos del sistema:</p>
|
84 |
-
<ul>
|
85 |
-
<li>Versión para Android: 4.4 o superior</li>
|
86 |
-
<li>RAM: 1 GB o más</li>
|
87 |
-
<li>Almacenamiento: 500 MB o más</li>
|
88 |
-
<li>Conexión a Internet: No se requiere (pero se recomienda para actualizaciones y soporte)</li>
|
89 |
-
</ul>
|
90 |
-
<h3>¿Amanda la aventurera es libre de jugar? </h3>
|
91 |
-
<p>Amanda el Aventurero no es un juego gratis. Cuesta $4.99 en Steam y $3.99 en Google Play Store. Sin embargo, puede descargar su archivo APK de forma gratuita desde algunos sitios web que lo ofrecen. Sin embargo, usted debe ser consciente de los riesgos y desventajas de descargar archivos APK de fuentes desconocidas, como hemos explicado anteriormente. </p>
|
92 |
-
<h3>¿Cuánto tiempo es Amanda la aventurera? </h3>
|
93 |
-
<p>Amanda el aventurero es un juego corto que se puede completar en aproximadamente una hora o menos. Sin embargo, el juego tiene múltiples finales que dependen de tus elecciones y acciones a lo largo del juego. Por lo tanto, es posible que desee volver a jugar el juego varias veces para ver todos los posibles resultados y secretos. </p>
|
94 |
-
<h3>¿Amanda la aventurera tiene múltiples finales? </h3>
|
95 |
-
<p>Sí, Amanda la aventurera tiene múltiples finales que varían en función de sus opciones y acciones a lo largo del juego. Algunos finales son buenos, algunos son malos, y algunos son francamente horripilantes. Puedes intentar lograr todos los finales tomando diferentes decisiones y explorando diferentes caminos en el juego. </p>
|
96 |
-
<h3>¿Amanda la aventurera se basa en una historia real? </h3> 64aa2da5cf<br />
|
97 |
-
<br />
|
98 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/html.py
DELETED
@@ -1,991 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
pygments.formatters.html
|
3 |
-
~~~~~~~~~~~~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
Formatter for HTML output.
|
6 |
-
|
7 |
-
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
|
8 |
-
:license: BSD, see LICENSE for details.
|
9 |
-
"""
|
10 |
-
|
11 |
-
import functools
|
12 |
-
import os
|
13 |
-
import sys
|
14 |
-
import os.path
|
15 |
-
from io import StringIO
|
16 |
-
|
17 |
-
from pip._vendor.pygments.formatter import Formatter
|
18 |
-
from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES
|
19 |
-
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt
|
20 |
-
|
21 |
-
try:
|
22 |
-
import ctags
|
23 |
-
except ImportError:
|
24 |
-
ctags = None
|
25 |
-
|
26 |
-
__all__ = ['HtmlFormatter']
|
27 |
-
|
28 |
-
|
29 |
-
_escape_html_table = {
|
30 |
-
ord('&'): '&',
|
31 |
-
ord('<'): '<',
|
32 |
-
ord('>'): '>',
|
33 |
-
ord('"'): '"',
|
34 |
-
ord("'"): ''',
|
35 |
-
}
|
36 |
-
|
37 |
-
|
38 |
-
def escape_html(text, table=_escape_html_table):
|
39 |
-
"""Escape &, <, > as well as single and double quotes for HTML."""
|
40 |
-
return text.translate(table)
|
41 |
-
|
42 |
-
|
43 |
-
def webify(color):
|
44 |
-
if color.startswith('calc') or color.startswith('var'):
|
45 |
-
return color
|
46 |
-
else:
|
47 |
-
return '#' + color
|
48 |
-
|
49 |
-
|
50 |
-
def _get_ttype_class(ttype):
|
51 |
-
fname = STANDARD_TYPES.get(ttype)
|
52 |
-
if fname:
|
53 |
-
return fname
|
54 |
-
aname = ''
|
55 |
-
while fname is None:
|
56 |
-
aname = '-' + ttype[-1] + aname
|
57 |
-
ttype = ttype.parent
|
58 |
-
fname = STANDARD_TYPES.get(ttype)
|
59 |
-
return fname + aname
|
60 |
-
|
61 |
-
|
62 |
-
CSSFILE_TEMPLATE = '''\
|
63 |
-
/*
|
64 |
-
generated by Pygments <https://pygments.org/>
|
65 |
-
Copyright 2006-2022 by the Pygments team.
|
66 |
-
Licensed under the BSD license, see LICENSE for details.
|
67 |
-
*/
|
68 |
-
%(styledefs)s
|
69 |
-
'''
|
70 |
-
|
71 |
-
DOC_HEADER = '''\
|
72 |
-
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
73 |
-
"http://www.w3.org/TR/html4/strict.dtd">
|
74 |
-
<!--
|
75 |
-
generated by Pygments <https://pygments.org/>
|
76 |
-
Copyright 2006-2022 by the Pygments team.
|
77 |
-
Licensed under the BSD license, see LICENSE for details.
|
78 |
-
-->
|
79 |
-
<html>
|
80 |
-
<head>
|
81 |
-
<title>%(title)s</title>
|
82 |
-
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
83 |
-
<style type="text/css">
|
84 |
-
''' + CSSFILE_TEMPLATE + '''
|
85 |
-
</style>
|
86 |
-
</head>
|
87 |
-
<body>
|
88 |
-
<h2>%(title)s</h2>
|
89 |
-
|
90 |
-
'''
|
91 |
-
|
92 |
-
DOC_HEADER_EXTERNALCSS = '''\
|
93 |
-
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
94 |
-
"http://www.w3.org/TR/html4/strict.dtd">
|
95 |
-
|
96 |
-
<html>
|
97 |
-
<head>
|
98 |
-
<title>%(title)s</title>
|
99 |
-
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
100 |
-
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
|
101 |
-
</head>
|
102 |
-
<body>
|
103 |
-
<h2>%(title)s</h2>
|
104 |
-
|
105 |
-
'''
|
106 |
-
|
107 |
-
DOC_FOOTER = '''\
|
108 |
-
</body>
|
109 |
-
</html>
|
110 |
-
'''
|
111 |
-
|
112 |
-
|
113 |
-
class HtmlFormatter(Formatter):
|
114 |
-
r"""
|
115 |
-
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
|
116 |
-
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
|
117 |
-
option.
|
118 |
-
|
119 |
-
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
|
120 |
-
additionally wrapped inside a ``<table>`` which has one row and two
|
121 |
-
cells: one containing the line numbers and one containing the code.
|
122 |
-
Example:
|
123 |
-
|
124 |
-
.. sourcecode:: html
|
125 |
-
|
126 |
-
<div class="highlight" >
|
127 |
-
<table><tr>
|
128 |
-
<td class="linenos" title="click to toggle"
|
129 |
-
onclick="with (this.firstChild.style)
|
130 |
-
{ display = (display == '') ? 'none' : '' }">
|
131 |
-
<pre>1
|
132 |
-
2</pre>
|
133 |
-
</td>
|
134 |
-
<td class="code">
|
135 |
-
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
|
136 |
-
<span class="Ke">pass</span>
|
137 |
-
</pre>
|
138 |
-
</td>
|
139 |
-
</tr></table></div>
|
140 |
-
|
141 |
-
(whitespace added to improve clarity).
|
142 |
-
|
143 |
-
Wrapping can be disabled using the `nowrap` option.
|
144 |
-
|
145 |
-
A list of lines can be specified using the `hl_lines` option to make these
|
146 |
-
lines highlighted (as of Pygments 0.11).
|
147 |
-
|
148 |
-
With the `full` option, a complete HTML 4 document is output, including
|
149 |
-
the style definitions inside a ``<style>`` tag, or in a separate file if
|
150 |
-
the `cssfile` option is given.
|
151 |
-
|
152 |
-
When `tagsfile` is set to the path of a ctags index file, it is used to
|
153 |
-
generate hyperlinks from names to their definition. You must enable
|
154 |
-
`lineanchors` and run ctags with the `-n` option for this to work. The
|
155 |
-
`python-ctags` module from PyPI must be installed to use this feature;
|
156 |
-
otherwise a `RuntimeError` will be raised.
|
157 |
-
|
158 |
-
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
|
159 |
-
containing CSS rules for the CSS classes used by the formatter. The
|
160 |
-
argument `arg` can be used to specify additional CSS selectors that
|
161 |
-
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
|
162 |
-
would result in the following CSS classes:
|
163 |
-
|
164 |
-
.. sourcecode:: css
|
165 |
-
|
166 |
-
td .code .kw { font-weight: bold; color: #00FF00 }
|
167 |
-
td .code .cm { color: #999999 }
|
168 |
-
...
|
169 |
-
|
170 |
-
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
|
171 |
-
`get_style_defs()` method to request multiple prefixes for the tokens:
|
172 |
-
|
173 |
-
.. sourcecode:: python
|
174 |
-
|
175 |
-
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
|
176 |
-
|
177 |
-
The output would then look like this:
|
178 |
-
|
179 |
-
.. sourcecode:: css
|
180 |
-
|
181 |
-
div.syntax pre .kw,
|
182 |
-
pre.syntax .kw { font-weight: bold; color: #00FF00 }
|
183 |
-
div.syntax pre .cm,
|
184 |
-
pre.syntax .cm { color: #999999 }
|
185 |
-
...
|
186 |
-
|
187 |
-
Additional options accepted:
|
188 |
-
|
189 |
-
`nowrap`
|
190 |
-
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
|
191 |
-
tag. This disables most other options (default: ``False``).
|
192 |
-
|
193 |
-
`full`
|
194 |
-
Tells the formatter to output a "full" document, i.e. a complete
|
195 |
-
self-contained document (default: ``False``).
|
196 |
-
|
197 |
-
`title`
|
198 |
-
If `full` is true, the title that should be used to caption the
|
199 |
-
document (default: ``''``).
|
200 |
-
|
201 |
-
`style`
|
202 |
-
The style to use, can be a string or a Style subclass (default:
|
203 |
-
``'default'``). This option has no effect if the `cssfile`
|
204 |
-
and `noclobber_cssfile` option are given and the file specified in
|
205 |
-
`cssfile` exists.
|
206 |
-
|
207 |
-
`noclasses`
|
208 |
-
If set to true, token ``<span>`` tags (as well as line number elements)
|
209 |
-
will not use CSS classes, but inline styles. This is not recommended
|
210 |
-
for larger pieces of code since it increases output size by quite a bit
|
211 |
-
(default: ``False``).
|
212 |
-
|
213 |
-
`classprefix`
|
214 |
-
Since the token types use relatively short class names, they may clash
|
215 |
-
with some of your own class names. In this case you can use the
|
216 |
-
`classprefix` option to give a string to prepend to all Pygments-generated
|
217 |
-
CSS class names for token types.
|
218 |
-
Note that this option also affects the output of `get_style_defs()`.
|
219 |
-
|
220 |
-
`cssclass`
|
221 |
-
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
|
222 |
-
If you set this option, the default selector for `get_style_defs()`
|
223 |
-
will be this class.
|
224 |
-
|
225 |
-
.. versionadded:: 0.9
|
226 |
-
If you select the ``'table'`` line numbers, the wrapping table will
|
227 |
-
have a CSS class of this string plus ``'table'``, the default is
|
228 |
-
accordingly ``'highlighttable'``.
|
229 |
-
|
230 |
-
`cssstyles`
|
231 |
-
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
|
232 |
-
|
233 |
-
`prestyles`
|
234 |
-
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
|
235 |
-
|
236 |
-
.. versionadded:: 0.11
|
237 |
-
|
238 |
-
`cssfile`
|
239 |
-
If the `full` option is true and this option is given, it must be the
|
240 |
-
name of an external file. If the filename does not include an absolute
|
241 |
-
path, the file's path will be assumed to be relative to the main output
|
242 |
-
file's path, if the latter can be found. The stylesheet is then written
|
243 |
-
to this file instead of the HTML file.
|
244 |
-
|
245 |
-
.. versionadded:: 0.6
|
246 |
-
|
247 |
-
`noclobber_cssfile`
|
248 |
-
If `cssfile` is given and the specified file exists, the css file will
|
249 |
-
not be overwritten. This allows the use of the `full` option in
|
250 |
-
combination with a user specified css file. Default is ``False``.
|
251 |
-
|
252 |
-
.. versionadded:: 1.1
|
253 |
-
|
254 |
-
`linenos`
|
255 |
-
If set to ``'table'``, output line numbers as a table with two cells,
|
256 |
-
one containing the line numbers, the other the whole code. This is
|
257 |
-
copy-and-paste-friendly, but may cause alignment problems with some
|
258 |
-
browsers or fonts. If set to ``'inline'``, the line numbers will be
|
259 |
-
integrated in the ``<pre>`` tag that contains the code (that setting
|
260 |
-
is *new in Pygments 0.8*).
|
261 |
-
|
262 |
-
For compatibility with Pygments 0.7 and earlier, every true value
|
263 |
-
except ``'inline'`` means the same as ``'table'`` (in particular, that
|
264 |
-
means also ``True``).
|
265 |
-
|
266 |
-
The default value is ``False``, which means no line numbers at all.
|
267 |
-
|
268 |
-
**Note:** with the default ("table") line number mechanism, the line
|
269 |
-
numbers and code can have different line heights in Internet Explorer
|
270 |
-
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
|
271 |
-
CSS property (you get the default line spacing with ``line-height:
|
272 |
-
125%``).
|
273 |
-
|
274 |
-
`hl_lines`
|
275 |
-
Specify a list of lines to be highlighted. The line numbers are always
|
276 |
-
relative to the input (i.e. the first line is line 1) and are
|
277 |
-
independent of `linenostart`.
|
278 |
-
|
279 |
-
.. versionadded:: 0.11
|
280 |
-
|
281 |
-
`linenostart`
|
282 |
-
The line number for the first line (default: ``1``).
|
283 |
-
|
284 |
-
`linenostep`
|
285 |
-
If set to a number n > 1, only every nth line number is printed.
|
286 |
-
|
287 |
-
`linenospecial`
|
288 |
-
If set to a number n > 0, every nth line number is given the CSS
|
289 |
-
class ``"special"`` (default: ``0``).
|
290 |
-
|
291 |
-
`nobackground`
|
292 |
-
If set to ``True``, the formatter won't output the background color
|
293 |
-
for the wrapping element (this automatically defaults to ``False``
|
294 |
-
when there is no wrapping element [eg: no argument for the
|
295 |
-
`get_syntax_defs` method given]) (default: ``False``).
|
296 |
-
|
297 |
-
.. versionadded:: 0.6
|
298 |
-
|
299 |
-
`lineseparator`
|
300 |
-
This string is output between lines of code. It defaults to ``"\n"``,
|
301 |
-
which is enough to break a line inside ``<pre>`` tags, but you can
|
302 |
-
e.g. set it to ``"<br>"`` to get HTML line breaks.
|
303 |
-
|
304 |
-
.. versionadded:: 0.7
|
305 |
-
|
306 |
-
`lineanchors`
|
307 |
-
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
|
308 |
-
output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
|
309 |
-
This allows easy linking to certain lines.
|
310 |
-
|
311 |
-
.. versionadded:: 0.9
|
312 |
-
|
313 |
-
`linespans`
|
314 |
-
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
|
315 |
-
output line in a span tag with an ``id`` of ``foo-linenumber``.
|
316 |
-
This allows easy access to lines via javascript.
|
317 |
-
|
318 |
-
.. versionadded:: 1.6
|
319 |
-
|
320 |
-
`anchorlinenos`
|
321 |
-
If set to `True`, will wrap line numbers in <a> tags. Used in
|
322 |
-
combination with `linenos` and `lineanchors`.
|
323 |
-
|
324 |
-
`tagsfile`
|
325 |
-
If set to the path of a ctags file, wrap names in anchor tags that
|
326 |
-
link to their definitions. `lineanchors` should be used, and the
|
327 |
-
tags file should specify line numbers (see the `-n` option to ctags).
|
328 |
-
|
329 |
-
.. versionadded:: 1.6
|
330 |
-
|
331 |
-
`tagurlformat`
|
332 |
-
A string formatting pattern used to generate links to ctags definitions.
|
333 |
-
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
|
334 |
-
Defaults to an empty string, resulting in just `#prefix-number` links.
|
335 |
-
|
336 |
-
.. versionadded:: 1.6
|
337 |
-
|
338 |
-
`filename`
|
339 |
-
A string used to generate a filename when rendering ``<pre>`` blocks,
|
340 |
-
for example if displaying source code. If `linenos` is set to
|
341 |
-
``'table'`` then the filename will be rendered in an initial row
|
342 |
-
containing a single `<th>` which spans both columns.
|
343 |
-
|
344 |
-
.. versionadded:: 2.1
|
345 |
-
|
346 |
-
`wrapcode`
|
347 |
-
Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
|
348 |
-
by the HTML5 specification.
|
349 |
-
|
350 |
-
.. versionadded:: 2.4
|
351 |
-
|
352 |
-
`debug_token_types`
|
353 |
-
Add ``title`` attributes to all token ``<span>`` tags that show the
|
354 |
-
name of the token.
|
355 |
-
|
356 |
-
.. versionadded:: 2.10
|
357 |
-
|
358 |
-
|
359 |
-
**Subclassing the HTML formatter**
|
360 |
-
|
361 |
-
.. versionadded:: 0.7
|
362 |
-
|
363 |
-
The HTML formatter is now built in a way that allows easy subclassing, thus
|
364 |
-
customizing the output HTML code. The `format()` method calls
|
365 |
-
`self._format_lines()` which returns a generator that yields tuples of ``(1,
|
366 |
-
line)``, where the ``1`` indicates that the ``line`` is a line of the
|
367 |
-
formatted source code.
|
368 |
-
|
369 |
-
If the `nowrap` option is set, the generator is the iterated over and the
|
370 |
-
resulting HTML is output.
|
371 |
-
|
372 |
-
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
|
373 |
-
other generators. These may add some HTML code to the one generated by
|
374 |
-
`_format_lines()`, either by modifying the lines generated by the latter,
|
375 |
-
then yielding them again with ``(1, line)``, and/or by yielding other HTML
|
376 |
-
code before or after the lines, with ``(0, html)``. The distinction between
|
377 |
-
source lines and other code makes it possible to wrap the generator multiple
|
378 |
-
times.
|
379 |
-
|
380 |
-
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
|
381 |
-
|
382 |
-
A custom `HtmlFormatter` subclass could look like this:
|
383 |
-
|
384 |
-
.. sourcecode:: python
|
385 |
-
|
386 |
-
class CodeHtmlFormatter(HtmlFormatter):
|
387 |
-
|
388 |
-
def wrap(self, source, *, include_div):
|
389 |
-
return self._wrap_code(source)
|
390 |
-
|
391 |
-
def _wrap_code(self, source):
|
392 |
-
yield 0, '<code>'
|
393 |
-
for i, t in source:
|
394 |
-
if i == 1:
|
395 |
-
# it's a line of formatted code
|
396 |
-
t += '<br>'
|
397 |
-
yield i, t
|
398 |
-
yield 0, '</code>'
|
399 |
-
|
400 |
-
This results in wrapping the formatted lines with a ``<code>`` tag, where the
|
401 |
-
source lines are broken using ``<br>`` tags.
|
402 |
-
|
403 |
-
After calling `wrap()`, the `format()` method also adds the "line numbers"
|
404 |
-
and/or "full document" wrappers if the respective options are set. Then, all
|
405 |
-
HTML yielded by the wrapped generator is output.
|
406 |
-
"""
|
407 |
-
|
408 |
-
name = 'HTML'
|
409 |
-
aliases = ['html']
|
410 |
-
filenames = ['*.html', '*.htm']
|
411 |
-
|
412 |
-
def __init__(self, **options):
|
413 |
-
Formatter.__init__(self, **options)
|
414 |
-
self.title = self._decodeifneeded(self.title)
|
415 |
-
self.nowrap = get_bool_opt(options, 'nowrap', False)
|
416 |
-
self.noclasses = get_bool_opt(options, 'noclasses', False)
|
417 |
-
self.classprefix = options.get('classprefix', '')
|
418 |
-
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
|
419 |
-
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
|
420 |
-
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
|
421 |
-
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
|
422 |
-
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
|
423 |
-
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
|
424 |
-
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
|
425 |
-
self.filename = self._decodeifneeded(options.get('filename', ''))
|
426 |
-
self.wrapcode = get_bool_opt(options, 'wrapcode', False)
|
427 |
-
self.span_element_openers = {}
|
428 |
-
self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
|
429 |
-
|
430 |
-
if self.tagsfile:
|
431 |
-
if not ctags:
|
432 |
-
raise RuntimeError('The "ctags" package must to be installed '
|
433 |
-
'to be able to use the "tagsfile" feature.')
|
434 |
-
self._ctags = ctags.CTags(self.tagsfile)
|
435 |
-
|
436 |
-
linenos = options.get('linenos', False)
|
437 |
-
if linenos == 'inline':
|
438 |
-
self.linenos = 2
|
439 |
-
elif linenos:
|
440 |
-
# compatibility with <= 0.7
|
441 |
-
self.linenos = 1
|
442 |
-
else:
|
443 |
-
self.linenos = 0
|
444 |
-
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
|
445 |
-
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
|
446 |
-
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
|
447 |
-
self.nobackground = get_bool_opt(options, 'nobackground', False)
|
448 |
-
self.lineseparator = options.get('lineseparator', '\n')
|
449 |
-
self.lineanchors = options.get('lineanchors', '')
|
450 |
-
self.linespans = options.get('linespans', '')
|
451 |
-
self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
|
452 |
-
self.hl_lines = set()
|
453 |
-
for lineno in get_list_opt(options, 'hl_lines', []):
|
454 |
-
try:
|
455 |
-
self.hl_lines.add(int(lineno))
|
456 |
-
except ValueError:
|
457 |
-
pass
|
458 |
-
|
459 |
-
self._create_stylesheet()
|
460 |
-
|
461 |
-
def _get_css_class(self, ttype):
|
462 |
-
"""Return the css class of this token type prefixed with
|
463 |
-
the classprefix option."""
|
464 |
-
ttypeclass = _get_ttype_class(ttype)
|
465 |
-
if ttypeclass:
|
466 |
-
return self.classprefix + ttypeclass
|
467 |
-
return ''
|
468 |
-
|
469 |
-
def _get_css_classes(self, ttype):
|
470 |
-
"""Return the CSS classes of this token type prefixed with the classprefix option."""
|
471 |
-
cls = self._get_css_class(ttype)
|
472 |
-
while ttype not in STANDARD_TYPES:
|
473 |
-
ttype = ttype.parent
|
474 |
-
cls = self._get_css_class(ttype) + ' ' + cls
|
475 |
-
return cls or ''
|
476 |
-
|
477 |
-
def _get_css_inline_styles(self, ttype):
|
478 |
-
"""Return the inline CSS styles for this token type."""
|
479 |
-
cclass = self.ttype2class.get(ttype)
|
480 |
-
while cclass is None:
|
481 |
-
ttype = ttype.parent
|
482 |
-
cclass = self.ttype2class.get(ttype)
|
483 |
-
return cclass or ''
|
484 |
-
|
485 |
-
def _create_stylesheet(self):
|
486 |
-
t2c = self.ttype2class = {Token: ''}
|
487 |
-
c2s = self.class2style = {}
|
488 |
-
for ttype, ndef in self.style:
|
489 |
-
name = self._get_css_class(ttype)
|
490 |
-
style = ''
|
491 |
-
if ndef['color']:
|
492 |
-
style += 'color: %s; ' % webify(ndef['color'])
|
493 |
-
if ndef['bold']:
|
494 |
-
style += 'font-weight: bold; '
|
495 |
-
if ndef['italic']:
|
496 |
-
style += 'font-style: italic; '
|
497 |
-
if ndef['underline']:
|
498 |
-
style += 'text-decoration: underline; '
|
499 |
-
if ndef['bgcolor']:
|
500 |
-
style += 'background-color: %s; ' % webify(ndef['bgcolor'])
|
501 |
-
if ndef['border']:
|
502 |
-
style += 'border: 1px solid %s; ' % webify(ndef['border'])
|
503 |
-
if style:
|
504 |
-
t2c[ttype] = name
|
505 |
-
# save len(ttype) to enable ordering the styles by
|
506 |
-
# hierarchy (necessary for CSS cascading rules!)
|
507 |
-
c2s[name] = (style[:-2], ttype, len(ttype))
|
508 |
-
|
509 |
-
def get_style_defs(self, arg=None):
|
510 |
-
"""
|
511 |
-
Return CSS style definitions for the classes produced by the current
|
512 |
-
highlighting style. ``arg`` can be a string or list of selectors to
|
513 |
-
insert before the token type classes.
|
514 |
-
"""
|
515 |
-
style_lines = []
|
516 |
-
|
517 |
-
style_lines.extend(self.get_linenos_style_defs())
|
518 |
-
style_lines.extend(self.get_background_style_defs(arg))
|
519 |
-
style_lines.extend(self.get_token_style_defs(arg))
|
520 |
-
|
521 |
-
return '\n'.join(style_lines)
|
522 |
-
|
523 |
-
def get_token_style_defs(self, arg=None):
|
524 |
-
prefix = self.get_css_prefix(arg)
|
525 |
-
|
526 |
-
styles = [
|
527 |
-
(level, ttype, cls, style)
|
528 |
-
for cls, (style, ttype, level) in self.class2style.items()
|
529 |
-
if cls and style
|
530 |
-
]
|
531 |
-
styles.sort()
|
532 |
-
|
533 |
-
lines = [
|
534 |
-
'%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
|
535 |
-
for (level, ttype, cls, style) in styles
|
536 |
-
]
|
537 |
-
|
538 |
-
return lines
|
539 |
-
|
540 |
-
def get_background_style_defs(self, arg=None):
|
541 |
-
prefix = self.get_css_prefix(arg)
|
542 |
-
bg_color = self.style.background_color
|
543 |
-
hl_color = self.style.highlight_color
|
544 |
-
|
545 |
-
lines = []
|
546 |
-
|
547 |
-
if arg and not self.nobackground and bg_color is not None:
|
548 |
-
text_style = ''
|
549 |
-
if Text in self.ttype2class:
|
550 |
-
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
|
551 |
-
lines.insert(
|
552 |
-
0, '%s{ background: %s;%s }' % (
|
553 |
-
prefix(''), bg_color, text_style
|
554 |
-
)
|
555 |
-
)
|
556 |
-
if hl_color is not None:
|
557 |
-
lines.insert(
|
558 |
-
0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
|
559 |
-
)
|
560 |
-
|
561 |
-
return lines
|
562 |
-
|
563 |
-
def get_linenos_style_defs(self):
|
564 |
-
lines = [
|
565 |
-
'pre { %s }' % self._pre_style,
|
566 |
-
'td.linenos .normal { %s }' % self._linenos_style,
|
567 |
-
'span.linenos { %s }' % self._linenos_style,
|
568 |
-
'td.linenos .special { %s }' % self._linenos_special_style,
|
569 |
-
'span.linenos.special { %s }' % self._linenos_special_style,
|
570 |
-
]
|
571 |
-
|
572 |
-
return lines
|
573 |
-
|
574 |
-
def get_css_prefix(self, arg):
|
575 |
-
if arg is None:
|
576 |
-
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
|
577 |
-
if isinstance(arg, str):
|
578 |
-
args = [arg]
|
579 |
-
else:
|
580 |
-
args = list(arg)
|
581 |
-
|
582 |
-
def prefix(cls):
|
583 |
-
if cls:
|
584 |
-
cls = '.' + cls
|
585 |
-
tmp = []
|
586 |
-
for arg in args:
|
587 |
-
tmp.append((arg and arg + ' ' or '') + cls)
|
588 |
-
return ', '.join(tmp)
|
589 |
-
|
590 |
-
return prefix
|
591 |
-
|
592 |
-
@property
|
593 |
-
def _pre_style(self):
|
594 |
-
return 'line-height: 125%;'
|
595 |
-
|
596 |
-
@property
|
597 |
-
def _linenos_style(self):
|
598 |
-
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
|
599 |
-
self.style.line_number_color,
|
600 |
-
self.style.line_number_background_color
|
601 |
-
)
|
602 |
-
|
603 |
-
@property
|
604 |
-
def _linenos_special_style(self):
|
605 |
-
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
|
606 |
-
self.style.line_number_special_color,
|
607 |
-
self.style.line_number_special_background_color
|
608 |
-
)
|
609 |
-
|
610 |
-
def _decodeifneeded(self, value):
|
611 |
-
if isinstance(value, bytes):
|
612 |
-
if self.encoding:
|
613 |
-
return value.decode(self.encoding)
|
614 |
-
return value.decode()
|
615 |
-
return value
|
616 |
-
|
617 |
-
def _wrap_full(self, inner, outfile):
|
618 |
-
if self.cssfile:
|
619 |
-
if os.path.isabs(self.cssfile):
|
620 |
-
# it's an absolute filename
|
621 |
-
cssfilename = self.cssfile
|
622 |
-
else:
|
623 |
-
try:
|
624 |
-
filename = outfile.name
|
625 |
-
if not filename or filename[0] == '<':
|
626 |
-
# pseudo files, e.g. name == '<fdopen>'
|
627 |
-
raise AttributeError
|
628 |
-
cssfilename = os.path.join(os.path.dirname(filename),
|
629 |
-
self.cssfile)
|
630 |
-
except AttributeError:
|
631 |
-
print('Note: Cannot determine output file name, '
|
632 |
-
'using current directory as base for the CSS file name',
|
633 |
-
file=sys.stderr)
|
634 |
-
cssfilename = self.cssfile
|
635 |
-
# write CSS file only if noclobber_cssfile isn't given as an option.
|
636 |
-
try:
|
637 |
-
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
|
638 |
-
with open(cssfilename, "w") as cf:
|
639 |
-
cf.write(CSSFILE_TEMPLATE %
|
640 |
-
{'styledefs': self.get_style_defs('body')})
|
641 |
-
except OSError as err:
|
642 |
-
err.strerror = 'Error writing CSS file: ' + err.strerror
|
643 |
-
raise
|
644 |
-
|
645 |
-
yield 0, (DOC_HEADER_EXTERNALCSS %
|
646 |
-
dict(title=self.title,
|
647 |
-
cssfile=self.cssfile,
|
648 |
-
encoding=self.encoding))
|
649 |
-
else:
|
650 |
-
yield 0, (DOC_HEADER %
|
651 |
-
dict(title=self.title,
|
652 |
-
styledefs=self.get_style_defs('body'),
|
653 |
-
encoding=self.encoding))
|
654 |
-
|
655 |
-
yield from inner
|
656 |
-
yield 0, DOC_FOOTER
|
657 |
-
|
658 |
-
def _wrap_tablelinenos(self, inner):
|
659 |
-
dummyoutfile = StringIO()
|
660 |
-
lncount = 0
|
661 |
-
for t, line in inner:
|
662 |
-
if t:
|
663 |
-
lncount += 1
|
664 |
-
dummyoutfile.write(line)
|
665 |
-
|
666 |
-
fl = self.linenostart
|
667 |
-
mw = len(str(lncount + fl - 1))
|
668 |
-
sp = self.linenospecial
|
669 |
-
st = self.linenostep
|
670 |
-
anchor_name = self.lineanchors or self.linespans
|
671 |
-
aln = self.anchorlinenos
|
672 |
-
nocls = self.noclasses
|
673 |
-
|
674 |
-
lines = []
|
675 |
-
|
676 |
-
for i in range(fl, fl+lncount):
|
677 |
-
print_line = i % st == 0
|
678 |
-
special_line = sp and i % sp == 0
|
679 |
-
|
680 |
-
if print_line:
|
681 |
-
line = '%*d' % (mw, i)
|
682 |
-
if aln:
|
683 |
-
line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
|
684 |
-
else:
|
685 |
-
line = ' ' * mw
|
686 |
-
|
687 |
-
if nocls:
|
688 |
-
if special_line:
|
689 |
-
style = ' style="%s"' % self._linenos_special_style
|
690 |
-
else:
|
691 |
-
style = ' style="%s"' % self._linenos_style
|
692 |
-
else:
|
693 |
-
if special_line:
|
694 |
-
style = ' class="special"'
|
695 |
-
else:
|
696 |
-
style = ' class="normal"'
|
697 |
-
|
698 |
-
if style:
|
699 |
-
line = '<span%s>%s</span>' % (style, line)
|
700 |
-
|
701 |
-
lines.append(line)
|
702 |
-
|
703 |
-
ls = '\n'.join(lines)
|
704 |
-
|
705 |
-
# If a filename was specified, we can't put it into the code table as it
|
706 |
-
# would misalign the line numbers. Hence we emit a separate row for it.
|
707 |
-
filename_tr = ""
|
708 |
-
if self.filename:
|
709 |
-
filename_tr = (
|
710 |
-
'<tr><th colspan="2" class="filename">'
|
711 |
-
'<span class="filename">' + self.filename + '</span>'
|
712 |
-
'</th></tr>')
|
713 |
-
|
714 |
-
# in case you wonder about the seemingly redundant <div> here: since the
|
715 |
-
# content in the other cell also is wrapped in a div, some browsers in
|
716 |
-
# some configurations seem to mess up the formatting...
|
717 |
-
yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
|
718 |
-
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
|
719 |
-
ls + '</pre></div></td><td class="code">')
|
720 |
-
yield 0, '<div>'
|
721 |
-
yield 0, dummyoutfile.getvalue()
|
722 |
-
yield 0, '</div>'
|
723 |
-
yield 0, '</td></tr></table>'
|
724 |
-
|
725 |
-
|
726 |
-
def _wrap_inlinelinenos(self, inner):
|
727 |
-
# need a list of lines since we need the width of a single number :(
|
728 |
-
inner_lines = list(inner)
|
729 |
-
sp = self.linenospecial
|
730 |
-
st = self.linenostep
|
731 |
-
num = self.linenostart
|
732 |
-
mw = len(str(len(inner_lines) + num - 1))
|
733 |
-
anchor_name = self.lineanchors or self.linespans
|
734 |
-
aln = self.anchorlinenos
|
735 |
-
nocls = self.noclasses
|
736 |
-
|
737 |
-
for _, inner_line in inner_lines:
|
738 |
-
print_line = num % st == 0
|
739 |
-
special_line = sp and num % sp == 0
|
740 |
-
|
741 |
-
if print_line:
|
742 |
-
line = '%*d' % (mw, num)
|
743 |
-
else:
|
744 |
-
line = ' ' * mw
|
745 |
-
|
746 |
-
if nocls:
|
747 |
-
if special_line:
|
748 |
-
style = ' style="%s"' % self._linenos_special_style
|
749 |
-
else:
|
750 |
-
style = ' style="%s"' % self._linenos_style
|
751 |
-
else:
|
752 |
-
if special_line:
|
753 |
-
style = ' class="linenos special"'
|
754 |
-
else:
|
755 |
-
style = ' class="linenos"'
|
756 |
-
|
757 |
-
if style:
|
758 |
-
linenos = '<span%s>%s</span>' % (style, line)
|
759 |
-
else:
|
760 |
-
linenos = line
|
761 |
-
|
762 |
-
if aln:
|
763 |
-
yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
|
764 |
-
inner_line)
|
765 |
-
else:
|
766 |
-
yield 1, linenos + inner_line
|
767 |
-
num += 1
|
768 |
-
|
769 |
-
def _wrap_lineanchors(self, inner):
|
770 |
-
s = self.lineanchors
|
771 |
-
# subtract 1 since we have to increment i *before* yielding
|
772 |
-
i = self.linenostart - 1
|
773 |
-
for t, line in inner:
|
774 |
-
if t:
|
775 |
-
i += 1
|
776 |
-
href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
|
777 |
-
yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
|
778 |
-
else:
|
779 |
-
yield 0, line
|
780 |
-
|
781 |
-
def _wrap_linespans(self, inner):
|
782 |
-
s = self.linespans
|
783 |
-
i = self.linenostart - 1
|
784 |
-
for t, line in inner:
|
785 |
-
if t:
|
786 |
-
i += 1
|
787 |
-
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
|
788 |
-
else:
|
789 |
-
yield 0, line
|
790 |
-
|
791 |
-
def _wrap_div(self, inner):
|
792 |
-
style = []
|
793 |
-
if (self.noclasses and not self.nobackground and
|
794 |
-
self.style.background_color is not None):
|
795 |
-
style.append('background: %s' % (self.style.background_color,))
|
796 |
-
if self.cssstyles:
|
797 |
-
style.append(self.cssstyles)
|
798 |
-
style = '; '.join(style)
|
799 |
-
|
800 |
-
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
|
801 |
-
(style and (' style="%s"' % style)) + '>')
|
802 |
-
yield from inner
|
803 |
-
yield 0, '</div>\n'
|
804 |
-
|
805 |
-
def _wrap_pre(self, inner):
|
806 |
-
style = []
|
807 |
-
if self.prestyles:
|
808 |
-
style.append(self.prestyles)
|
809 |
-
if self.noclasses:
|
810 |
-
style.append(self._pre_style)
|
811 |
-
style = '; '.join(style)
|
812 |
-
|
813 |
-
if self.filename and self.linenos != 1:
|
814 |
-
yield 0, ('<span class="filename">' + self.filename + '</span>')
|
815 |
-
|
816 |
-
# the empty span here is to keep leading empty lines from being
|
817 |
-
# ignored by HTML parsers
|
818 |
-
yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
|
819 |
-
yield from inner
|
820 |
-
yield 0, '</pre>'
|
821 |
-
|
822 |
-
def _wrap_code(self, inner):
|
823 |
-
yield 0, '<code>'
|
824 |
-
yield from inner
|
825 |
-
yield 0, '</code>'
|
826 |
-
|
827 |
-
@functools.lru_cache(maxsize=100)
|
828 |
-
def _translate_parts(self, value):
|
829 |
-
"""HTML-escape a value and split it by newlines."""
|
830 |
-
return value.translate(_escape_html_table).split('\n')
|
831 |
-
|
832 |
-
def _format_lines(self, tokensource):
|
833 |
-
"""
|
834 |
-
Just format the tokens, without any wrapping tags.
|
835 |
-
Yield individual lines.
|
836 |
-
"""
|
837 |
-
nocls = self.noclasses
|
838 |
-
lsep = self.lineseparator
|
839 |
-
tagsfile = self.tagsfile
|
840 |
-
|
841 |
-
lspan = ''
|
842 |
-
line = []
|
843 |
-
for ttype, value in tokensource:
|
844 |
-
try:
|
845 |
-
cspan = self.span_element_openers[ttype]
|
846 |
-
except KeyError:
|
847 |
-
title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else ''
|
848 |
-
if nocls:
|
849 |
-
css_style = self._get_css_inline_styles(ttype)
|
850 |
-
if css_style:
|
851 |
-
css_style = self.class2style[css_style][0]
|
852 |
-
cspan = '<span style="%s"%s>' % (css_style, title)
|
853 |
-
else:
|
854 |
-
cspan = ''
|
855 |
-
else:
|
856 |
-
css_class = self._get_css_classes(ttype)
|
857 |
-
if css_class:
|
858 |
-
cspan = '<span class="%s"%s>' % (css_class, title)
|
859 |
-
else:
|
860 |
-
cspan = ''
|
861 |
-
self.span_element_openers[ttype] = cspan
|
862 |
-
|
863 |
-
parts = self._translate_parts(value)
|
864 |
-
|
865 |
-
if tagsfile and ttype in Token.Name:
|
866 |
-
filename, linenumber = self._lookup_ctag(value)
|
867 |
-
if linenumber:
|
868 |
-
base, filename = os.path.split(filename)
|
869 |
-
if base:
|
870 |
-
base += '/'
|
871 |
-
filename, extension = os.path.splitext(filename)
|
872 |
-
url = self.tagurlformat % {'path': base, 'fname': filename,
|
873 |
-
'fext': extension}
|
874 |
-
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
|
875 |
-
(url, self.lineanchors, linenumber, parts[0])
|
876 |
-
parts[-1] = parts[-1] + "</a>"
|
877 |
-
|
878 |
-
# for all but the last line
|
879 |
-
for part in parts[:-1]:
|
880 |
-
if line:
|
881 |
-
# Also check for part being non-empty, so we avoid creating
|
882 |
-
# empty <span> tags
|
883 |
-
if lspan != cspan and part:
|
884 |
-
line.extend(((lspan and '</span>'), cspan, part,
|
885 |
-
(cspan and '</span>'), lsep))
|
886 |
-
else: # both are the same, or the current part was empty
|
887 |
-
line.extend((part, (lspan and '</span>'), lsep))
|
888 |
-
yield 1, ''.join(line)
|
889 |
-
line = []
|
890 |
-
elif part:
|
891 |
-
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
|
892 |
-
else:
|
893 |
-
yield 1, lsep
|
894 |
-
# for the last line
|
895 |
-
if line and parts[-1]:
|
896 |
-
if lspan != cspan:
|
897 |
-
line.extend(((lspan and '</span>'), cspan, parts[-1]))
|
898 |
-
lspan = cspan
|
899 |
-
else:
|
900 |
-
line.append(parts[-1])
|
901 |
-
elif parts[-1]:
|
902 |
-
line = [cspan, parts[-1]]
|
903 |
-
lspan = cspan
|
904 |
-
# else we neither have to open a new span nor set lspan
|
905 |
-
|
906 |
-
if line:
|
907 |
-
line.extend(((lspan and '</span>'), lsep))
|
908 |
-
yield 1, ''.join(line)
|
909 |
-
|
910 |
-
def _lookup_ctag(self, token):
|
911 |
-
entry = ctags.TagEntry()
|
912 |
-
if self._ctags.find(entry, token.encode(), 0):
|
913 |
-
return entry['file'], entry['lineNumber']
|
914 |
-
else:
|
915 |
-
return None, None
|
916 |
-
|
917 |
-
def _highlight_lines(self, tokensource):
|
918 |
-
"""
|
919 |
-
Highlighted the lines specified in the `hl_lines` option by
|
920 |
-
post-processing the token stream coming from `_format_lines`.
|
921 |
-
"""
|
922 |
-
hls = self.hl_lines
|
923 |
-
|
924 |
-
for i, (t, value) in enumerate(tokensource):
|
925 |
-
if t != 1:
|
926 |
-
yield t, value
|
927 |
-
if i + 1 in hls: # i + 1 because Python indexes start at 0
|
928 |
-
if self.noclasses:
|
929 |
-
style = ''
|
930 |
-
if self.style.highlight_color is not None:
|
931 |
-
style = (' style="background-color: %s"' %
|
932 |
-
(self.style.highlight_color,))
|
933 |
-
yield 1, '<span%s>%s</span>' % (style, value)
|
934 |
-
else:
|
935 |
-
yield 1, '<span class="hll">%s</span>' % value
|
936 |
-
else:
|
937 |
-
yield 1, value
|
938 |
-
|
939 |
-
def wrap(self, source):
|
940 |
-
"""
|
941 |
-
Wrap the ``source``, which is a generator yielding
|
942 |
-
individual lines, in custom generators. See docstring
|
943 |
-
for `format`. Can be overridden.
|
944 |
-
"""
|
945 |
-
|
946 |
-
output = source
|
947 |
-
if self.wrapcode:
|
948 |
-
output = self._wrap_code(output)
|
949 |
-
|
950 |
-
output = self._wrap_pre(output)
|
951 |
-
|
952 |
-
return output
|
953 |
-
|
954 |
-
def format_unencoded(self, tokensource, outfile):
|
955 |
-
"""
|
956 |
-
The formatting process uses several nested generators; which of
|
957 |
-
them are used is determined by the user's options.
|
958 |
-
|
959 |
-
Each generator should take at least one argument, ``inner``,
|
960 |
-
and wrap the pieces of text generated by this.
|
961 |
-
|
962 |
-
Always yield 2-tuples: (code, text). If "code" is 1, the text
|
963 |
-
is part of the original tokensource being highlighted, if it's
|
964 |
-
0, the text is some piece of wrapping. This makes it possible to
|
965 |
-
use several different wrappers that process the original source
|
966 |
-
linewise, e.g. line number generators.
|
967 |
-
"""
|
968 |
-
source = self._format_lines(tokensource)
|
969 |
-
|
970 |
-
# As a special case, we wrap line numbers before line highlighting
|
971 |
-
# so the line numbers get wrapped in the highlighting tag.
|
972 |
-
if not self.nowrap and self.linenos == 2:
|
973 |
-
source = self._wrap_inlinelinenos(source)
|
974 |
-
|
975 |
-
if self.hl_lines:
|
976 |
-
source = self._highlight_lines(source)
|
977 |
-
|
978 |
-
if not self.nowrap:
|
979 |
-
if self.lineanchors:
|
980 |
-
source = self._wrap_lineanchors(source)
|
981 |
-
if self.linespans:
|
982 |
-
source = self._wrap_linespans(source)
|
983 |
-
source = self.wrap(source)
|
984 |
-
if self.linenos == 1:
|
985 |
-
source = self._wrap_tablelinenos(source)
|
986 |
-
source = self._wrap_div(source)
|
987 |
-
if self.full:
|
988 |
-
source = self._wrap_full(source, outfile)
|
989 |
-
|
990 |
-
for t, piece in source:
|
991 |
-
outfile.write(piece)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BigDL/bigdl_nano_demo/app.py
DELETED
@@ -1,192 +0,0 @@
|
|
1 |
-
#
|
2 |
-
# Copyright 2016 The BigDL Authors.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
#
|
16 |
-
# Part of the code in this file is adapted from
|
17 |
-
# https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/eval.py and
|
18 |
-
# https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/train.py
|
19 |
-
|
20 |
-
# MIT License
|
21 |
-
|
22 |
-
# Copyright (c) 2022 Lorenzo Breschi
|
23 |
-
|
24 |
-
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
25 |
-
# of this software and associated documentation files (the "Software"), to deal
|
26 |
-
# in the Software without restriction, including without limitation the rights
|
27 |
-
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
28 |
-
# copies of the Software, and to permit persons to whom the Software is
|
29 |
-
# furnished to do so, subject to the following conditions:
|
30 |
-
|
31 |
-
# The above copyright notice and this permission notice shall be included in all
|
32 |
-
# copies or substantial portions of the Software.
|
33 |
-
|
34 |
-
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
35 |
-
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
36 |
-
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
37 |
-
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
38 |
-
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
39 |
-
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
40 |
-
# SOFTWARE.
|
41 |
-
|
42 |
-
import gradio as gr
|
43 |
-
import numpy as np
|
44 |
-
import time
|
45 |
-
from data import PatchDataModule, prepare_data, image2tensor, tensor2image
|
46 |
-
import torch
|
47 |
-
from tqdm import tqdm
|
48 |
-
from bigdl.nano.pytorch import InferenceOptimizer
|
49 |
-
from torch.utils.data import DataLoader
|
50 |
-
from pathlib import Path
|
51 |
-
from torch.utils.data import Dataset
|
52 |
-
import datetime
|
53 |
-
import huggingface_hub
|
54 |
-
|
55 |
-
|
56 |
-
device = 'cpu'
|
57 |
-
dtype = torch.float32
|
58 |
-
MODEL_REPO = 'BigDL/FSPBT'
|
59 |
-
ckpt_path = huggingface_hub.hf_hub_download(
|
60 |
-
MODEL_REPO, 'generator.pt')
|
61 |
-
generator = torch.load(ckpt_path)
|
62 |
-
generator.eval()
|
63 |
-
generator.to(device, dtype)
|
64 |
-
params = {'batch_size': 1,
|
65 |
-
'num_workers': 0}
|
66 |
-
|
67 |
-
|
68 |
-
class ImageDataset(Dataset):
|
69 |
-
def __init__(self, img):
|
70 |
-
self.imgs = [image2tensor(img)]
|
71 |
-
def __getitem__(self, idx: int) -> dict:
|
72 |
-
return self.imgs[idx]
|
73 |
-
|
74 |
-
def __len__(self) -> int:
|
75 |
-
return len(self.imgs)
|
76 |
-
|
77 |
-
|
78 |
-
data_path = Path('data')
|
79 |
-
train_image_dd = prepare_data(data_path)
|
80 |
-
dm = PatchDataModule(train_image_dd, patch_size=2**6,
|
81 |
-
batch_size=2**3, patch_num=2**6)
|
82 |
-
|
83 |
-
# quantize model
|
84 |
-
train_loader = dm.train_dataloader()
|
85 |
-
train_loader_iter = iter(train_loader)
|
86 |
-
quantized_model = InferenceOptimizer.quantize(generator,
|
87 |
-
accelerator=None,
|
88 |
-
calib_dataloader=train_loader)
|
89 |
-
|
90 |
-
|
91 |
-
def original_transfer(input_img):
|
92 |
-
w, h, _ = input_img.shape
|
93 |
-
print(datetime.datetime.now())
|
94 |
-
print("input size: ", w, h)
|
95 |
-
# resize too large image
|
96 |
-
if w > 3000 or h > 3000:
|
97 |
-
ratio = min(3000 / w, 3000 / h)
|
98 |
-
w = int(w * ratio)
|
99 |
-
h = int(h * ratio)
|
100 |
-
if w % 4 != 0 or h % 4 != 0:
|
101 |
-
NW = int((w // 4) * 4)
|
102 |
-
NH = int((h // 4) * 4)
|
103 |
-
input_img = np.resize(input_img,(NW,NH,3))
|
104 |
-
st = time.perf_counter()
|
105 |
-
dataset = ImageDataset(input_img)
|
106 |
-
loader = DataLoader(dataset, **params)
|
107 |
-
with torch.no_grad():
|
108 |
-
for inputs in tqdm(loader):
|
109 |
-
inputs = inputs.to(device, dtype)
|
110 |
-
st = time.perf_counter()
|
111 |
-
outputs = generator(inputs)
|
112 |
-
ori_time = time.perf_counter() - st
|
113 |
-
ori_time = "{:.3f}s".format(ori_time)
|
114 |
-
ori_image = np.array(tensor2image(outputs[0]))
|
115 |
-
del inputs
|
116 |
-
del outputs
|
117 |
-
return ori_image, ori_time
|
118 |
-
|
119 |
-
def nano_transfer(input_img):
|
120 |
-
w, h, _ = input_img.shape
|
121 |
-
print(datetime.datetime.now())
|
122 |
-
print("input size: ", w, h)
|
123 |
-
# resize too large image
|
124 |
-
if w > 3000 or h > 3000:
|
125 |
-
ratio = min(3000 / w, 3000 / h)
|
126 |
-
w = int(w * ratio)
|
127 |
-
h = int(h * ratio)
|
128 |
-
if w % 4 != 0 or h % 4 != 0:
|
129 |
-
NW = int((w // 4) * 4)
|
130 |
-
NH = int((h // 4) * 4)
|
131 |
-
input_img = np.resize(input_img,(NW,NH,3))
|
132 |
-
st = time.perf_counter()
|
133 |
-
dataset = ImageDataset(input_img)
|
134 |
-
loader = DataLoader(dataset, **params)
|
135 |
-
with torch.no_grad():
|
136 |
-
for inputs in tqdm(loader):
|
137 |
-
inputs = inputs.to(device, dtype)
|
138 |
-
st = time.perf_counter()
|
139 |
-
outputs = quantized_model(inputs)
|
140 |
-
nano_time = time.perf_counter() - st
|
141 |
-
nano_time = "{:.3f}s".format(nano_time)
|
142 |
-
nano_image = np.array(tensor2image(outputs[0]))
|
143 |
-
del inputs
|
144 |
-
del outputs
|
145 |
-
return nano_image, nano_time
|
146 |
-
|
147 |
-
|
148 |
-
def clear():
|
149 |
-
return None, None, None, None
|
150 |
-
|
151 |
-
|
152 |
-
demo = gr.Blocks()
|
153 |
-
|
154 |
-
with demo:
|
155 |
-
gr.Markdown("<h1><center>BigDL-Nano Demo</center></h1>")
|
156 |
-
with gr.Row().style(equal_height=False):
|
157 |
-
with gr.Column():
|
158 |
-
gr.Markdown('''
|
159 |
-
<h2>Overview</h2>
|
160 |
-
|
161 |
-
BigDL-Nano is a library in [BigDL 2.0](https://github.com/intel-analytics/BigDL) that allows the users to transparently accelerate their deep learning pipelines (including data processing, training and inference) by automatically integrating optimized libraries, best-known configurations, and software optimizations. </p>
|
162 |
-
The video on the right shows how the user can easily accelerate their training and inference (including tracing and quantization) pipelines using BigDL-Nano with just a couple of lines of code; you may refer to our [CVPR 2022 demo paper](https://arxiv.org/abs/2204.01715) for more details.
|
163 |
-
''')
|
164 |
-
with gr.Column():
|
165 |
-
gr.Video(value="data/nano_api_display.mp4")
|
166 |
-
gr.Markdown('''
|
167 |
-
<h2>Demo</h2>
|
168 |
-
|
169 |
-
This section uses an image stylization example to demonstrate the speedup of an inference pipeline using quantization in BigDL-Nano (about 2~3x inference time speedup).
|
170 |
-
This inference demo is adapted from the original [FSPBT-Image-Translation code](https://github.com/rnwzd/FSPBT-Image-Translation),
|
171 |
-
and the default image is from [the COCO dataset](https://cocodataset.org/#home).
|
172 |
-
''')
|
173 |
-
with gr.Row().style(equal_height=False):
|
174 |
-
input_img = gr.Image(label="input image", value="data/COCO_image.jpg", source="upload")
|
175 |
-
with gr.Column():
|
176 |
-
ori_but = gr.Button("Standard PyTorch")
|
177 |
-
nano_but = gr.Button("BigDL-Nano")
|
178 |
-
clear_but = gr.Button("Clear Output")
|
179 |
-
with gr.Row().style(equal_height=False):
|
180 |
-
with gr.Column():
|
181 |
-
ori_time = gr.Text(label="Standard PyTorch latency")
|
182 |
-
ori_image = gr.Image(label="Standard PyTorch output image")
|
183 |
-
with gr.Column():
|
184 |
-
nano_time = gr.Text(label="BigDL-Nano latency")
|
185 |
-
nano_image = gr.Image(label="BigDL-Nano output image")
|
186 |
-
|
187 |
-
ori_but.click(original_transfer, inputs=input_img, outputs=[ori_image, ori_time])
|
188 |
-
nano_but.click(nano_transfer, inputs=input_img, outputs=[nano_image, nano_time])
|
189 |
-
clear_but.click(clear, inputs=None, outputs=[ori_image, ori_time, nano_image, nano_time])
|
190 |
-
|
191 |
-
|
192 |
-
demo.launch(share=True, enable_queue=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/model.py
DELETED
@@ -1,461 +0,0 @@
|
|
1 |
-
from collections import OrderedDict
|
2 |
-
from typing import Tuple, Union
|
3 |
-
|
4 |
-
import torch
|
5 |
-
import torch.nn.functional as F
|
6 |
-
from torch import nn
|
7 |
-
|
8 |
-
|
9 |
-
class Bottleneck(nn.Module):
|
10 |
-
expansion = 4
|
11 |
-
|
12 |
-
def __init__(self, inplanes, planes, stride=1):
|
13 |
-
super().__init__()
|
14 |
-
|
15 |
-
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
|
16 |
-
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
|
17 |
-
self.bn1 = nn.BatchNorm2d(planes)
|
18 |
-
|
19 |
-
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
|
20 |
-
self.bn2 = nn.BatchNorm2d(planes)
|
21 |
-
|
22 |
-
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
|
23 |
-
|
24 |
-
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
|
25 |
-
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
|
26 |
-
|
27 |
-
self.relu = nn.ReLU(inplace=True)
|
28 |
-
self.downsample = None
|
29 |
-
self.stride = stride
|
30 |
-
|
31 |
-
if stride > 1 or inplanes != planes * Bottleneck.expansion:
|
32 |
-
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
|
33 |
-
self.downsample = nn.Sequential(OrderedDict([
|
34 |
-
("-1", nn.AvgPool2d(stride)),
|
35 |
-
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
|
36 |
-
("1", nn.BatchNorm2d(planes * self.expansion))
|
37 |
-
]))
|
38 |
-
|
39 |
-
def forward(self, x: torch.Tensor):
|
40 |
-
identity = x
|
41 |
-
|
42 |
-
out = self.relu(self.bn1(self.conv1(x)))
|
43 |
-
out = self.relu(self.bn2(self.conv2(out)))
|
44 |
-
out = self.avgpool(out)
|
45 |
-
out = self.bn3(self.conv3(out))
|
46 |
-
|
47 |
-
if self.downsample is not None:
|
48 |
-
identity = self.downsample(x)
|
49 |
-
|
50 |
-
out += identity
|
51 |
-
out = self.relu(out)
|
52 |
-
return out
|
53 |
-
|
54 |
-
|
55 |
-
class AttentionPool2d(nn.Module):
|
56 |
-
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
|
57 |
-
super().__init__()
|
58 |
-
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
|
59 |
-
self.k_proj = nn.Linear(embed_dim, embed_dim)
|
60 |
-
self.q_proj = nn.Linear(embed_dim, embed_dim)
|
61 |
-
self.v_proj = nn.Linear(embed_dim, embed_dim)
|
62 |
-
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
|
63 |
-
self.num_heads = num_heads
|
64 |
-
|
65 |
-
def forward(self, x):
|
66 |
-
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
|
67 |
-
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
|
68 |
-
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
|
69 |
-
x, _ = F.multi_head_attention_forward(
|
70 |
-
query=x, key=x, value=x,
|
71 |
-
embed_dim_to_check=x.shape[-1],
|
72 |
-
num_heads=self.num_heads,
|
73 |
-
q_proj_weight=self.q_proj.weight,
|
74 |
-
k_proj_weight=self.k_proj.weight,
|
75 |
-
v_proj_weight=self.v_proj.weight,
|
76 |
-
in_proj_weight=None,
|
77 |
-
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
|
78 |
-
bias_k=None,
|
79 |
-
bias_v=None,
|
80 |
-
add_zero_attn=False,
|
81 |
-
dropout_p=0,
|
82 |
-
out_proj_weight=self.c_proj.weight,
|
83 |
-
out_proj_bias=self.c_proj.bias,
|
84 |
-
use_separate_proj_weight=True,
|
85 |
-
training=self.training,
|
86 |
-
need_weights=False
|
87 |
-
)
|
88 |
-
|
89 |
-
return x[0]
|
90 |
-
|
91 |
-
|
92 |
-
class ModifiedResNet(nn.Module):
|
93 |
-
"""
|
94 |
-
A ResNet class that is similar to torchvision's but contains the following changes:
|
95 |
-
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
|
96 |
-
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
|
97 |
-
- The final pooling layer is a QKV attention instead of an average pool
|
98 |
-
"""
|
99 |
-
|
100 |
-
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
|
101 |
-
super().__init__()
|
102 |
-
self.output_dim = output_dim
|
103 |
-
self.input_resolution = input_resolution
|
104 |
-
|
105 |
-
# the 3-layer stem
|
106 |
-
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
|
107 |
-
self.bn1 = nn.BatchNorm2d(width // 2)
|
108 |
-
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
|
109 |
-
self.bn2 = nn.BatchNorm2d(width // 2)
|
110 |
-
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
|
111 |
-
self.bn3 = nn.BatchNorm2d(width)
|
112 |
-
self.avgpool = nn.AvgPool2d(2)
|
113 |
-
self.relu = nn.ReLU(inplace=True)
|
114 |
-
|
115 |
-
# residual layers
|
116 |
-
self._inplanes = width # this is a *mutable* variable used during construction
|
117 |
-
self.layer1 = self._make_layer(width, layers[0])
|
118 |
-
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
|
119 |
-
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
|
120 |
-
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
|
121 |
-
|
122 |
-
embed_dim = width * 32 # the ResNet feature dimension
|
123 |
-
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
|
124 |
-
|
125 |
-
def _make_layer(self, planes, blocks, stride=1):
|
126 |
-
layers = [Bottleneck(self._inplanes, planes, stride)]
|
127 |
-
|
128 |
-
self._inplanes = planes * Bottleneck.expansion
|
129 |
-
for _ in range(1, blocks):
|
130 |
-
layers.append(Bottleneck(self._inplanes, planes))
|
131 |
-
|
132 |
-
return nn.Sequential(*layers)
|
133 |
-
|
134 |
-
def forward(self, x):
|
135 |
-
def stem(x):
|
136 |
-
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
|
137 |
-
x = self.relu(bn(conv(x)))
|
138 |
-
x = self.avgpool(x)
|
139 |
-
return x
|
140 |
-
|
141 |
-
x = x.type(self.conv1.weight.dtype)
|
142 |
-
x = stem(x)
|
143 |
-
x = self.layer1(x)
|
144 |
-
x = self.layer2(x)
|
145 |
-
x = self.layer3(x)
|
146 |
-
|
147 |
-
|
148 |
-
#x = self.layer4(x)
|
149 |
-
#print(x.shape)
|
150 |
-
#x = self.attnpool(x)
|
151 |
-
|
152 |
-
return x
|
153 |
-
|
154 |
-
|
155 |
-
class LayerNorm(nn.LayerNorm):
|
156 |
-
"""Subclass torch's LayerNorm to handle fp16."""
|
157 |
-
|
158 |
-
def forward(self, x: torch.Tensor):
|
159 |
-
orig_type = x.dtype
|
160 |
-
ret = super().forward(x.type(torch.float32))
|
161 |
-
return ret.type(orig_type)
|
162 |
-
|
163 |
-
|
164 |
-
class QuickGELU(nn.Module):
|
165 |
-
def forward(self, x: torch.Tensor):
|
166 |
-
return x * torch.sigmoid(1.702 * x)
|
167 |
-
|
168 |
-
|
169 |
-
class ResidualAttentionBlock(nn.Module):
|
170 |
-
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
|
171 |
-
super().__init__()
|
172 |
-
|
173 |
-
self.attn = nn.MultiheadAttention(d_model, n_head)
|
174 |
-
self.ln_1 = LayerNorm(d_model)
|
175 |
-
self.mlp = nn.Sequential(OrderedDict([
|
176 |
-
("c_fc", nn.Linear(d_model, d_model * 4)),
|
177 |
-
("gelu", QuickGELU()),
|
178 |
-
("c_proj", nn.Linear(d_model * 4, d_model))
|
179 |
-
]))
|
180 |
-
self.ln_2 = LayerNorm(d_model)
|
181 |
-
self.attn_mask = attn_mask
|
182 |
-
|
183 |
-
def attention(self, x: torch.Tensor):
|
184 |
-
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
|
185 |
-
return self.attn(x, x, x, need_weights=True, attn_mask=self.attn_mask)
|
186 |
-
|
187 |
-
def forward(self, x: torch.Tensor):
|
188 |
-
attention_res = self.attention(self.ln_1(x))
|
189 |
-
x, weight = x+attention_res[0], attention_res[1]
|
190 |
-
x = x + self.mlp(self.ln_2(x))
|
191 |
-
return x, weight
|
192 |
-
|
193 |
-
class ResidualAttentionBlock_old(nn.Module):
|
194 |
-
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
|
195 |
-
super().__init__()
|
196 |
-
|
197 |
-
self.attn = nn.MultiheadAttention(d_model, n_head)
|
198 |
-
self.ln_1 = LayerNorm(d_model)
|
199 |
-
self.mlp = nn.Sequential(OrderedDict([
|
200 |
-
("c_fc", nn.Linear(d_model, d_model * 4)),
|
201 |
-
("gelu", QuickGELU()),
|
202 |
-
("c_proj", nn.Linear(d_model * 4, d_model))
|
203 |
-
]))
|
204 |
-
self.ln_2 = LayerNorm(d_model)
|
205 |
-
self.attn_mask = attn_mask
|
206 |
-
|
207 |
-
def attention(self, x: torch.Tensor):
|
208 |
-
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
|
209 |
-
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
|
210 |
-
|
211 |
-
def forward(self, x: torch.Tensor):
|
212 |
-
x = x + self.attention(self.ln_1(x))
|
213 |
-
x = x + self.mlp(self.ln_2(x))
|
214 |
-
return x
|
215 |
-
|
216 |
-
|
217 |
-
class Transformer(nn.Module):
|
218 |
-
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
|
219 |
-
super().__init__()
|
220 |
-
self.width = width
|
221 |
-
self.layers = layers
|
222 |
-
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
|
223 |
-
|
224 |
-
def forward(self, x: torch.Tensor):
|
225 |
-
weights = []
|
226 |
-
r=0
|
227 |
-
|
228 |
-
for block in self.resblocks:
|
229 |
-
#if r<=10:
|
230 |
-
# for param in block.parameters():
|
231 |
-
# param.requires_grad = False
|
232 |
-
#if r%2==0:
|
233 |
-
|
234 |
-
x, weight = block(x)
|
235 |
-
weights.append(weight)
|
236 |
-
#print("r=",r)
|
237 |
-
#if r==5:
|
238 |
-
# break
|
239 |
-
#r = r + 1
|
240 |
-
|
241 |
-
return x, weights
|
242 |
-
|
243 |
-
### OLD transformer without attetion
|
244 |
-
class Transformer_Ecnoder_clip(nn.Module):
|
245 |
-
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
|
246 |
-
super().__init__()
|
247 |
-
self.width = width
|
248 |
-
self.layers = layers
|
249 |
-
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
|
250 |
-
|
251 |
-
def forward(self, x: torch.Tensor):
|
252 |
-
return self.resblocks(x)
|
253 |
-
|
254 |
-
|
255 |
-
class VisualTransformer(nn.Module):
|
256 |
-
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
|
257 |
-
super().__init__()
|
258 |
-
self.input_resolution = input_resolution
|
259 |
-
self.output_dim = output_dim
|
260 |
-
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
|
261 |
-
|
262 |
-
scale = width ** -0.5
|
263 |
-
self.class_embedding = nn.Parameter(scale * torch.randn(width))
|
264 |
-
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
|
265 |
-
self.ln_pre = LayerNorm(width)
|
266 |
-
|
267 |
-
self.transformer = Transformer(width, layers, heads)
|
268 |
-
|
269 |
-
self.ln_post = LayerNorm(width)
|
270 |
-
self.proj = nn.Parameter(scale * torch.randn(width, 512))
|
271 |
-
|
272 |
-
def forward(self, x: torch.Tensor):
|
273 |
-
x = self.conv1(x) # shape = [*, width, grid, grid]
|
274 |
-
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
|
275 |
-
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
|
276 |
-
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
|
277 |
-
|
278 |
-
|
279 |
-
x = x + self.positional_embedding.to(x.dtype)
|
280 |
-
x = self.ln_pre(x)
|
281 |
-
|
282 |
-
x = x.permute(1, 0, 2) # NLD -> LND
|
283 |
-
x,weight = self.transformer(x)
|
284 |
-
x = x.permute(1, 0, 2) # LND -> NLD
|
285 |
-
#hide_feat=x
|
286 |
-
#x = self.ln_post(x[:, 0, :])
|
287 |
-
#x=self.ln_post(x)
|
288 |
-
if self.proj is not None:
|
289 |
-
hide_feat=self.ln_post(x) @ self.proj
|
290 |
-
x = self.ln_post(x[:, 0, :]) @ self.proj
|
291 |
-
#print(hide_feat.shape)
|
292 |
-
|
293 |
-
return x,weight,hide_feat
|
294 |
-
|
295 |
-
|
296 |
-
class CLIP(nn.Module):
|
297 |
-
def __init__(self,
|
298 |
-
embed_dim: int,
|
299 |
-
# vision
|
300 |
-
image_resolution: int,
|
301 |
-
vision_layers: Union[Tuple[int, int, int, int], int],
|
302 |
-
vision_width: int,
|
303 |
-
vision_patch_size: int,
|
304 |
-
# text
|
305 |
-
context_length: int,
|
306 |
-
vocab_size: int,
|
307 |
-
transformer_width: int,
|
308 |
-
transformer_heads: int,
|
309 |
-
transformer_layers: int
|
310 |
-
):
|
311 |
-
super().__init__()
|
312 |
-
|
313 |
-
self.context_length = context_length
|
314 |
-
|
315 |
-
if isinstance(vision_layers, (tuple, list)):
|
316 |
-
vision_heads = vision_width * 32 // 64
|
317 |
-
self.visual = ModifiedResNet(
|
318 |
-
layers=vision_layers,
|
319 |
-
output_dim=embed_dim,
|
320 |
-
heads=vision_heads,
|
321 |
-
input_resolution=image_resolution,
|
322 |
-
width=vision_width
|
323 |
-
)
|
324 |
-
else:
|
325 |
-
vision_heads = vision_width // 64
|
326 |
-
self.visual = VisualTransformer(
|
327 |
-
input_resolution=image_resolution,
|
328 |
-
patch_size=vision_patch_size,
|
329 |
-
width=vision_width,
|
330 |
-
layers=vision_layers,
|
331 |
-
heads=vision_heads,
|
332 |
-
output_dim=embed_dim
|
333 |
-
)
|
334 |
-
|
335 |
-
self.transformer = Transformer(
|
336 |
-
width=transformer_width,
|
337 |
-
layers=transformer_layers,
|
338 |
-
heads=transformer_heads,
|
339 |
-
attn_mask=self.build_attention_mask()
|
340 |
-
)
|
341 |
-
|
342 |
-
self.vocab_size = vocab_size
|
343 |
-
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
|
344 |
-
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
|
345 |
-
self.ln_final = LayerNorm(transformer_width)
|
346 |
-
|
347 |
-
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
|
348 |
-
self.logit_scale = nn.Parameter(torch.ones([]))
|
349 |
-
|
350 |
-
def build_attention_mask(self):
|
351 |
-
# lazily create causal attention mask, with full attention between the vision tokens
|
352 |
-
# pytorch uses additive attention mask; fill with -inf
|
353 |
-
mask = torch.empty(self.context_length, self.context_length)
|
354 |
-
mask.fill_(float("-inf"))
|
355 |
-
mask.triu_(1) # zero out the lower diagonal
|
356 |
-
return mask
|
357 |
-
|
358 |
-
@property
|
359 |
-
def dtype(self):
|
360 |
-
return self.visual.conv1.weight.dtype
|
361 |
-
|
362 |
-
def encode_image(self, image):
|
363 |
-
return self.visual(image.type(self.dtype))
|
364 |
-
|
365 |
-
def encode_text(self, text):
|
366 |
-
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
|
367 |
-
|
368 |
-
x = x + self.positional_embedding.type(self.dtype)
|
369 |
-
x = x.permute(1, 0, 2) # NLD -> LND
|
370 |
-
x,weight = self.transformer(x)
|
371 |
-
x = x.permute(1, 0, 2) # LND -> NLD
|
372 |
-
x = self.ln_final(x).type(self.dtype)
|
373 |
-
|
374 |
-
# x.shape = [batch_size, n_ctx, transformer.width]
|
375 |
-
# take features from the eot embedding (eot_token is the highest number in each sequence)
|
376 |
-
hide_feat=x
|
377 |
-
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
|
378 |
-
|
379 |
-
return x,weight,hide_feat
|
380 |
-
|
381 |
-
def forward(self, image, text):
|
382 |
-
image_features,weight_image,hide_image = self.encode_image(image)
|
383 |
-
text_features,weight_text,hide_text = self.encode_text(text)
|
384 |
-
|
385 |
-
# normalized features
|
386 |
-
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
|
387 |
-
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
|
388 |
-
|
389 |
-
# cosine similarity as logits
|
390 |
-
logit_scale = self.logit_scale.exp()
|
391 |
-
logits_per_iamge = logit_scale * image_features @ text_features.t()
|
392 |
-
logits_per_text = logit_scale * text_features @ image_features.t()
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
# shape = [global_batch_size, global_batch_size]
|
398 |
-
#return image_features, text_features logits_per_iamge, logits_per_text,hide_image,hide_text
|
399 |
-
return image_features, text_features,hide_image,hide_text
|
400 |
-
|
401 |
-
def convert_weights(model: nn.Module):
|
402 |
-
"""Convert applicable model parameters to fp16"""
|
403 |
-
|
404 |
-
def _convert_weights_to_fp16(l):
|
405 |
-
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
|
406 |
-
l.weight.data = l.weight.data.half()
|
407 |
-
if l.bias is not None:
|
408 |
-
l.bias.data = l.bias.data.half()
|
409 |
-
|
410 |
-
if isinstance(l, nn.MultiheadAttention):
|
411 |
-
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
|
412 |
-
tensor = getattr(l, attr)
|
413 |
-
if tensor is not None:
|
414 |
-
tensor.data = tensor.data.half()
|
415 |
-
|
416 |
-
for name in ["text_projection", "proj"]:
|
417 |
-
if hasattr(l, name):
|
418 |
-
attr = getattr(l, name)
|
419 |
-
if attr is not None:
|
420 |
-
attr.data = attr.data.half()
|
421 |
-
|
422 |
-
model.apply(_convert_weights_to_fp16)
|
423 |
-
|
424 |
-
|
425 |
-
def build_model(state_dict: dict):
|
426 |
-
vit = "visual.proj" in state_dict
|
427 |
-
|
428 |
-
if vit:
|
429 |
-
vision_width = state_dict["visual.conv1.weight"].shape[0]
|
430 |
-
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
|
431 |
-
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
|
432 |
-
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
|
433 |
-
image_resolution = vision_patch_size * grid_size
|
434 |
-
else:
|
435 |
-
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
|
436 |
-
vision_layers = tuple(counts)
|
437 |
-
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
|
438 |
-
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
|
439 |
-
vision_patch_size = None
|
440 |
-
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
|
441 |
-
image_resolution = output_width * 32
|
442 |
-
|
443 |
-
embed_dim = state_dict["text_projection"].shape[1]
|
444 |
-
context_length = state_dict["positional_embedding"].shape[0]
|
445 |
-
vocab_size = state_dict["token_embedding.weight"].shape[0]
|
446 |
-
transformer_width = state_dict["ln_final.weight"].shape[0]
|
447 |
-
transformer_heads = transformer_width // 64
|
448 |
-
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
|
449 |
-
|
450 |
-
model = CLIP(
|
451 |
-
embed_dim,
|
452 |
-
image_resolution, vision_layers, vision_width, vision_patch_size,
|
453 |
-
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
|
454 |
-
)
|
455 |
-
|
456 |
-
for key in ["input_resolution", "context_length", "vocab_size"]:
|
457 |
-
del state_dict[key]
|
458 |
-
|
459 |
-
convert_weights(model)
|
460 |
-
model.load_state_dict(state_dict)
|
461 |
-
return model.eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BlitzEsports/TextToImage/index.html
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
|
2 |
-
<!DOCTYPE html>
|
3 |
-
<html lang="en">
|
4 |
-
<head>
|
5 |
-
<meta charset="utf-8" />
|
6 |
-
<meta
|
7 |
-
name="viewport"
|
8 |
-
content="width=device-width, initial-scale=1, shrink-to-fit=no, maximum-scale=1"
|
9 |
-
/>
|
10 |
-
|
11 |
-
<script>
|
12 |
-
window.__gradio_mode__ = "app";
|
13 |
-
window.gradio_config = {"version": "3.0.26\n", "mode": "blocks", "dev_mode": false, "components": [{"id": 1, "type": "column", "props": {"type": "column", "variant": "default", "visible": true, "style": {}}}, {"id": 2, "type": "markdown", "props": {"value": "<h1><center>DALL\u00b7E mini by <a href=\"https://www.craiyon.com/\" target=\"_blank\">craiyon.com</a></center></h1>", "name": "markdown", "visible": true, "style": {}}}, {"id": 3, "type": "markdown", "props": {"value": "<center>AI model generating images from any prompt!</center>", "name": "markdown", "visible": true, "style": {}}}, {"id": 4, "type": "group", "props": {"type": "group", "visible": true, "style": {}}}, {"id": 5, "type": "box", "props": {"type": "box", "visible": true, "style": {}}}, {"id": 6, "type": "row", "props": {"type": "row", "visible": true, "style": {"equal_height": true, "mobile_collapse": false}}}, {"id": 7, "type": "textbox", "props": {"lines": 1, "max_lines": 1, "value": "", "label": "Enter your prompt", "show_label": false, "name": "textbox", "visible": true, "elem_id": "prompt", "style": {"container": false}}}, {"id": 8, "type": "button", "props": {"value": "Run", "variant": "primary", "name": "button", "visible": true, "style": {}}}, {"id": 9, "type": "gallery", "props": {"value": [], "label": "Generated images", "show_label": false, "name": "gallery", "visible": true, "elem_id": "gallery", "style": {"grid": [3], "height": "auto"}}}, {"id": 10, "type": "column", "props": {"type": "column", "variant": "default", "visible": true, "style": {}}}, {"id": 11, "type": "button", "props": {"value": "Screenshot", "variant": "secondary", "name": "button", "visible": true, "elem_id": "screenshot", "style": {"full_width": true}}}, {"id": 12, "type": "markdown", "props": {"value": "<details>\n<summary>Bias and Limitations</summary>\n<p style='line-height: normal; font-size: small'>\nWhile the capabilities of image generation models are impressive, they may also reinforce or exacerbate societal biases. While the extent and nature of the biases of the DALL\u00b7E mini model have yet to be fully documented, given the fact that the model was trained on unfiltered data from the Internet, it may generate images that contain stereotypes against minority groups. Work to analyze the nature and extent of these limitations is ongoing, and will be documented in more detail in the <a href=\"https://huggingface.co/dalle-mini/dalle-mini\" target=\"_blank\">DALL\u00b7E mini model card</a>.\n</p>\n</details>", "name": "markdown", "visible": true, "style": {}}}, {"id": 13, "type": "markdown", "props": {"value": "<p style='text-align: center'>\nDALL\u00b7E mini is migrating to \ud83d\udd8d\ufe0f <a href=\"https://www.craiyon.com/\" target=\"_blank\">craiyon.com</a>\n</p>", "name": "markdown", "visible": true, "style": {}}}, {"id": 14, "type": "markdown", "props": {"value": "<hr />\n<p style='text-align: center'>\nCreated by <a href=\"https://twitter.com/borisdayma\" target=\"_blank\">Boris Dayma</a> et al. 
2021-2022\n<br/>\n<a href=\"https://github.com/borisdayma/dalle-mini\" target=\"_blank\">GitHub</a> | <a href=\"https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini-Generate-images-from-any-text-prompt--VmlldzoyMDE4NDAy\" target=\"_blank\">Project Report</a>\n<p style='text-align: center'>Powered by Google <a href=\"https://sites.research.google/trc/\" target=\"_blank\">TPU Research Cloud</a>\n</p>", "name": "markdown", "visible": true, "style": {}}}], "theme": "default", "css": ".container { max-width: 800px; margin: auto; }", "title": "Gradio", "enable_queue": false, "layout": {"id": 0, "children": [{"id": 1, "children": [{"id": 2}, {"id": 3}, {"id": 4, "children": [{"id": 5, "children": [{"id": 6, "children": [{"id": 7}, {"id": 8}]}]}, {"id": 9}]}]}, {"id": 10, "children": [{"id": 11}, {"id": 12}, {"id": 13}, {"id": 14}]}]}, "dependencies": [{"targets": [8], "trigger": "click", "inputs": [7], "outputs": [9], "backend_fn": false, "js": "\n async (text) => {\n try {\n document.querySelector('#screenshot').style.display = 'none';\n response = await fetch('https://bf.dallemini.ai/generate', {\n method: 'POST',\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n },\n body: JSON.stringify({\n prompt: text\n })\n });\n response = await response.json()\n let imgs = response.images.map(r => \"data:image/png;base64,\" + r)\n document.querySelector('#screenshot').style.display = 'block';\n return imgs\n } catch (e) {\n alert(\"Too much traffic, please try again.\")\n IMG = \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAMAAACahl6sAAAAOVBMVEXg4OB1dXXX19fd3d2EhIR9fX14eHjJycm2trbb29uurq6goKCZmZmIiIiBgYHNzc2np6e8vLySkpKXK8HrAAABuUlEQVR4nO3Z0bKCIBCAYQNFVCzr/R/2nHU6k8KpJi6wZf7vLu1id9gFhKYBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAb249h7pzr5jD29uhospnlfNo4L+boiLKYyZ0iblKYiu/iNER3PTquD9npPgbB98Za0/twH59JVasMtzXo1m+iHny7PrwpysSuebgxCtmOTlkma121l/TFZR2UqXxEebxEO/87QZlZ3inpeCPzVftkojUyJp2OWVgKy23qSsbg8evitBSXkUjHzYN9Is0oeWoYkkUKazsxRYlYKa6ldFSfs7K/8tsnUSLrXHAuG1SOXpp5t1LEiQxSe33ZqDJIC4TdkziRJkRN9J1CXFlpIj7J9RvNSd0kiUj1zSVjyiKr4X5yTRIx0kYlY8oinbzfFSaJWFlJSsaUpZpEqimttNkTOpo9nX4TOqbfdEFM6FgQpW7c8OofSrYo1Wwaq9nG1/NhVc2nbj2HD821kuOgeg7o3hyZBj1Hpo9D7M3K+HeIrSmPeq4Vfl3ruOhpnly9vdyEfa1KLkPF7nr66GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPjcD13rCcC3ILx/AAAAAElFTkSuQmCC\"\n document.querySelector('#screenshot').style.display = 'block';\n return Array(9).fill(IMG)\n }\n }\n ", "status_tracker": null, "queue": null, "api_name": null, "scroll_to_output": false, "show_progress": true}, {"targets": [11], "trigger": "click", "inputs": [], "outputs": [], "backend_fn": false, "js": "\n () => {\n const captureElement = document.getElementById(1)\n let bg_color = getComputedStyle(document.querySelector(\"#root .container\"))[\"background-color\"]\n captureElement.style.backgroundColor = bg_color; \n html2canvas(captureElement)\n .then(canvas => {\n canvas.style.display = 'none'\n document.body.appendChild(canvas)\n return canvas\n })\n .then(canvas => {\n const image = canvas.toDataURL('image/png').replace('image/png', 'image/octet-stream')\n const a = document.createElement('a')\n const date = new Date()\n const filename = `dallemini_${date.getFullYear()}-${date.getMonth() + 1}-${date.getDate()}_${date.getHours()}-${date.getMinutes()}-${date.getSeconds()}.png`\n a.setAttribute('download', filename)\n a.setAttribute('href', image)\n a.click()\n canvas.remove()\n })\n }\n ", "status_tracker": null, "queue": null, "api_name": null, "scroll_to_output": false, "show_progress": true}]};
-    </script>
-
-    <link rel="preconnect" href="https://fonts.googleapis.com" />
-    <link
-      rel="preconnect"
-      href="https://fonts.gstatic.com"
-      crossorigin="anonymous"
-    />
-    <link
-      href="https://fonts.googleapis.com/css?family=Source Sans Pro"
-      rel="stylesheet"
-    />
-    <link
-      href="https://fonts.googleapis.com/css?family=IBM Plex Mono"
-      rel="stylesheet"
-    />
-    <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
-    <script type="module" crossorigin src="https://gradio.s3-us-west-2.amazonaws.com/3.0.9b12/assets/index.8eca4ae7.js"></script>
-    <link rel="stylesheet" href="https://gradio.s3-us-west-2.amazonaws.com/3.0.9b12/assets/index.cbea297d.css">
-    <style>
-      #screenshot {
-        display: none;
-      }
-      .container > div > div {
-        padding: 0.5rem;
-      }
-      footer a {
-        color: rgb(156 163 175) !important;
-      }
-      footer img {
-        display: none !important;
-      }
-    </style>
-    <style id="mofo">
-      body {
-        display: none !important;
-      }
-    </style>
-    <script type="text/javascript">
-      if (self === top || window.location.ancestorOrigins[0] === "https://huggingface.co") {
-        var mofo = document.getElementById("mofo");
-        mofo.parentNode.removeChild(mofo);
-      } else {
-        top.location = self.location;
-      }
-    </script>
-  </head>
-
-  <body
-    style="
-      margin: 0;
-      padding: 0;
-      display: flex;
-      flex-direction: column;
-      flex-grow: 1;
-    "
-  >
-    <div
-      id="root"
-      style="display: flex; flex-direction: column; flex-grow: 1"
-    ></div>
-    <script src="html2canvas.js"></script>
-  </body>
-</html>
spaces/Brasd99/AnswerMate/app.py
DELETED
@@ -1,141 +0,0 @@
-from typing import List, Tuple, Dict, Any
-import time
-import json
-import requests
-import gradio as gr
-import poe
-import os
-
-with open('config.json', 'r') as f:
-    config = json.load(f)
-
-max_questions_count = config['MAX_QUESTIONS_COUNT']
-max_tags_count = config['MAX_TAGS_COUNT']
-max_attempts = config['MAX_ATTEMPS']
-wait_time = config['WAIT_TIME']
-chatgpt_url = config['CHATGPT_URL']
-system_prompt = config['SYSTEM_PROMPT']
-use_sage = config['USE_SAGE']
-sage_token = os.environ['SAGE_TOKEN']
-
-def get_answer(question: str, client: poe.Client=None) -> Dict[str, Any]:
-    if use_sage:
-        for chunk in client.send_message('capybara', question, with_chat_break=True):
-            pass
-        client.delete_message(chunk['messageId'])
-        return {
-            'status': True,
-            'content': chunk['text']
-        }
-
-    headers = {
-        'Content-Type': 'application/json; charset=utf-8'
-    }
-    payload = {
-        'model': 'gpt-3.5-turbo',
-        'messages': [
-            {
-                'role': 'system',
-                'content': system_prompt
-            },
-            {
-                'role': 'user',
-                'content': question
-            }
-        ]
-    }
-
-    try:
-        response = requests.post(chatgpt_url, headers=headers, data=json.dumps(payload))
-        response.raise_for_status()
-        content = response.json()['choices'][0]['message']['content']
-        return {
-            'status': True,
-            'content': content
-        }
-    except:
-        return {
-            'status': False
-        }
-
-def format_results(results: List[Tuple[str, str]]) -> str:
-    output = ''
-    for i, (question, answer) in enumerate(results):
-        output += f'Question №{i+1}: {question}\n'
-        output += f'Answer: {answer}\n'
-        if i < len(results) - 1:
-            output += '--------------------------------------\n\n'
-    output = output.strip()
-    return output
-
-def validate_and_get_tags(tags: str) -> List[str]:
-    if not tags.strip():
-        raise gr.Error('Validation error. It is necessary to set at least one tag')
-
-    tags = [tag.strip() for tag in tags.split('\n') if tag.strip()]
-
-    if len(tags) > max_tags_count:
-        raise gr.Error(f'Validation error. The maximum allowed number of tags is {max_tags_count}.')
-
-    return tags
-
-def validate_and_get_questions(questions: str) -> List[str]:
-    if not questions.strip():
-        raise gr.Error('Validation error. It is necessary to ask at least one question')
-
-    questions = [question.strip() for question in questions.split('\n') if question.strip()]
-    if len(questions) > max_questions_count:
-        raise gr.Error(f'Validation error. The maximum allowed number of questions is {max_questions_count}.')
-
-    return questions
-
-def find_answers(tags: str, questions: str, progress=gr.Progress()) -> str:
-    tags = validate_and_get_tags(tags)
-    questions = validate_and_get_questions(questions)
-
-    print(f'New attempt to get answers. Got {len(tags)} tags and {len(questions)} questions')
-    print(f'Tags: {tags}')
-    print(f'Questions: {questions}')
-
-    tags_str = ''.join([f'[{tag}]' for tag in tags])
-
-    if use_sage:
-        client = poe.Client(sage_token)
-
-    results = []
-    for question in progress.tqdm(questions):
-        time.sleep(wait_time)
-        tagged_question = f'{tags_str} {question}'
-        for attempt in range(max_attempts):
-            answer = get_answer(tagged_question, client)
-            if answer['status']:
-                results.append((question, answer['content']))
-                break
-            elif attempt == max_attempts - 1:
-                results.append((question, 'An error occurred while receiving data.'))
-            else:
-                time.sleep(wait_time)
-
-    return format_results(results)
-
-title = '<h1 style="text-align:center">AnswerMate</h1>'
-
-with gr.Blocks(theme='soft', title='AnswerMate') as blocks:
-    gr.HTML(title)
-    gr.Markdown('The service allows you to get answers to all questions on the specified topic.')
-    with gr.Row():
-        tags_input = gr.Textbox(
-            label=f'Enter tags (each line is a separate tag). Maximum: {max_tags_count}.',
-            placeholder='.NET\nC#',
-            lines=max_tags_count
-        )
-        questions_input = gr.Textbox(
-            label=f'Enter questions (each line is a separate question). Maximum: {max_questions_count}.',
-            placeholder='What is inheritance, encapsulation, abstraction, polymorphism?\nWhat is CLR?',
-            lines=max_questions_count
-        )
-    process_button = gr.Button('Find answers')
-    outputs = gr.Textbox(label='Output', placeholder='Output will appear here')
-    process_button.click(fn=find_answers, inputs=[tags_input, questions_input], outputs=outputs)
-
-blocks.queue(concurrency_count=1).launch()
spaces/CVPR/LIVE/pybind11/include/pybind11/complex.h
DELETED
@@ -1,65 +0,0 @@
-/*
-    pybind11/complex.h: Complex number support
-
-    Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#pragma once
-
-#include "pybind11.h"
-#include <complex>
-
-/// glibc defines I as a macro which breaks things, e.g., boost template names
-#ifdef I
-#  undef I
-#endif
-
-PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-
-template <typename T> struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
-    static constexpr const char c = format_descriptor<T>::c;
-    static constexpr const char value[3] = { 'Z', c, '\0' };
-    static std::string format() { return std::string(value); }
-};
-
-#ifndef PYBIND11_CPP17
-
-template <typename T> constexpr const char format_descriptor<
-    std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];
-
-#endif
-
-PYBIND11_NAMESPACE_BEGIN(detail)
-
-template <typename T> struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
-    static constexpr bool value = true;
-    static constexpr int index = is_fmt_numeric<T>::index + 3;
-};
-
-template <typename T> class type_caster<std::complex<T>> {
-public:
-    bool load(handle src, bool convert) {
-        if (!src)
-            return false;
-        if (!convert && !PyComplex_Check(src.ptr()))
-            return false;
-        Py_complex result = PyComplex_AsCComplex(src.ptr());
-        if (result.real == -1.0 && PyErr_Occurred()) {
-            PyErr_Clear();
-            return false;
-        }
-        value = std::complex<T>((T) result.real, (T) result.imag);
-        return true;
-    }
-
-    static handle cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {
-        return PyComplex_FromDoubles((double) src.real(), (double) src.imag());
-    }
-
-    PYBIND11_TYPE_CASTER(std::complex<T>, _("complex"));
-};
-PYBIND11_NAMESPACE_END(detail)
-PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
spaces/CVPR/WALT/mmdet/core/anchor/point_generator.py
DELETED
@@ -1,37 +0,0 @@
-import torch
-
-from .builder import ANCHOR_GENERATORS
-
-
-@ANCHOR_GENERATORS.register_module()
-class PointGenerator(object):
-
-    def _meshgrid(self, x, y, row_major=True):
-        xx = x.repeat(len(y))
-        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
-        if row_major:
-            return xx, yy
-        else:
-            return yy, xx
-
-    def grid_points(self, featmap_size, stride=16, device='cuda'):
-        feat_h, feat_w = featmap_size
-        shift_x = torch.arange(0., feat_w, device=device) * stride
-        shift_y = torch.arange(0., feat_h, device=device) * stride
-        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
-        stride = shift_x.new_full((shift_xx.shape[0], ), stride)
-        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
-        all_points = shifts.to(device)
-        return all_points
-
-    def valid_flags(self, featmap_size, valid_size, device='cuda'):
-        feat_h, feat_w = featmap_size
-        valid_h, valid_w = valid_size
-        assert valid_h <= feat_h and valid_w <= feat_w
-        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
-        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
-        valid_x[:valid_w] = 1
-        valid_y[:valid_h] = 1
-        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
-        valid = valid_xx & valid_yy
-        return valid