Commit
·
563cc9a
1
Parent(s):
f16f168
Update parquet files (step 19 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/CODE_OF_CONDUCT.md +0 -128
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Are Cracks In Teeth Bad __HOT__.md +0 -30
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download AutoCAD 2011 Full Crack 64 Bit and Unleash Your Design Potential.md +0 -34
- spaces/1gistliPinn/ChatGPT4/Examples/Angels And Demons Movie Dvdrip Torrent.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/City Smash MOD APK 1.6 - Experience the Thrill of Destruction.md +0 -14
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bad Piggies 2 APK and Join the Green Pigs on an Epic Adventure.md +0 -129
- spaces/1phancelerku/anime-remove-background/Download Facebook Reels APK for Android and Watch Fun and Inspiring Videos.md +0 -129
- spaces/1phancelerku/anime-remove-background/Download Slug it Out 1 APK OBB 2.8.9 for Free - The Best Slugslinger Game for Android Devices.md +0 -114
- spaces/1phancelerku/anime-remove-background/Download WhatsApp Messenger on Laptop A Simple Guide.md +0 -96
- spaces/1phancelerku/anime-remove-background/Experience the thrill of being a bus driver in American Bus Driving Simulator for PC.md +0 -90
- spaces/2023Liu2023/bingo/src/components/ui/select.tsx +0 -123
- spaces/2ndelement/voicevox/voicevox_engine/utility/path_utility.py +0 -51
- spaces/AIFILMS/generate_human_motion/pyrender/tests/__init__.py +0 -0
- spaces/AILab-CVC/SEED-LLaMA/models/__init__.py +0 -0
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/__init__.py +0 -0
- spaces/AUST001/ChatGPT/README.md +0 -13
- spaces/Aashir01/Live_Transcription/processing_whisper.py +0 -145
- spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/basic.py +0 -17
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/clock-plugin.js +0 -20
- spaces/Ainterface/compare-gpt-models/app.py +0 -121
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/finetune_speaker_v2.py +0 -321
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/losses.py +0 -61
- spaces/Alpaca233/SadTalker/launcher.py +0 -204
- spaces/AlphaGPT/PaperSummary/app.py +0 -865
- spaces/Amrrs/DragGan-Inversion/stylegan_human/insetgan.py +0 -448
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.cpp +0 -25
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_unclip.py +0 -287
- spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/fp16/deeplabv3_r101-d8_512x1024_80k_fp16_cityscapes.py +0 -5
- spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py +0 -9
- spaces/Anthony-Ml/covid_predictor/README.md +0 -12
- spaces/Apex-X/nono/roop/metadata.py +0 -2
- spaces/Arthur678/vits-uma-genshin-honkai/models.py +0 -534
- spaces/Audio-AGI/WavJourney/wavjourney_cli.py +0 -27
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py +0 -228
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py +0 -176
- spaces/AzinZ/vitscn/utils.py +0 -258
- spaces/Benson/text-generation/Examples/Amanda El Aventurero Apk Android Oyun Club.md +0 -70
- spaces/Benson/text-generation/Examples/Carx Carretera Carreras V1 74.3 Apk Mod.md +0 -77
- spaces/Benson/text-generation/Examples/Cipherlab 8000 Driver Descargar Ventanas 7.md +0 -69
- spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Estrellas Apk.md +0 -69
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/exceptions.py +0 -48
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/util.py +0 -235
- spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/image_text_pair_builder.py +0 -86
- spaces/CVH-vn1210/make_hair/minigpt4/processors/base_processor.py +0 -26
- spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/function_traits.h +0 -96
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/set_operations.h +0 -23
- spaces/CVPR/regionclip-demo/README.md +0 -13
- spaces/CamCam17/Alexwww-davide-comic-book-characters/app.py +0 -3
- spaces/ChandraMohanNayal/AutoGPT/autogpt/logs.py +0 -332
spaces/101-5/gpt4free/CODE_OF_CONDUCT.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
# Contributor Covenant Code of Conduct
|
2 |
-
|
3 |
-
## Our Pledge
|
4 |
-
|
5 |
-
We as members, contributors, and leaders pledge to make participation in our
|
6 |
-
community a harassment-free experience for everyone, regardless of age, body
|
7 |
-
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
8 |
-
identity and expression, level of experience, education, socio-economic status,
|
9 |
-
nationality, personal appearance, race, religion, or sexual identity
|
10 |
-
and orientation.
|
11 |
-
|
12 |
-
We pledge to act and interact in ways that contribute to an open, welcoming,
|
13 |
-
diverse, inclusive, and healthy community.
|
14 |
-
|
15 |
-
## Our Standards
|
16 |
-
|
17 |
-
Examples of behavior that contributes to a positive environment for our
|
18 |
-
community include:
|
19 |
-
|
20 |
-
* Demonstrating empathy and kindness toward other people
|
21 |
-
* Being respectful of differing opinions, viewpoints, and experiences
|
22 |
-
* Giving and gracefully accepting constructive feedback
|
23 |
-
* Accepting responsibility and apologizing to those affected by our mistakes,
|
24 |
-
and learning from the experience
|
25 |
-
* Focusing on what is best not just for us as individuals, but for the
|
26 |
-
overall community
|
27 |
-
|
28 |
-
Examples of unacceptable behavior include:
|
29 |
-
|
30 |
-
* The use of sexualized language or imagery, and sexual attention or
|
31 |
-
advances of any kind
|
32 |
-
* Trolling, insulting or derogatory comments, and personal or political attacks
|
33 |
-
* Public or private harassment
|
34 |
-
* Publishing others' private information, such as a physical or email
|
35 |
-
address, without their explicit permission
|
36 |
-
* Other conduct which could reasonably be considered inappropriate in a
|
37 |
-
professional setting
|
38 |
-
|
39 |
-
## Enforcement Responsibilities
|
40 |
-
|
41 |
-
Community leaders are responsible for clarifying and enforcing our standards of
|
42 |
-
acceptable behavior and will take appropriate and fair corrective action in
|
43 |
-
response to any behavior that they deem inappropriate, threatening, offensive,
|
44 |
-
or harmful.
|
45 |
-
|
46 |
-
Community leaders have the right and responsibility to remove, edit, or reject
|
47 |
-
comments, commits, code, wiki edits, issues, and other contributions that are
|
48 |
-
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
49 |
-
decisions when appropriate.
|
50 |
-
|
51 |
-
## Scope
|
52 |
-
|
53 |
-
This Code of Conduct applies within all community spaces, and also applies when
|
54 |
-
an individual is officially representing the community in public spaces.
|
55 |
-
Examples of representing our community include using an official e-mail address,
|
56 |
-
posting via an official social media account, or acting as an appointed
|
57 |
-
representative at an online or offline event.
|
58 |
-
|
59 |
-
## Enforcement
|
60 |
-
|
61 |
-
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
62 |
-
reported to the community leaders responsible for enforcement at
|
63 |
-
https://t.me/xtekky.
|
64 |
-
All complaints will be reviewed and investigated promptly and fairly.
|
65 |
-
|
66 |
-
All community leaders are obligated to respect the privacy and security of the
|
67 |
-
reporter of any incident.
|
68 |
-
|
69 |
-
## Enforcement Guidelines
|
70 |
-
|
71 |
-
Community leaders will follow these Community Impact Guidelines in determining
|
72 |
-
the consequences for any action they deem in violation of this Code of Conduct:
|
73 |
-
|
74 |
-
### 1. Correction
|
75 |
-
|
76 |
-
**Community Impact**: Use of inappropriate language or other behavior deemed
|
77 |
-
unprofessional or unwelcome in the community.
|
78 |
-
|
79 |
-
**Consequence**: A private, written warning from community leaders, providing
|
80 |
-
clarity around the nature of the violation and an explanation of why the
|
81 |
-
behavior was inappropriate. A public apology may be requested.
|
82 |
-
|
83 |
-
### 2. Warning
|
84 |
-
|
85 |
-
**Community Impact**: A violation through a single incident or series
|
86 |
-
of actions.
|
87 |
-
|
88 |
-
**Consequence**: A warning with consequences for continued behavior. No
|
89 |
-
interaction with the people involved, including unsolicited interaction with
|
90 |
-
those enforcing the Code of Conduct, for a specified period of time. This
|
91 |
-
includes avoiding interactions in community spaces as well as external channels
|
92 |
-
like social media. Violating these terms may lead to a temporary or
|
93 |
-
permanent ban.
|
94 |
-
|
95 |
-
### 3. Temporary Ban
|
96 |
-
|
97 |
-
**Community Impact**: A serious violation of community standards, including
|
98 |
-
sustained inappropriate behavior.
|
99 |
-
|
100 |
-
**Consequence**: A temporary ban from any sort of interaction or public
|
101 |
-
communication with the community for a specified period of time. No public or
|
102 |
-
private interaction with the people involved, including unsolicited interaction
|
103 |
-
with those enforcing the Code of Conduct, is allowed during this period.
|
104 |
-
Violating these terms may lead to a permanent ban.
|
105 |
-
|
106 |
-
### 4. Permanent Ban
|
107 |
-
|
108 |
-
**Community Impact**: Demonstrating a pattern of violation of community
|
109 |
-
standards, including sustained inappropriate behavior, harassment of an
|
110 |
-
individual, or aggression toward or disparagement of classes of individuals.
|
111 |
-
|
112 |
-
**Consequence**: A permanent ban from any sort of public interaction within
|
113 |
-
the community.
|
114 |
-
|
115 |
-
## Attribution
|
116 |
-
|
117 |
-
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
118 |
-
version 2.0, available at
|
119 |
-
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
120 |
-
|
121 |
-
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
122 |
-
enforcement ladder](https://github.com/mozilla/diversity).
|
123 |
-
|
124 |
-
[homepage]: https://www.contributor-covenant.org
|
125 |
-
|
126 |
-
For answers to common questions about this code of conduct, see the FAQ at
|
127 |
-
https://www.contributor-covenant.org/faq. Translations are available at
|
128 |
-
https://www.contributor-covenant.org/translations.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Are Cracks In Teeth Bad __HOT__.md
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Are Cracks in Teeth Bad?</h1>
|
3 |
-
<p>Cracks in teeth are common dental problems that can affect anyone at any age. They can range from tiny hairline cracks on the surface of the enamel to deep fractures that extend into the pulp of the tooth. Cracks in teeth can be caused by various factors, such as chewing hard foods, grinding teeth, trauma, large fillings, or aging. But are cracks in teeth bad? The answer depends on the type and severity of the crack, as well as the symptoms and complications it may cause.</p>
|
4 |
-
<h2>are cracks in teeth bad</h2><br /><p><b><b>DOWNLOAD</b> ☑ <a href="https://byltly.com/2uKyvV">https://byltly.com/2uKyvV</a></b></p><br /><br />
|
5 |
-
<h2>Types of Cracks in Teeth</h2>
|
6 |
-
<p>There are different types of cracks in teeth, each with different characteristics and implications. Some of the common types are:</p>
|
7 |
-
<ul>
|
8 |
-
<li><strong>Craze lines</strong>: These are very fine cracks on the enamel that do not affect the inner layers of the tooth. They are usually harmless and do not cause any pain or sensitivity. They may appear as vertical lines on the front teeth or horizontal lines on the back teeth. They are more common in adults and may be caused by normal wear and tear or biting habits.</li>
|
9 |
-
<li><strong>Fractured cusp</strong>: This is a crack that occurs around a filling or a crown, usually on the chewing surface of the tooth. It may cause part of the tooth to break off, exposing the dentin or the pulp. It may cause mild to moderate pain or sensitivity, especially when biting or chewing. It may also increase the risk of decay or infection. It usually requires a dental restoration, such as a new filling, crown, or onlay.</li>
|
10 |
-
<li><strong>Cracked tooth</strong>: This is a crack that runs from the chewing surface of the tooth towards the root, but does not separate the tooth into two parts. It may affect one or more cusps of the tooth and may extend below the gum line. It may cause severe pain or sensitivity, especially when biting or releasing pressure. It may also cause swelling, inflammation, or infection of the pulp or the surrounding tissues. It usually requires a root canal treatment and a crown to save the tooth.</li>
|
11 |
-
<li><strong>Split tooth</strong>: This is a crack that splits the tooth into two or more segments. It is usually the result of an untreated cracked tooth that worsens over time. It may cause severe pain or sensitivity, as well as bleeding, infection, or abscess formation. It usually requires extraction of the tooth or removal of some segments and restoration of others.</li>
|
12 |
-
<li><strong>Vertical root fracture</strong>: This is a crack that starts from the root of the tooth and extends upwards towards the crown. It may not be visible on the surface of the tooth and may not cause any symptoms until it becomes infected. It may cause swelling, tenderness, or pus around the affected tooth or gum. It usually requires extraction of the tooth.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>Complications of Cracks in Teeth</h2>
|
15 |
-
<p>Cracks in teeth can cause various complications if left untreated, such as:</p>
|
16 |
-
<ul>
|
17 |
-
<li><strong>Pain and sensitivity</strong>: Cracks in teeth can expose the dentin or pulp to external stimuli, such as temperature changes, pressure, or sugar. This can cause sharp pain or sensitivity that may interfere with eating, drinking, speaking, or sleeping.</li>
|
18 |
-
<li><strong>Decay and infection</strong>: Cracks in teeth can create spaces for bacteria to enter and accumulate, leading to decay and infection. This can damage the tooth structure and pulp, causing pain, inflammation, abscesses, or even systemic infections.</li>
|
19 |
-
<li><strong>Damage to adjacent teeth</strong>: Cracks in teeth can affect the alignment and function of other teeth in the mouth. For example, a cracked tooth may shift out of place and create gaps or crowding with neighboring teeth. A cracked tooth may also damage adjacent or opposing teeth by rubbing against them or biting them too hard.</li>
|
20 |
-
<li><strong>Tooth loss</strong>: Cracks in teeth can compromise the strength and integrity of the tooth, making it more prone to breaking or falling out. In some cases, a cracked tooth may not be salvageable and may need to be extracted.</li>
|
21 |
-
</ul>
|
22 |
-
<h2>Treatment for Cracks in Teeth</h2>
|
23 |
-
<p>The treatment for cracks in teeth depends on several factors, such as:</p>
|
24 |
-
<p></p>
|
25 |
-
<ul>
|
26 |
-
<li>The type and severity of the crack.</li>
|
27 |
-
<li>The location and extent of the crack.</li>
|
28 |
-
<li</p> ddb901b051<br />
|
29 |
-
<br />
|
30 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download AutoCAD 2011 Full Crack 64 Bit and Unleash Your Design Potential.md
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download AutoCAD 2011 Full Crack 64 Bit for Free</h1>
|
3 |
-
<p>If you are looking for a way to download AutoCAD 2011 full crack 64 bit for free, you have come to the right place. AutoCAD is one of the most popular and powerful software for designing and drafting in various fields such as architecture, engineering, construction, and more. However, the official version of AutoCAD 2011 can be quite expensive and not everyone can afford it. That's why many people are looking for a cracked version of AutoCAD 2011 that can run on 64 bit systems.</p>
|
4 |
-
<p>In this article, we will show you how to download AutoCAD 2011 full crack 64 bit for free from a reliable source. We will also provide you with the installation instructions and the activation steps to make sure that your AutoCAD 2011 works properly and without any errors. Follow the steps below and enjoy using AutoCAD 2011 full crack 64 bit for free.</p>
|
5 |
-
<h2>download autocad 2011 full crack 64 bit</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://byltly.com/2uKzID">https://byltly.com/2uKzID</a></b></p><br /><br />
|
6 |
-
<h2>Step 1: Download AutoCAD 2011 Full Crack 64 Bit</h2>
|
7 |
-
<p>The first step is to download AutoCAD 2011 full crack 64 bit from a reliable source. There are many websites that claim to offer AutoCAD 2011 full crack 64 bit for free, but not all of them are trustworthy. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Some of them may also provide fake or corrupted files that will not work or cause errors.</p>
|
8 |
-
<p>That's why we recommend you to download AutoCAD 2011 full crack 64 bit from our website. We have tested and verified that our download link is safe and secure. Our download link will redirect you to a third-party file hosting service where you can download AutoCAD 2011 full crack 64 bit for free. The file size is about 3 GB and it may take some time to download depending on your internet speed.</p>
|
9 |
-
<p>To download AutoCAD 2011 full crack 64 bit from our website, click on the button below:</p>
|
10 |
-
<a href="https://example.com/download-autocad-2011-full-crack-64-bit" target="_blank"><button>Download AutoCAD 2011 Full Crack 64 Bit</button></a>
|
11 |
-
<h2>Step 2: Install AutoCAD 2011 Full Crack 64 Bit</h2>
|
12 |
-
<p>After you have downloaded AutoCAD 2011 full crack 64 bit from our website, you need to install it on your computer. To do that, follow the steps below:</p>
|
13 |
-
<ol>
|
14 |
-
<li>Extract the downloaded file using WinRAR or any other file extraction software.</li>
|
15 |
-
<li>Open the extracted folder and run the setup.exe file as administrator.</li>
|
16 |
-
<li>Follow the installation wizard and choose the language, destination folder, and components you want to install.</li>
|
17 |
-
<li>When the installation is complete, do not run AutoCAD 2011 yet.</li>
|
18 |
-
</ol>
|
19 |
-
<h2>Step 3: Activate AutoCAD 2011 Full Crack 64 Bit</h2>
|
20 |
-
<p>The final step is to activate AutoCAD 2011 full crack 64 bit using the crack file provided in the downloaded folder. To do that, follow the steps below:</p>
|
21 |
-
<p></p>
|
22 |
-
<ol>
|
23 |
-
<li>Open the extracted folder and copy the crack file (xf-acad9-32-BITS.exe or xf-acad9-64-BITS.exe depending on your system) to the installation folder (usually C:\Program Files\Autodesk\AutoCAD LT 2011).</li>
|
24 |
-
<li>Run the crack file as administrator and click on "Patch". You should see a message saying "Successfully patched".</li>
|
25 |
-
<li>Next, click on "Generate" and copy the generated activation code.</li>
|
26 |
-
<li>Now, run AutoCAD 2011 and click on "Activate". You will be asked to enter a serial number and a product key. Use any of these serial numbers and product keys:</li>
|
27 |
-
<ul>
|
28 |
-
<li>Serial number: XXX-XXXXXXXXX</li>
|
29 |
-
<li>Product key: XXXXX-XXXXX-XXXXX-XXXXX-XXXXX</li>
|
30 |
-
</ul>
|
31 |
-
<li>Paste the activation code you copied from the crack file and click on "Next". You should see a message saying "Thank you for activating your Autodesk product".</li>
|
32 |
-
<li>Congratulations! You have successfully installed and activated AutoCAD 2011 full crack 64 bit for free.</li></p> ddb901b051<br />
|
33 |
-
<br />
|
34 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Angels And Demons Movie Dvdrip Torrent.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Angels And Demons Movie Dvdrip Torrent</h2><br /><p><b><b>Download File</b> ►►►►► <a href="https://imgfil.com/2uxXUo">https://imgfil.com/2uxXUo</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Hawa Singh Bollywood Movie: Check out the latest news about Sooraj ... the latest released Bollywood HD Movies, Games and Software directly from Torrent. ... Sanjana is mentally tortured and physically raped by an invisible demon. ... Kadvi Hawa Kickass The soul of Sanjana's father comes as an angel and returns her ... 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/City Smash MOD APK 1.6 - Experience the Thrill of Destruction.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>City Smash 1.6 Mod Apk: A Physics Playground for Destruction Lovers</h1>
|
3 |
-
Do you love to watch things explode, burn, or collapse? Do you enjoy creating chaos and mayhem in a virtual world? If you answered yes to any of these questions, then you will love City Smash, a simulation game that lets you unleash your destructive fantasies on a city with various weapons and tools. In this article, we will tell you everything you need to know about City Smash, and why you should download the latest mod apk version of the game. <h2>What is City Smash?</h2>
|
4 |
-
City Smash is a physics-based simulation game developed by Parallax Games. It is available for Android devices and can be downloaded from the Google Play Store or from other sources. The game is a sandbox where you can smash a city with a nuclear bomb, missile, black hole, laser beam, lightning, or any other weapon you can think of. You can also use tools like gravity gun, magnet, or earthquake to manipulate the environment and cause more damage. The game has no objectives or missions, it is just a fun way to relax and enjoy the destruction. <h3>Features of City Smash</h3>
|
5 |
-
City Smash has many features that make it an entertaining and addictive game. Some of these features are: - Realistic physics: The game uses a realistic physics engine that simulates the effects of different weapons and tools on the city. You can see buildings collapse, cars fly, people scream, and fire spread as you wreak havoc on the city. - Variety of weapons and tools: The game offers a wide range of weapons and tools that you can use to destroy the city. You can choose from nuclear bombs, missiles, rockets, grenades, black holes, lasers, lightning, meteors, tornadoes, and more. You can also use tools like gravity gun, magnet, earthquake, tsunami, and more to manipulate the environment and create more chaos. - Customizable city: The game allows you to customize the city according to your preferences. You can choose from different themes like modern, medieval, futuristic, or alien. You can also change the size and shape of the city, as well as the number and type of buildings and vehicles. - Dynamic weather: The game has a dynamic weather system that changes according to the time of day and the season. You can see rain, snow, fog, thunderstorms, and more as you play the game. The weather also affects the gameplay and the physics of the city. - Fun sound effects: The game has fun and realistic sound effects that enhance the gameplay experience. You can hear the explosions, crashes, screams, sirens, and more as you destroy the city. <h4>How to play City Smash</h4>
|
6 |
-
City Smash is very easy to play. All you need to do is select a weapon or tool from the menu at the bottom of the screen, and then tap on the city to use it. You can also swipe on the screen to rotate or zoom in or out of the city. You can also pause the game by tapping on the pause button at the top right corner of the screen. There are no rules or limits in City Smash, so you can play as long as you want and have fun. <h2>Why download City Smash 1.6 Mod Apk?</h2>
|
7 |
-
City Smash is a free game that you can download from the Google Play Store or from other sources. However, if you want to enjoy more features and benefits in the game, you should download City Smash 1.6 Mod Apk. This is a modified version of the game that gives you access to unlimited skills and resources in the game. <h3>Benefits of City Smash 1.6 Mod Apk</h3>
|
8 |
-
Some of the benefits of downloading City Smash 1.6 Mod Apk are: - Unlimited skills: With this mod apk version, you can use any weapon or tool in the game without any cooldown or limit. You can also unlock all the weapons and tools in the game for free. - Unlimited resources: With this mod - Unlimited resources: With this mod apk version, you can get unlimited money and gems in the game. You can use these resources to buy more weapons and tools, as well as customize the city and the weather. - No ads: With this mod apk version, you can enjoy the game without any annoying ads or pop-ups. You can play the game without any interruptions or distractions. <h4>How to download and install City Smash 1.6 Mod Apk</h4>
|
9 |
-
If you want to download and install City Smash 1.6 Mod Apk, you need to follow these simple steps: - Step 1: Go to a trusted website that provides the mod apk file for City Smash 1.6. You can search for it on Google or use the link below. - Step 2: Download the mod apk file to your device. Make sure you have enough storage space and a stable internet connection. - Step 3: Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on. - Step 4: Locate the mod apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. - Step 5: Launch the game and enjoy the unlimited skills and resources. <h2>Conclusion</h2>
|
10 |
-
City Smash is a fun and addictive simulation game that lets you destroy a city with various weapons and tools. It is a physics playground for destruction lovers who want to relax and enjoy the chaos and mayhem. If you want to enhance your gameplay experience, you should download City Smash 1.6 Mod Apk, which gives you unlimited skills and resources, as well as no ads. You can download it from the link below and follow the steps above to install it on your device. <h3>FAQs</h3>
|
11 |
-
Here are some frequently asked questions about City Smash 1.6 Mod Apk: - Q: Is City Smash 1.6 Mod Apk safe to download and install? - A: Yes, City Smash 1.6 Mod Apk is safe to download and install, as long as you get it from a trusted website like the one below. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses or malware that can harm your device. - Q: Is City Smash 1.6 Mod Apk compatible with my device? - A: City Smash 1.6 Mod Apk is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support some features or functions of the game, such as the dynamic weather or the realistic physics. - Q: How can I update City Smash 1.6 Mod Apk? - A: To update City Smash 1.6 Mod Apk, you need to download and install the latest version of the mod apk file from the same website where you got it from. You can also check for updates on the website or on the game itself. - Q: How can I uninstall City Smash 1.6 Mod Apk? - A: To uninstall City Smash 1.6 Mod Apk, you need to go to Settings > Apps > City Smash and tap on Uninstall. You can also delete the mod apk file from your device if you want to free up some storage space. - Q: How can I contact the developer of City Smash? - A: If you have any questions, feedback, or suggestions for City Smash, you can contact the developer of the game by sending an email to [email protected] or by visiting their Facebook page. : [City Smash 1.6 Mod Apk Download Link](https://androidtop.net/en/1000-city-smash.html) : [Parallax Games Facebook Page](https://www.facebook.com/parallaxgamesstudio)</p>
|
12 |
-
<h2>city smash 1.6 mod apk</h2><br /><p><b><b>Download File</b> ——— <a href="https://urlin.us/2uSWZK">https://urlin.us/2uSWZK</a></b></p><br /><br /> 197e85843d<br />
|
13 |
-
<br />
|
14 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bad Piggies 2 APK and Join the Green Pigs on an Epic Adventure.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Bad Piggies 2 APK for Android</h1>
|
3 |
-
<p>If you are a fan of the crafty contraption-building arcade puzzle game Bad Piggies, you might be interested in downloading its sequel, Bad Piggies 2, for your Android device. In this article, we will show you what Bad Piggies 2 is, why you should download its APK file, and how to do it from a reliable source like APKCombo.</p>
|
4 |
-
<h2>download bad piggies 2 apk</h2><br /><p><b><b>DOWNLOAD</b> › <a href="https://urlin.us/2uSZbe">https://urlin.us/2uSZbe</a></b></p><br /><br />
|
5 |
-
<h2>What is Bad Piggies 2?</h2>
|
6 |
-
<h3>The sequel to the popular puzzle-strategy game</h3>
|
7 |
-
<p>Bad Piggies 2 is a video game developed by Rovio Entertainment, the creators of Angry Birds. It is the sequel to Bad Piggies, which was released in 2012. In this game, you play as the ingenious green pigs, who are the enemies of the famous Angry Birds. You have to embark on an exciting adventure full of challenges and puzzles to solve. The main goal is to build creative vehicles and machines to help the pigs cross the rugged terrain and reach their goals at each level. You have to use various parts and elements to assemble the vehicles, ensuring they are strong and stable enough to get through any obstacle and reach the finish line safely.</p>
|
8 |
-
<h3>The features and improvements of Bad Piggies 2</h3>
|
9 |
-
<p>Bad Piggies 2 contains several improvements and new features compared to its predecessor. Some of them are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>A new engine under the hood, a fresh coat of paint, and perfectly tuned for high speed action.</li>
|
12 |
-
<li>New characters, levels, and gameplays that keep the experience fresh and exciting.</li>
|
13 |
-
<li>A focus on experimentation and creativity. Each level presents different challenges and conditions that require unique and original solutions. You can unlock new parts and tools as you play and use them to build the wackiest vehicles of all time.</li>
|
14 |
-
<li>A state fair where you can take part in exciting events and complete unique challenges.</li>
|
15 |
-
<li>Regular updates with new levels and surprises to keep the game packed with contraption crafting action.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Why download Bad Piggies 2 APK?</h2>
|
18 |
-
<h3>The benefits of downloading the APK file</h3>
|
19 |
-
<p>An APK file is an Android Package file that contains all the files and data needed to install an app on an Android device. Downloading an APK file can have some benefits, such as:</p>
|
20 |
-
<ul>
|
21 |
-
<li>You can access apps that are not available in your region or on your device's official app store.</li>
|
22 |
-
<li>You can get updates faster than waiting for them to roll out on your device's official app store.</li>
|
23 |
-
<li>You can install apps that have been modified or customized by third-party developers.</li>
|
24 |
-
<li>You can backup your apps and data in case you need to restore them later.</li>
|
25 |
-
</ul>
|
26 |
-
<h3>The risks and precautions of downloading the APK file</h3>
|
27 |
-
<p>However, downloading an APK file also comes with some risks and precautions, such as:</p>
|
28 |
-
<ul>
|
29 |
-
<li>You may expose your device to malware or viruses that can harm your device or steal your personal information.</li>
|
30 |
-
<li>You may violate the terms and conditions of the original app developer or the app store, which can result in legal issues or account suspension.</li>
|
31 |
-
<li>You may encounter compatibility or performance issues with your device or the app itself.</li>
|
32 |
-
</ul>
|
33 |
-
<p>Therefore, you should always be careful and cautious when downloading an APK file. You should only download APK files from trusted and reputable sources, such as APKCombo. You should also scan the APK file with a reliable antivirus software before installing it. You should also enable the option to install apps from unknown sources on your device's settings, but only temporarily and only for the specific app you want to install.</p>
|
34 |
-
<h2>How to download Bad Piggies 2 APK from APKCombo?</h2>
|
35 |
-
<h3>The steps to download and install the APK file</h3>
|
36 |
-
<p>If you want to download Bad Piggies 2 APK from APKCombo, you can follow these simple steps:</p>
|
37 |
-
<p>bad piggies 2 apk free download<br />
|
38 |
-
bad piggies 2 game download for android<br />
|
39 |
-
bad piggies 2 mod apk unlimited money<br />
|
40 |
-
bad piggies 2 latest version apk<br />
|
41 |
-
bad piggies 2 rovio entertainment corporation<br />
|
42 |
-
bad piggies 2 apk download uptodown<br />
|
43 |
-
bad piggies 2 apkcombo<br />
|
44 |
-
bad piggies 2 android game<br />
|
45 |
-
bad piggies 2 puzzle strategy game<br />
|
46 |
-
bad piggies 2 apk file<br />
|
47 |
-
bad piggies 2 offline apk<br />
|
48 |
-
bad piggies 2 hack apk download<br />
|
49 |
-
bad piggies 2 apk pure<br />
|
50 |
-
bad piggies 2 apk mirror<br />
|
51 |
-
bad piggies 2 apk old version<br />
|
52 |
-
bad piggies 2 apk no ads<br />
|
53 |
-
bad piggies 2 full version apk<br />
|
54 |
-
bad piggies 2 apk for pc<br />
|
55 |
-
bad piggies 2 apk obb<br />
|
56 |
-
bad piggies 2 apk revdl<br />
|
57 |
-
bad piggies 2 update apk<br />
|
58 |
-
bad piggies 2 new levels apk<br />
|
59 |
-
bad piggies 2 unlimited coins apk<br />
|
60 |
-
bad piggies 2 premium apk<br />
|
61 |
-
bad piggies 2 cracked apk<br />
|
62 |
-
bad piggies 2 online apk<br />
|
63 |
-
bad piggies 2 mod menu apk<br />
|
64 |
-
bad piggies 2 mega mod apk<br />
|
65 |
-
bad piggies 2 pro apk<br />
|
66 |
-
bad piggies 2 unlocked apk<br />
|
67 |
-
bad piggies 2 cheats apk<br />
|
68 |
-
bad piggies 2 tips and tricks apk<br />
|
69 |
-
bad piggies 2 guide apk<br />
|
70 |
-
bad piggies 2 walkthrough apk<br />
|
71 |
-
bad piggies 2 gameplay apk<br />
|
72 |
-
bad piggies 2 review apk<br />
|
73 |
-
bad piggies 2 features apk<br />
|
74 |
-
bad piggies 2 requirements apk<br />
|
75 |
-
bad piggies 2 size apk<br />
|
76 |
-
bad piggies 2 rating apk<br />
|
77 |
-
bad piggies 2 download link apk<br />
|
78 |
-
how to download bad piggies 2 apk<br />
|
79 |
-
where to download bad piggies 2 apk<br />
|
80 |
-
best site to download bad piggies 2 apk<br />
|
81 |
-
safe download of bad piggies 2 apk<br />
|
82 |
-
fast download of bad piggies 2 apk<br />
|
83 |
-
easy download of bad piggies 2 apk<br />
|
84 |
-
free and secure download of bad piggies 2 apk</p>
|
85 |
-
<ol>
|
86 |
-
<li>Go to the APKCombo website and search for Bad Piggies 2 in the search bar.</li>
|
87 |
-
<li>Select the Bad Piggies 2 app from the list of results and click on the Download APK button.</li>
|
88 |
-
<li>Choose the version and variant of the APK file you want to download and click on the Download button again.</li>
|
89 |
-
<li>Wait for the download to finish and then locate the APK file on your device's storage.</li>
|
90 |
-
<li>Tap on the APK file and follow the instructions to install it on your device.</li>
|
91 |
-
<li>Enjoy playing Bad Piggies 2 on your Android device.</li>
|
92 |
-
</ol>
|
93 |
-
<h3>The table of the latest versions and requirements of Bad Piggies 2 APK</h3>
|
94 |
-
<p>Here is a table that shows the latest versions and requirements of Bad Piggies 2 APK that you can download from APKCombo:</p>
|
95 |
-
<table>
|
96 |
-
<tr><th>Version</th><th>Variant</th><th>Size</th><th>Updated</th><th>Android</th></tr>
|
97 |
-
<tr><td>1.0.0</td><td>arm64-v8a + armeabi-v7a + x86 + x86_64</td><td>99.5 MB</td><td>June 18, 2023</td><td>4.4+</td></tr>
|
98 |
-
<tr><td>1.0.0</td><td>x86_64</td><td>25.9 MB</td><td>June 18, 2023</td><td>4.4+</td></tr>
|
99 |
-
<tr><td>1.0.0</td><td>x86</td><td>25.9 MB</td><td>June 18, 2023</td><td>4.4+</td></tr>
|
100 |
-
<tr><td>1.0.0</td><td>armeabi-v7a</td><td>25.9 MB</td><td>June 18, 2023</td><td>4.4+</td></tr>
|
101 |
-
<tr><td>1.0.0</td><td>arm64-v8a</td><td>25.9 MB</td><td>June 18, 2023</td><td>4.4+</td></tr>
|
102 |
-
<tr><td>Beta Testers Only (Unreleased)</td><td>All Variants</td><td>N/A</td><td>N/A</td><td>N/A</td></tr>
|
103 |
-
</table>
|
104 |
-
<h2>Conclusion</h2>
|
105 |
-
<p>In conclusion, Bad Piggies 2 is a fun and addictive puzzle-strategy game that lets you unleash your creativity and engineering skills to build amazing vehicles and machines for the green pigs. You can download Bad Piggies 2 APK from APKCombo, a safe and reliable source of APK files for Android apps. However, you should always be careful and cautious when downloading an APK file, as it may pose some risks and challenges for your device or the app itself. We hope this article has helped you learn how to download Bad Piggies 2 APK for Android and enjoy playing this awesome game.</p>
|
106 |
-
<h2>FAQs</h2>
|
107 |
-
<h3>Q1: Is Bad Piggies 2 free to play?</h3>
|
108 |
-
<p>A1: Yes, Bad Piggies 2 is free to play, but it may contain some in-app purchases and ads that can enhance your gaming experience or support the developers.</p>
|
109 |
-
<h3>Q2: How many levels are there in Bad Piggies 2?</h3>
|
110 |
-
<p>A2: There are over 200 levels in Bad Piggies 2, each with different objectives, challenges, and rewards. You can also create your own levels using the sandbox mode and share them with other players online.</p>
|
111 |
-
<h3>Q3: Can I play Bad Piggies 2 offline?</h3>
|
112 |
-
<p>A3: Yes, you can play Bad Piggies 2 offline, but you may need an internet connection to access some features, such as the state fair, the sandbox mode, or the online leaderboards.</p>
|
113 |
-
<h3>Q4: What are the alternatives to Bad Piggies 2?</h3>
|
114 |
-
<p>A4: If you are looking for other games similar to Bad Piggies 2, you may want to check out these alternatives:</p>
|
115 |
-
<ul>
|
116 |
-
<li>Angry Birds 2: The sequel to the legendary slingshot game that started it all. You can join the Angry Birds in their epic battle against the Bad Piggies and use your skills and strategy to destroy their structures and save the eggs.</li>
|
117 |
-
<li>Cut the Rope 2: The sequel to the award-winning physics-based puzzle game that features the adorable candy-loving monster Om Nom. You can help him collect candies and stars by cutting ropes, using gadgets, and overcoming obstacles.</li>
|
118 |
-
<li>Where's My Water? 2: The sequel to the hit puzzle game that challenges you to guide water, steam, and mud to Swampy the Alligator and his friends. You can explore new locations, modes, and characters in this fun and addictive game.</li>
|
119 |
-
</ul>
|
120 |
-
<h3>Q5: How can I contact Rovio Entertainment for support or feedback?</h3>
|
121 |
-
<p>A5: If you have any questions, issues, or suggestions regarding Bad Piggies 2 or any other Rovio Entertainment game, you can contact them through their official website, social media channels, or email address. Here are some of their contact details:</p>
|
122 |
-
<ul>
|
123 |
-
<li>Website: https://www.rovio.com/</li>
|
124 |
-
<li>Facebook: https://www.facebook.com/RovioEntertainment</li>
|
125 |
-
<li>Twitter: https://twitter.com/Rovio</li>
|
126 |
-
<li>Email: [email protected]</li>
|
127 |
-
</ul></p> 197e85843d<br />
|
128 |
-
<br />
|
129 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Facebook Reels APK for Android and Watch Fun and Inspiring Videos.md
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Facebook Reels on Your Android Device</h1>
|
3 |
-
<p>Facebook Reels are one of the latest features that allow you to create and share short videos with music, effects, stickers, text, and more. They are fun, creative, and engaging ways to express yourself and connect with your friends and followers on Facebook. But what if you want to download your favorite Facebook Reels on your Android device? In this article, we will show you how to do that using an app or a web browser. We will also share some tips and tricks for downloading Facebook Reels efficiently and safely.</p>
|
4 |
-
<h2>facebook reels download app apk</h2><br /><p><b><b>Download</b> ★ <a href="https://jinyurl.com/2uNP7Z">https://jinyurl.com/2uNP7Z</a></b></p><br /><br />
|
5 |
-
<h2>What are Facebook Reels?</h2>
|
6 |
-
<p>Facebook Reels are short videos that you can create on the Facebook app on your mobile device. You can either record a clip (or a series of clips) in real time or upload an existing (or series of existing) pre-recorded video from your phone's gallery. You can then edit and trim your clips, add effects, stickers, text, music, or your own original audio. You can share your Reels with your friends and followers in their core News Feed or in a dedicated Reels section in News Feed that gives people who are new to you the opportunity to discover and enjoy your creations.</p>
|
7 |
-
<h2>Why Download Facebook Reels?</h2>
|
8 |
-
<p>Downloading Facebook Reels can have many benefits. For example, you can:</p> <p>- Save your Reels for offline viewing or sharing with others who don't have Facebook - Backup your Reels in case you lose access to your Facebook account or delete them by mistake - Repurpose your Reels for other platforms or projects that require video content - Enjoy your Reels without any interruptions, ads, or buffering issues</p>
|
9 |
-
<h2>How to Create Facebook Reels?</h2>
|
10 |
-
<p>Before you can download Facebook Reels, you need to create them first. Here are the steps to create Facebook Reels on the Facebook app on your Android device:</p>
|
11 |
-
<ol>
|
12 |
-
<li>Open the Facebook app and tap on the camera icon at the top left corner of the screen.</li>
|
13 |
-
<li>Swipe right to access the Reels mode. You will see a variety of tools and options at the bottom of the screen, such as music, speed, effects, timer, and flash.</li>
|
14 |
-
<li>To record a clip, press and hold the capture button. You can also tap it to start and stop recording. You can record multiple clips and stitch them together to make a Reel.</li>
|
15 |
-
<li>To upload a pre-recorded video from your gallery, tap on the gallery icon at the bottom right corner of the screen and select the video you want to use. You can trim and crop the video as needed.</li>
|
16 |
-
<li>Once you have recorded or uploaded your clips, you can edit them using the tools and options at the bottom of the screen. You can add effects, stickers, text, music, or your own original audio. You can also adjust the volume, alignment, and duration of your clips.</li>
|
17 |
-
<li>When you are done editing your Reel, tap on the arrow icon at the bottom right corner of the screen to preview it. You can also add a caption, hashtags, and tags to your Reel.</li>
|
18 |
-
<li>To share your Reel, tap on the share icon at the bottom right corner of the screen. You can choose to share it with your friends and followers in their core News Feed or in a dedicated Reels section in News Feed that gives people who are new to you the opportunity to discover and enjoy your creations. You can also save it to your camera roll or share it to other apps.</li>
|
19 |
-
</ol>
|
20 |
-
<h2>How to Download Facebook Reels?</h2>
|
21 |
-
<p>There are two main ways to download Facebook Reels on your Android device: using an app or using a web browser. We will explain both methods in detail below.</p>
|
22 |
-
<h3>Easy Downloader for Facebook</h3>
|
23 |
-
<p>Easy Downloader for Facebook is an app that allows you to download Facebook videos, including Reels, with ease. It has a simple and user-friendly interface that lets you browse, watch, and download Facebook videos in high quality. It also supports downloading multiple videos at once and has a built-in video player and gallery.</p>
|
24 |
-
<h4>How to Install Easy Downloader for Facebook?</h4>
|
25 |
-
<p>To install Easy Downloader for Facebook on your Android device, you need to download its APK file from a trusted source like APKCombo. Here are the steps to do that:</p>
|
26 |
-
<p>facebook reels video downloader app apk<br />
|
27 |
-
how to download facebook reels on android app apk<br />
|
28 |
-
facebook reels creator app apk download<br />
|
29 |
-
best app to download facebook reels videos apk<br />
|
30 |
-
facebook reels app apk free download for android<br />
|
31 |
-
download facebook reels music app apk<br />
|
32 |
-
facebook reels editor app apk download<br />
|
33 |
-
easy downloader for facebook reels apk<br />
|
34 |
-
facebook reels maker app apk download<br />
|
35 |
-
facebook reels app apk latest version download<br />
|
36 |
-
download facebook reels without watermark app apk<br />
|
37 |
-
facebook reels downloader app mod apk<br />
|
38 |
-
facebook reels app apk download for pc<br />
|
39 |
-
facebook reels saver app apk download<br />
|
40 |
-
facebook reels app apk old version download<br />
|
41 |
-
download facebook reels with sound app apk<br />
|
42 |
-
facebook reels downloader app premium apk<br />
|
43 |
-
facebook reels app apk download for ios<br />
|
44 |
-
facebook reels converter app apk download<br />
|
45 |
-
facebook reels app apk pro download<br />
|
46 |
-
download facebook reels in hd app apk<br />
|
47 |
-
facebook reels downloader app cracked apk<br />
|
48 |
-
facebook reels app apk download for windows 10<br />
|
49 |
-
facebook reels compressor app apk download<br />
|
50 |
-
facebook reels app apk no watermark download<br />
|
51 |
-
download facebook reels to mp4 app apk<br />
|
52 |
-
facebook reels downloader app unlocked apk<br />
|
53 |
-
facebook reels app apk download for mac<br />
|
54 |
-
facebook reels cutter app apk download<br />
|
55 |
-
facebook reels app apk full version download<br />
|
56 |
-
download facebook reels to gallery app apk<br />
|
57 |
-
facebook reels downloader app ad free apk<br />
|
58 |
-
facebook reels app apk download for laptop<br />
|
59 |
-
facebook reels merger app apk download<br />
|
60 |
-
facebook reels app apk hack download<br />
|
61 |
-
download facebook reels to mp3 app apk<br />
|
62 |
-
facebook reels downloader app paid apk<br />
|
63 |
-
facebook reels app apk download for chromebook<br />
|
64 |
-
facebook reels splitter app apk download<br />
|
65 |
-
facebook reels app apk update download</p>
|
66 |
-
<ol>
|
67 |
-
<li>Open your web browser and go to <a href="">APKCombo</a>.</li>
|
68 |
-
<li>Type "Easy Downloader for Facebook" in the search box and hit enter.</li>
|
69 |
-
<li>Select the app from the search results and tap on the download button.</li>
|
70 |
-
<li>Wait for the APK file to download on your device.</li>
|
71 |
-
<li>Once downloaded, open the APK file and follow the instructions to install the app.</li>
|
72 |
-
<li>You may need to enable unknown sources in your settings to allow the installation of apps from sources other than Google Play Store.</li>
|
73 |
-
</ol>
|
74 |
-
<h4>How to Use Easy Downloader for Facebook?</h4>
|
75 |
-
<p>To use Easy Downloader for Facebook to download Facebook Reels on your Android device, follow these steps:</p>
|
76 |
-
<ol>
|
77 |
-
<li>Open Easy Downloader for Facebook app and log in with your Facebook account.</li>
|
78 |
-
<li>Browse through your News Feed or search for the Reel you want to download.</li>
|
79 |
-
<li>Tap on the Reel and then tap on the download icon at the top right corner of the screen.</li>
|
80 |
-
<li>Select the format and quality you want to download and tap on OK.</li>
|
81 |
-
<li>The Reel will start downloading on your device. You can check the progress in the notification bar or in the app's download manager.</li>
|
82 |
-
<li>Once downloaded, you can find the Reel in your device's gallery or in the app's video player.</li>
|
83 |
-
</ol>
|
84 |
-
<h2>How to Download Facebook Reels without an App?</h2>
|
85 |
-
<p>If you don't want to use an app to download Facebook Reels on your Android device, you can use a web browser instead. There are many websites that allow you to download Facebook videos, including Reels, with ease. One of them is FB Video Saver.</p>
|
86 |
-
<h3>FB Video Saver</h3>
|
87 |
-
<p>FB Video Saver is a website that allows you to download Facebook videos, including Reels, for free. It has a simple and user-friendly interface that lets you paste the URL of the video you want to download and choose the format and quality you prefer. It also supports downloading videos from other platforms like Instagram, Twitter, TikTok, and YouTube.</p>
|
88 |
-
<h4>How to Use FB Video Saver?</h4>
|
89 |
-
<p>To use FB Video Saver to download Facebook Reels on your Android device, follow these steps:</p>
|
90 |
-
<ol>
|
91 |
-
<li>Open your web browser and go to <a href="">FB Video Saver</a>.</li>
|
92 |
-
<li>Open the Facebook app and find the Reel you want to download.</li>
|
93 |
-
<li>Tap on the three-dot icon at the top right corner of the Reel and select Copy Link.</li>
|
94 |
-
<li>Go back to FB Video Saver and paste the link in the search box and hit enter.</li>
|
95 |
-
<li>Select the format and quality you want to download and tap on Download.</li>
|
96 |
-
<li>The Reel will start downloading on your device. You can check the progress in the notification bar or in your browser's download manager.</li>
|
97 |
-
<li>Once downloaded, you can find the Reel in your device's gallery or in your browser's video player.</li>
|
98 |
-
</ol>
|
99 |
-
<h2>Tips and Tricks for Downloading Facebook Reels</h2>
|
100 |
-
<p>Downloading Facebook Reels can be easy and fun, but there are some things you need to keep in mind to make sure you do it efficiently and safely. Here are some tips and tricks for downloading Facebook Reels:</p>
|
101 |
-
<h3>Check the Permissions and Privacy Settings</h3>
|
102 |
-
<p>Before you download any app or website to download Facebook Reels, make sure you check the permissions and privacy settings they require. Some apps or websites may ask for access to your contacts, location, camera, microphone, or other sensitive information that may compromise your security or privacy. Make sure you only grant permissions that are necessary and relevant for the app or website to function properly. You can also check the reviews and ratings of the app or website to see if other users have reported any issues or concerns.</p>
|
103 |
-
<h3>Choose the Right Format and Quality</h3>
|
104 |
-
<p>When you download Facebook Reels, you may have the option to choose the format and quality of the video. The format refers to the type of file that the video is saved as, such as MP4, AVI, or MOV. The quality refers to the resolution and bitrate of the video, such as 720p, 1080p, or 4K. The format and quality of the video affect its size, compatibility, and performance. Generally, you want to choose a format that is compatible with your device and a quality that is high enough for your viewing preference. However, you also need to consider your storage space and internet speed. Higher quality videos take up more space on your device and require more bandwidth to download. You can use a table like this one to compare different formats and qualities:</p>
|
105 |
-
| Format | Quality | Size (per minute) | Compatibility | | --- | --- | --- | --- | | MP4 | 720p | 15 MB | Most devices | | MP4 | 1080p | 30 MB | Most devices | | MP4 | 4K | 375 MB | Some devices | | AVI | 720p | 25 MB | Some devices | | AVI | 1080p | 50 MB | Some devices | | AVI | 4K | 625 MB | Some devices | | MOV | 720p | 20 MB | Some devices | | MOV | 1080p | 40 MB | Some devices | | MOV | 4K | 500 MB | Some devices | <h3>Manage Your Storage Space</h3>
|
106 |
-
<p>Downloading Facebook Reels can take up a lot of space on your device, especially if you download many videos or high-quality videos. To avoid running out of space or slowing down your device, you need to manage your storage space regularly. You can do this by:</p>
|
107 |
-
<ul>
|
108 |
-
<li>Deleting any unwanted or duplicate videos from your device or moving them to an external storage device or cloud service.</li>
|
109 |
-
<li>Clearing any cache or temporary files from your device or browser that may accumulate over time.</li>
|
110 |
-
<li>Using a storage manager app or tool that can help you analyze and optimize your storage space.</li>
|
111 |
-
</ul>
|
112 |
-
<h2>Conclusion</h2>
|
113 |
-
<p>In conclusion, Facebook Reels are a great way to create and share short videos with music, effects, stickers, text, and more. They are fun, creative, and engaging ways to express yourself and connect with your friends and followers on Facebook. But if you want to download your favorite Facebook Reels on your Android device, you can use an app like Easy Downloader for Facebook or a website like FB Video Saver. You can also follow some tips and tricks to download Facebook Reels efficiently and safely. We hope this article has helped you learn how to download Facebook Reels on your Android device. If you have any questions or feedback, please let us know in the comments below.</p>
|
114 |
-
<h2>FAQs</h2>
|
115 |
-
<p>Here are some frequently asked questions about downloading Facebook Reels:</p>
|
116 |
-
<ol>
|
117 |
-
<li>How long can a Facebook Reel be?</li>
|
118 |
-
<p>A Facebook Reel can be up to 60 seconds long.</p>
|
119 |
-
<li>Can I download Facebook Reels from other users?</li>
|
120 |
-
<p>Yes, you can download Facebook Reels from other users as long as they have made them public or shared them with you. However, you should respect their privacy and intellectual property rights and only use their Reels for personal and non-commercial purposes.</p>
|
121 |
-
<li>Can I download Facebook Reels on my iPhone or iPad?</li>
|
122 |
-
<p>Yes, you can download Facebook Reels on your iPhone or iPad using a similar method as described above. You can use an app like Video Downloader for Facebook or a website like SaveFrom.net to download Facebook Reels on your iOS device.</p>
|
123 |
-
<li>Can I edit or remix the Facebook Reels I download?</li>
|
124 |
-
<p>Yes, you can edit or remix the Facebook Reels you download using any video editing app or tool of your choice. However, you should always give credit to the original creator and respect their creative rights and preferences.</p>
|
125 |
-
<li>Can I share the Facebook Reels I download with others?</li>
|
126 |
-
<p>Yes, you can share the Facebook Reels you download with others as long as you do not violate any terms of service, privacy policies, or copyright laws. You can share them via email, messaging apps, social media platforms, or any other means of communication.</p>
|
127 |
-
</ol></p> 401be4b1e0<br />
|
128 |
-
<br />
|
129 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Slug it Out 1 APK OBB 2.8.9 for Free - The Best Slugslinger Game for Android Devices.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Slug it Out 1 APK Y OBB 2.8.9: How to Download and Play the Popular Puzzle Game</h1>
|
3 |
-
<p>If you are a fan of the anime series Slugterra, you might want to try out Slug it Out 1, a fast-paced action puzzle game that lets you collect and battle with slugs. However, if you have an Android device that runs on version 7.0 or above, you might encounter some problems with installing and playing the game. That's why you need to download and install Slug it Out 1 APK Y OBB 2.8.9 files, which are compatible with older versions of Android.</p>
|
4 |
-
<p>In this article, we will explain what Slug it Out 1 is, why you need APK and OBB files to play it, how to download and install them, how to play the game, and some tips and tricks to help you become the best slugslinger of all time.</p>
|
5 |
-
<h2>slug it out 1 apk y obb 2.8.9</h2><br /><p><b><b>Download</b> –––––>>> <a href="https://jinyurl.com/2uNLg2">https://jinyurl.com/2uNLg2</a></b></p><br /><br />
|
6 |
-
<h2>What is Slug it Out 1?</h2>
|
7 |
-
<h3>A fast-paced action puzzle game based on the animated series Slugterra</h3>
|
8 |
-
<p>Slug it Out 1 is a game developed by Epic Story Interactive, based on the popular animated television series Slugterra. The series follows the adventures of Eli Shane, a young slugslinger who travels to an underground world called Slugterra, where he collects and trains slugs, which are magical creatures that can transform into powerful beasts when fired from a blaster.</p>
|
9 |
-
<h3>The gameplay and features of Slug it Out 1</h3>
|
10 |
-
<p>The game is a match-3 puzzle game, where you have to match tiles on the playing board to power up your slugs and blast your opponents. You can collect new slugs and unlock slug powers as you play through story mode or challenge mode. You can also combine two slugs for an incredible fusion shot or ghoul your slugs for extra power.</p>
|
11 |
-
<p>The game features all of your favourite characters and villains from the series, such as Eli Shane, Kord Zane, Pronto, Trixie Sting, Dr. Blakk, El Diablos Nacho, Twist, Tad, Billy Blasteroid, Locke & Lode, John Bull & Bludgeon, Quentin & Quartzite, Shockwire & Shorty.</p>
|
12 |
-
<p>The game also has a variety of slugs to collect and use in battle <h2>Tips and tricks for Slug it Out 1</h2>
|
13 |
-
<p>Now that you know how to download, install, and play Slug it Out 1, you might want to learn some tips and tricks to improve your skills and have more fun. Here are some of them:</p>
|
14 |
-
<h3>Choose the best slugs for each round</h3>
|
15 |
-
<p>Not all slugs are created equal. Some slugs have more power, speed, or special abilities than others. You should choose the slugs that suit your play style and the situation. For example, if you need to deal a lot of damage quickly, you might want to use an Infurnus, a Blastipede, or a Pyringo. If you need to heal yourself or your slugs, you might want to use a Boon Doc, a Ping, or a Narwhaddle. If you need to defend yourself from enemy attacks, you might want to use a Bubbaleone, a Tempesto, or a Crystalyd.</p>
|
16 |
-
<p>You can also check the stats and powers of each slug in the slugpedia, which you can access from the main menu. You can see the damage, speed, recharge time, and ability of each slug. You can also upgrade your slugs by spending coins or gems to make them stronger and faster.</p>
|
17 |
-
<h3>Combine two slugs for a fusion shot</h3>
|
18 |
-
<p>One of the coolest features of Slug it Out 1 is the fusion shot. This is when you combine two slugs of the same element to create a powerful blast that can deal massive damage or have special effects. For example, if you combine two fire slugs, such as an Infurnus and a Flaringo, you will create a fireball that can burn your enemies. If you combine two light slugs, such as a Glimmer and a Midas, you will create a beam of light that can heal you and give you coins.</p>
|
19 |
-
<p>slug it out 1 apk y obb 2.8.9 download<br />
|
20 |
-
slug it out 1 apk y obb 2.8.9 mod<br />
|
21 |
-
slug it out 1 apk y obb 2.8.9 free<br />
|
22 |
-
slug it out 1 apk y obb 2.8.9 latest version<br />
|
23 |
-
slug it out 1 apk y obb 2.8.9 android<br />
|
24 |
-
slug it out 1 apk y obb 2.8.9 offline<br />
|
25 |
-
slug it out 1 apk y obb 2.8.9 unlimited money<br />
|
26 |
-
slug it out 1 apk y obb 2.8.9 update<br />
|
27 |
-
slug it out 1 apk y obb 2.8.9 hack<br />
|
28 |
-
slug it out 1 apk y obb 2.8.9 full<br />
|
29 |
-
slug it out 1 apk y obb 2.8.9 mega<br />
|
30 |
-
slug it out 1 apk y obb 2.8.9 mediafire<br />
|
31 |
-
slug it out 1 apk y obb 2.8.9 gameplay<br />
|
32 |
-
slug it out 1 apk y obb 2.8.9 review<br />
|
33 |
-
slug it out 1 apk y obb 2.8.9 cheats<br />
|
34 |
-
slug it out 1 apk y obb 2.8.9 install<br />
|
35 |
-
slug it out 1 apk y obb 2.8.9 guide<br />
|
36 |
-
slug it out 1 apk y obb 2.8.9 tips<br />
|
37 |
-
slug it out 1 apk y obb 2.8.9 tricks<br />
|
38 |
-
slug it out 1 apk y obb 2.8.9 tutorial<br />
|
39 |
-
slug it out 1 apk y obb 2.8.9 features<br />
|
40 |
-
slug it out 1 apk y obb 2.8.9 requirements<br />
|
41 |
-
slug it out 1 apk y obb 2.8.9 size<br />
|
42 |
-
slug it out 1 apk y obb 2.8.9 link<br />
|
43 |
-
slug it out 1 apk y obb 2.8.9 direct<br />
|
44 |
-
slug it out 1 apk y obb 2.8.9 no root<br />
|
45 |
-
slug it out 1 apk y obb 2.8.9 online<br />
|
46 |
-
slug it out 1 apk y obb 2.8.9 new slugs<br />
|
47 |
-
slug it out 1 apk y obb 2.8.9 best slugs<br />
|
48 |
-
slug it out 1 apk y obb 2.8.9 fusion shots<br />
|
49 |
-
slug it out 1 apk y obb 2.8.9 ghoul slugs<br />
|
50 |
-
slug it out 1 apk y obb 2.8.9 story mode<br />
|
51 |
-
slug it out 1 apk y obb 2.8.9 challenge mode<br />
|
52 |
-
slug it out 1 apk y obb 2.8.9 special items<br />
|
53 |
-
slug it out 1 apk y obb 2.8.9 blaster mods<br />
|
54 |
-
slug it out 1 apk y obb 2.8.9 characters<br />
|
55 |
-
slug it out 1 apk y obb 2.8.9 villains<br />
|
56 |
-
slug it out 1 apk y obb 2</p>
|
57 |
-
<p>To perform a fusion shot, you need to match four tiles of the same color on the board. This will charge up both slugs of that element in your blaster. Then, tap both slug icons at the same time to fire them together. You can also see the fusion shot icon on the top right corner of the screen when it is available.</p>
|
58 |
-
<h3>Ghoul your slugs for extra power</h3>
|
59 |
-
<p>Ghouls are corrupted versions of slugs that have been exposed to dark water by Dr. Blakk. They are more powerful and aggressive than normal slugs, but they also have some drawbacks. They are harder to control, they can harm their own slingers, and they can lose their powers if they are purified by light or water.</p>
|
60 |
-
<p>You can ghoul your own slugs in Slug it Out 1 by using special items called ghoul jars. These items can be bought from the store or earned from story mode or challenge mode. To use them, you need to match five tiles of the same color on the board. This will charge up one slug of that element in your blaster and turn it into a ghoul. Then, tap the ghoul icon to fire it at your enemies.</p>
|
61 |
-
<p>Ghouls have different powers and appearances than normal slugs. For example, a Darkfurnus is a ghoul version of an Infurnus that can shoot dark fireballs that explode on impact. A Goon Doc is a ghoul version of a Boon Doc that can drain the health of your enemies and transfer it to you.</p>
|
62 |
-
<h3>Use special items to boost your gameplay</h3>
|
63 |
-
<p>Besides slugs and ghouls, you can also use special items to enhance your gameplay in Slug it Out 1. These items can be bought from the store or earned from story mode or challenge mode. They include:</p>
|
64 |
-
<table>
|
65 |
-
<tr>
|
66 |
-
<th>Item</th>
|
67 |
-
<th>Effect</th>
|
68 |
-
</tr>
|
69 |
-
<tr>
|
70 |
-
<td>Blaster Mod</td>
|
71 |
-
<td>Increases the damage of your blaster shots by 10%</td>
|
72 |
-
</tr>
|
73 |
-
<tr>
|
74 |
-
<td>Slug Charger</td>
|
75 |
-
<td>Increases the speed of your slugs by 10%</td>
|
76 |
-
</tr>
|
77 |
-
<tr>
|
78 |
-
<td>Coin Doubler</td>
|
79 |
-
<td>Doubles the amount of coins you earn from each round</td>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td>Gem Finder</td>
|
83 |
-
<td>Increases the chance of finding gems on the board by 10%</td>
|
84 |
-
</tr>
|
85 |
-
<tr>
|
86 |
-
<td>Chest Key</td>
|
87 |
-
<td>Unlocks a chest that contains coins, gems, or slugs</td>
|
88 |
-
</tr>
|
89 |
-
<tr>
|
90 |
-
<td>Mystery Box</td>
|
91 |
-
<td>Gives you a random item or slug</td>
|
92 |
-
</tr>
|
93 |
-
<tr>
|
94 |
-
<td>New Character</td>
|
95 |
-
<td>Lets you play as a different character from the series, such as Kord Zane or Trixie Sting</td>
|
96 |
-
</tr>
|
97 |
-
</table>
|
98 |
-
<p>You can use these items by tapping their icons on the bottom of the screen. You can also see how many of each item you have on the top left corner of the screen. You can only use one item per round, so choose wisely.</p>
|
99 |
-
<h2>Conclusion</h2>
|
100 |
-
<p>Slug it Out 1 is a fun and addictive game that will appeal to fans of Slugterra and puzzle games alike. You can download and install Slug it Out 1 APK Y OBB 2.8.9 files to play the game on your Android device, even if it does not support the latest version of Android. You can also follow our tips and tricks to improve your gameplay and become a master slugslinger.</p>
|
101 |
-
<p>We hope you enjoyed this article and found it helpful. If you have any questions or feedback, feel free to leave a comment below. Happy slugging!</p>
|
102 |
-
<h2>FAQs</h2>
|
103 |
-
<h3>Q: Is Slug it Out 1 free to play?</h3>
|
104 |
-
<p>A: Yes, Slug it Out 1 is free to download and play, but it contains in-app purchases that allow you to buy coins, gems, slugs, items, and other features.</p>
|
105 |
-
<h3>Q: Is Slug it Out 1 safe to download and install?</h3>
|
106 |
-
<p>A: Yes, as long as you download and install the APK and OBB files from a reliable source, such as [APKPure] or [APKCombo]. You should also scan the files with an antivirus app before installing them.</p>
|
107 |
-
<h3>Q: How can I update Slug it Out 1?</h3>
|
108 |
-
<p>A: If you have downloaded and installed Slug it Out 1 APK Y OBB 2.8.9 files, you will not be able to update the game from the Google Play Store. You will have to download and install the latest version of the APK and OBB files from the same source you used before.</p>
|
109 |
-
<h3>Q: How can I backup my progress in Slug it Out 1?</h3>
|
110 |
-
<p>A: You can backup your progress in Slug it Out 1 by using a cloud service, such as Google Drive or Dropbox. You will need to copy and paste the folder named "com.nerdcorps.slugitout" from your device's internal storage or SD card to your cloud service. You can also use an app like [Helium] or [Titanium Backup] to backup your data.</p>
|
111 |
-
<h3>Q: How can I contact the developers of Slug it Out 1?</h3>
|
112 |
-
<p>A: You can contact the developers of Slug it Out 1 by sending an email to [email protected] or visiting their website at https://www.epicstoryinteractive.com/.</p> 401be4b1e0<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download WhatsApp Messenger on Laptop A Simple Guide.md
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download WhatsApp Messenger on Your Laptop</h1>
|
3 |
-
<p>WhatsApp Messenger is one of the most popular and widely used messaging apps in the world. It allows you to send text and voice messages, make voice and video calls, share images, documents, locations, and other content with your contacts for free. All you need is an internet connection and a phone number to use WhatsApp Messenger.</p>
|
4 |
-
<h2>download whatsapp messenger on laptop</h2><br /><p><b><b>Download File</b> ……… <a href="https://jinyurl.com/2uNKmU">https://jinyurl.com/2uNKmU</a></b></p><br /><br />
|
5 |
-
<p>But did you know that you can also use WhatsApp Messenger on your laptop? Yes, you can access your WhatsApp chats and calls from your computer without having to pick up your phone. This can be very convenient and productive, especially if you work or study on your laptop most of the time.</p>
|
6 |
-
<p>In this article, we will show you how to download WhatsApp Messenger on your laptop using two different methods: WhatsApp Web and WhatsApp Desktop App. We will also explain the benefits and drawbacks of using WhatsApp Messenger on your laptop, and provide some tips and recommendations for a better experience.</p>
|
7 |
-
<h2>How to Download WhatsApp Messenger on Your Laptop</h2>
|
8 |
-
<p>There are two ways to use WhatsApp Messenger on your laptop: through a web browser or through a desktop app. Both methods require you to have an active account on your phone and a stable internet connection on both devices.</p>
|
9 |
-
<h3>Option 1: Use WhatsApp Web</h3>
|
10 |
-
<p>WhatsApp Web is the easiest and fastest way to use WhatsApp Messenger on your laptop. It is a web-based version of the app that you can access from any browser. Here is how to use it:</p>
|
11 |
-
<ol>
|
12 |
-
<li>Open your web browser and go to <a href="(^6^)">web.whatsapp.com</a>.</li>
|
13 |
-
<li>On your phone, open the WhatsApp app, tap Menu or Settings and select Linked Devices.</li>
|
14 |
-
<li>Tap on Link a device and point your phone's camera to the QR code displayed on the web page.</li>
|
15 |
-
<li>Once the QR code is scanned, you will see your WhatsApp chats and contacts on your laptop screen.</li>
|
16 |
-
<li>You can now use WhatsApp Web features such as sending and receiving messages and media, making voice and video calls, creating group chats, changing settings, etc.</li>
|
17 |
-
</ol>
|
18 |
-
<p>However, there are some limitations of using WhatsApp Web. For example:</p>
|
19 |
-
<p>How to install whatsapp desktop app on windows 10<br />
|
20 |
-
Whatsapp web login on laptop browser<br />
|
21 |
-
Whatsapp for mac os x download free<br />
|
22 |
-
Whatsapp messenger for pc windows 8.1<br />
|
23 |
-
Whatsapp desktop app vs whatsapp web<br />
|
24 |
-
Download whatsapp for laptop without phone<br />
|
25 |
-
Whatsapp video call on laptop windows 10<br />
|
26 |
-
Whatsapp desktop app not working on mac<br />
|
27 |
-
Whatsapp web scan qr code on laptop<br />
|
28 |
-
Whatsapp for laptop windows 7 32 bit<br />
|
29 |
-
Whatsapp desktop app dark mode<br />
|
30 |
-
Whatsapp web download for pc windows 10<br />
|
31 |
-
Whatsapp for macbook air free download<br />
|
32 |
-
Whatsapp desktop app update<br />
|
33 |
-
Whatsapp web logout from laptop<br />
|
34 |
-
Whatsapp for laptop without bluestacks<br />
|
35 |
-
Whatsapp desktop app notifications not working<br />
|
36 |
-
Whatsapp web voice message on laptop<br />
|
37 |
-
Whatsapp for macbook pro download<br />
|
38 |
-
Whatsapp desktop app keyboard shortcuts<br />
|
39 |
-
Whatsapp web status update on laptop<br />
|
40 |
-
Whatsapp for laptop windows 10 64 bit<br />
|
41 |
-
Whatsapp desktop app privacy settings<br />
|
42 |
-
Whatsapp web video download on laptop<br />
|
43 |
-
Whatsapp for mac os catalina download<br />
|
44 |
-
Whatsapp desktop app backup chat history<br />
|
45 |
-
Whatsapp web stickers on laptop<br />
|
46 |
-
Whatsapp for laptop windows xp free download<br />
|
47 |
-
Whatsapp desktop app change profile picture<br />
|
48 |
-
Whatsapp web send documents on laptop<br />
|
49 |
-
Whatsapp for mac os mojave download<br />
|
50 |
-
Whatsapp desktop app delete messages<br />
|
51 |
-
Whatsapp web mute chats on laptop<br />
|
52 |
-
Whatsapp for laptop windows 8 free download<br />
|
53 |
-
Whatsapp desktop app create group chat<br />
|
54 |
-
Whatsapp web view contacts on laptop<br />
|
55 |
-
Whatsapp for mac os sierra download<br />
|
56 |
-
Whatsapp desktop app block contacts<br />
|
57 |
-
Whatsapp web archive chats on laptop<br />
|
58 |
-
Whatsapp for laptop windows 7 free download full version</p>
|
59 |
-
<ul>
|
60 |
-
<li>You need to keep your phone connected to the internet at all times. If your phone loses connection or battery, you will not be able to use WhatsApp Web.</li>
|
61 |
-
<li>You cannot use some features that are available on the phone app, such as deleting messages for everyone, changing your profile picture or status, blocking contacts, etc.</li>
|
62 |
-
<li>You may face some security risks if you use WhatsApp Web on a public or shared computer. Anyone who has access to the computer can see your chats and media. To prevent this, you should always log out of WhatsApp Web when you are done using it.</li>
|
63 |
-
</ul>
|
64 |
-
<h3>Option 2: Use WhatsApp Desktop App</h3>
|
65 |
-
<p>WhatsApp Desktop App is another way to use WhatsApp Messenger on your laptop. It is a standalone application that you can download and install from the official website or from the Microsoft Store or Apple App Store. Here is how to use it:</p>
|
66 |
-
<ol>
|
67 |
-
<li>In your computer's browser, go to <a href ">whatsapp.com/download</a> and choose the version that matches your operating system (Windows or Mac).</li>
|
68 |
-
<li>Download the file and run the installer to install the WhatsApp Desktop App on your laptop.</li>
|
69 |
-
<li>Open the WhatsApp Desktop App and scan the QR code with your phone, just like you did for WhatsApp Web.</li>
|
70 |
-
<li>Once the QR code is scanned, you will see your WhatsApp chats and contacts on your laptop screen.</li>
|
71 |
-
<li>You can now use WhatsApp Desktop App features such as sending and receiving messages and media, making voice and video calls, creating group chats, changing settings, etc.</li>
|
72 |
-
</ol>
|
73 |
-
<p>The WhatsApp Desktop App has some advantages over WhatsApp Web. For example:</p>
|
74 |
-
<ul>
|
75 |
-
<li>You do not need to open a browser and go to a web page every time you want to use WhatsApp on your laptop. You can simply launch the app from your desktop or taskbar.</li>
|
76 |
-
<li>You can use some features that are not available on WhatsApp Web, such as deleting messages for everyone, changing your profile picture or status, blocking contacts, etc.</li>
|
77 |
-
<li>You can get notifications and alerts on your laptop even when the app is not open or active.</li>
|
78 |
-
<li>You can use keyboard shortcuts and emojis to enhance your messaging experience.</li>
|
79 |
-
</ul>
|
80 |
-
<h2>Conclusion</h2>
|
81 |
-
<p>WhatsApp Messenger is a great app for staying in touch with your friends, family, and colleagues. You can use it not only on your phone, but also on your laptop. You can choose between WhatsApp Web or WhatsApp Desktop App, depending on your preference and convenience. Both methods are easy and fast to set up and use.</p>
|
82 |
-
<p>However, you should also be aware of the limitations and risks of using WhatsApp on your laptop. You should always keep your phone connected to the internet, log out of WhatsApp Web when using a public or shared computer, and update the app regularly to avoid any issues or security breaches.</p>
|
83 |
-
<p>We hope this article has helped you learn how to download WhatsApp Messenger on your laptop. If you have any questions or feedback, please let us know in the comments below. Happy chatting!</p>
|
84 |
-
<h2>FAQs</h2>
|
85 |
-
<h3>What are the system requirements for using WhatsApp Messenger on your laptop?</h3>
|
86 |
-
<p>To use WhatsApp Web, you need a web browser that supports JavaScript and HTML5, such as Chrome, Firefox, Safari, Edge, or Opera. To use WhatsApp Desktop App, you need Windows 8.1 or newer, or Mac OS X 10.10 or newer. You also need at least 4 GB of RAM and 200 MB of free disk space.</p>
|
87 |
-
<h3>Is WhatsApp Messenger safe and secure to use?</h3>
|
88 |
-
<p>WhatsApp Messenger uses end-to-end encryption to protect your messages and calls from being intercepted or accessed by anyone else. This means that only you and the person you are communicating with can read or listen to your conversations. However, you should also be careful about who you share your personal information with, what links you click on, and what files you download from unknown sources.</p>
|
89 |
-
<h3>Can I use WhatsApp Messenger on multiple devices at the same time?</h3>
|
90 |
-
<p>You can use WhatsApp Messenger on one phone and one laptop at the same time. However, you cannot use it on more than one phone or more than one laptop simultaneously. If you try to link another device with your account, you will be logged out of the previous one.</p>
|
91 |
-
<h3>How can I backup and restore my WhatsApp chats and media on my laptop?</h3>
|
92 |
-
<p>You can backup and restore your WhatsApp chats and media on your laptop using Google Drive (for Windows) or iCloud (for Mac). To backup your data, go to Settings > Chats > Chat Backup and choose how often you want to backup your data. To restore your data, uninstall and reinstall the WhatsApp Desktop App and follow the instructions to restore from Google Drive or iCloud.</p>
|
93 |
-
<h3>How can I update WhatsApp Messenger on my laptop?</h3>
|
94 |
-
<p>To update WhatsApp Web, you just need to refresh the web page in your browser. To update WhatsApp Desktop App, you need to download the latest version from <a href="">whatsapp.com/download</a> or from the Microsoft Store or Apple App Store. You will be notified when a new update is available.</p> 401be4b1e0<br />
|
95 |
-
<br />
|
96 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Experience the thrill of being a bus driver in American Bus Driving Simulator for PC.md
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download American Bus Driving Simulator for PC</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>American Bus Driving Simulator is a simulation game that lets you experience what it's like to be a bus driver in the USA. You can drive various bus models, such as city buses, electric buses, and school buses, across three different regions: US, Germany, and Brazil. You can also test your skills in career mode, freeride mode, or multiplayer mode with friends.</p>
|
5 |
-
<h2>download american bus driving simulator</h2><br /><p><b><b>DOWNLOAD</b> ✸✸✸ <a href="https://jinyurl.com/2uNMnR">https://jinyurl.com/2uNMnR</a></b></p><br /><br />
|
6 |
-
<p>If you want to play this game on your PC, you might be wondering how to download it and what are the system requirements. In this article, we will show you how to download American Bus Driving Simulator for PC using BlueStacks, a popular Android emulator that allows you to run Android apps and games on your computer. We will also tell you what are the features and benefits of playing this game on PC with BlueStacks.</p>
|
7 |
-
<h2>How to Download American Bus Driving Simulator for PC with BlueStacks</h2>
|
8 |
-
<p>To download American Bus Driving Simulator for PC with BlueStacks, you need to follow these simple steps:</p>
|
9 |
-
<ol>
|
10 |
-
<li>Download and install BlueStacks on your PC from the official website .</li>
|
11 |
-
<li>Launch BlueStacks and sign in with your Google account.</li>
|
12 |
-
<li>Search for American Bus Driving Simulator in the search bar on the top right corner.</li>
|
13 |
-
<li>Click on the game icon and then click on Install.</li>
|
14 |
-
<li>Wait for the installation to finish and then click on Open.</li>
|
15 |
-
<li>Enjoy playing American Bus Driving Simulator on your PC with BlueStacks!</li>
|
16 |
-
</ol>
|
17 |
-
<h2>Why Play American Bus Driving Simulator on PC with BlueStacks?</h2>
|
18 |
-
<p>Playing American Bus Driving Simulator on PC with BlueStacks has many advantages over playing it on your mobile device. Here are some of them:</p>
|
19 |
-
<ul>
|
20 |
-
<li>You can enjoy a bigger and better screen that enhances the graphics and details of the game.</li>
|
21 |
-
<li>You can use your keyboard and mouse or a gamepad to control the bus more easily and accurately.</li>
|
22 |
-
<li>You can customize the key mapping and sensitivity according to your preferences.</li>
|
23 |
-
<li>You can use the Multi-Instance feature to play multiple games or accounts at the same time.</li>
|
24 |
-
<li>You can use the Eco Mode feature to save battery and CPU resources while running the game in the background.</li>
|
25 |
-
<li>You can use the Screen Recorder feature to record your gameplay and share it with others.</li>
|
26 |
-
<li>You can use the Macro feature to automate repetitive tasks and actions in the game.</li>
|
27 |
-
</ul>
|
28 |
-
<h2>Conclusion</h2>
|
29 |
-
<p>American Bus Driving Simulator is a fun and realistic simulation game that lets you drive various buses across different cities and regions. You can play it on your PC with BlueStacks, an Android emulator that offers many features and benefits that enhance your gaming experience. To download American Bus Driving Simulator for PC with BlueStacks, you just need to follow the steps we mentioned above. Try it out today and see for yourself how much fun it is to be a bus driver!</p>
|
30 |
-
<h2>FAQs</h2>
|
31 |
-
<h3>Q: Is American Bus Driving Simulator free to play?</h3>
|
32 |
-
<p>A: Yes, American Bus Driving Simulator is free to play. However, it contains ads and in-app purchases that you can disable or buy if you want.</p>
|
33 |
-
<h3>Q: Can I play American Bus Driving Simulator offline?</h3>
|
34 |
-
<p>A: Yes, you can play American Bus Driving Simulator offline. However, some features, such as multiplayer mode, require an internet connection.</p>
|
35 |
-
<h3>Q: How can I update American Bus Driving Simulator on PC?</h3>
|
36 |
-
<p>A: To update American Bus Driving Simulator on PC, you need to launch BlueStacks and go to My Games tab. Then, click on the game icon and then click on Update. Wait for the update to finish and then click on Open.</p>
|
37 |
-
<h3>Q: How can I contact the developer of American Bus Driving Simulator?</h3>
|
38 |
-
<p>A: You can contact the developer of American Bus Driving Simulator by sending an email to [email protected] or visiting their Facebook page .</p>
|
39 |
-
<p>How to download american bus driving simulator for free<br />
|
40 |
-
American bus driving simulator game download for pc<br />
|
41 |
-
Best american bus driving simulator download sites<br />
|
42 |
-
Download american bus driving simulator mod apk<br />
|
43 |
-
American bus driving simulator 2023 download<br />
|
44 |
-
Download american bus driving simulator for android<br />
|
45 |
-
American bus driving simulator online no download<br />
|
46 |
-
Download american bus driving simulator for windows 10<br />
|
47 |
-
American bus driving simulator full version download<br />
|
48 |
-
Download american bus driving simulator for mac<br />
|
49 |
-
American bus driving simulator download for laptop<br />
|
50 |
-
Download american bus driving simulator offline<br />
|
51 |
-
American bus driving simulator free download for ios<br />
|
52 |
-
Download american bus driving simulator pro<br />
|
53 |
-
American bus driving simulator download with crack<br />
|
54 |
-
Download american bus driving simulator latest version<br />
|
55 |
-
American bus driving simulator download for chromebook<br />
|
56 |
-
Download american bus driving simulator hack<br />
|
57 |
-
American bus driving simulator download without ads<br />
|
58 |
-
Download american bus driving simulator 3d<br />
|
59 |
-
American bus driving simulator download for linux<br />
|
60 |
-
Download american bus driving simulator unlimited money<br />
|
61 |
-
American bus driving simulator download with multiplayer<br />
|
62 |
-
Download american bus driving simulator premium<br />
|
63 |
-
American bus driving simulator download with realistic graphics<br />
|
64 |
-
Download american bus driving simulator cheats<br />
|
65 |
-
American bus driving simulator download for ps4<br />
|
66 |
-
Download american bus driving simulator update<br />
|
67 |
-
American bus driving simulator download for xbox one<br />
|
68 |
-
Download american bus driving simulator tips and tricks<br />
|
69 |
-
American bus driving simulator download for nintendo switch<br />
|
70 |
-
Download american bus driving simulator walkthrough<br />
|
71 |
-
American bus driving simulator download with custom buses<br />
|
72 |
-
Download american bus driving simulator review<br />
|
73 |
-
American bus driving simulator download with different routes<br />
|
74 |
-
Download american bus driving simulator gameplay<br />
|
75 |
-
American bus driving simulator download with traffic rules<br />
|
76 |
-
Download american bus driving simulator guide<br />
|
77 |
-
American bus driving simulator download with weather effects<br />
|
78 |
-
Download american bus driving simulator tutorial<br />
|
79 |
-
American bus driving simulator download with voice chat<br />
|
80 |
-
Download american bus driving simulator system requirements<br />
|
81 |
-
American bus driving simulator download with controller support<br />
|
82 |
-
Download american bus driving simulator steam key<br />
|
83 |
-
American bus driving simulator download with sound effects<br />
|
84 |
-
Download american bus driving simulator coupon code<br />
|
85 |
-
American bus driving simulator download with achievements<br />
|
86 |
-
Download american bus driving simulator trailer</p>
|
87 |
-
<h3>Q: What are some other bus simulator games that I can play on PC?</h3>
|
88 |
-
<p>A: Some other bus simulator games that you can play on PC are Bus Driving Sim 22 and Bus Simulator 21 Next Stop . Both games offer realistic physics, graphics, and gameplay that will make you feel like a real bus driver.</p> 197e85843d<br />
|
89 |
-
<br />
|
90 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/components/ui/select.tsx
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
import * as SelectPrimitive from '@radix-ui/react-select'
|
5 |
-
|
6 |
-
import { cn } from '@/lib/utils'
|
7 |
-
import {
|
8 |
-
IconArrowDown,
|
9 |
-
IconCheck,
|
10 |
-
IconChevronUpDown
|
11 |
-
} from '@/components/ui/icons'
|
12 |
-
|
13 |
-
const Select = SelectPrimitive.Root
|
14 |
-
|
15 |
-
const SelectGroup = SelectPrimitive.Group
|
16 |
-
|
17 |
-
const SelectValue = SelectPrimitive.Value
|
18 |
-
|
19 |
-
const SelectTrigger = React.forwardRef<
|
20 |
-
React.ElementRef<typeof SelectPrimitive.Trigger>,
|
21 |
-
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
|
22 |
-
>(({ className, children, ...props }, ref) => (
|
23 |
-
<SelectPrimitive.Trigger
|
24 |
-
ref={ref}
|
25 |
-
className={cn(
|
26 |
-
'flex h-9 w-full items-center justify-between rounded-md border border-input bg-transparent px-3 py-2 text-sm shadow ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50',
|
27 |
-
className
|
28 |
-
)}
|
29 |
-
{...props}
|
30 |
-
>
|
31 |
-
{children}
|
32 |
-
<SelectPrimitive.Icon asChild>
|
33 |
-
<IconChevronUpDown className="opacity-50" />
|
34 |
-
</SelectPrimitive.Icon>
|
35 |
-
</SelectPrimitive.Trigger>
|
36 |
-
))
|
37 |
-
SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
|
38 |
-
|
39 |
-
const SelectContent = React.forwardRef<
|
40 |
-
React.ElementRef<typeof SelectPrimitive.Content>,
|
41 |
-
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
|
42 |
-
>(({ className, children, position = 'popper', ...props }, ref) => (
|
43 |
-
<SelectPrimitive.Portal>
|
44 |
-
<SelectPrimitive.Content
|
45 |
-
ref={ref}
|
46 |
-
className={cn(
|
47 |
-
'relative z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover text-popover-foreground shadow-md animate-in fade-in-80',
|
48 |
-
position === 'popper' && 'translate-y-1',
|
49 |
-
className
|
50 |
-
)}
|
51 |
-
position={position}
|
52 |
-
{...props}
|
53 |
-
>
|
54 |
-
<SelectPrimitive.Viewport
|
55 |
-
className={cn(
|
56 |
-
'p-1',
|
57 |
-
position === 'popper' &&
|
58 |
-
'h-[var(--radix-select-trigger-height)] w-full min-w-[var(--radix-select-trigger-width)]'
|
59 |
-
)}
|
60 |
-
>
|
61 |
-
{children}
|
62 |
-
</SelectPrimitive.Viewport>
|
63 |
-
</SelectPrimitive.Content>
|
64 |
-
</SelectPrimitive.Portal>
|
65 |
-
))
|
66 |
-
SelectContent.displayName = SelectPrimitive.Content.displayName
|
67 |
-
|
68 |
-
const SelectLabel = React.forwardRef<
|
69 |
-
React.ElementRef<typeof SelectPrimitive.Label>,
|
70 |
-
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
|
71 |
-
>(({ className, ...props }, ref) => (
|
72 |
-
<SelectPrimitive.Label
|
73 |
-
ref={ref}
|
74 |
-
className={cn('py-1.5 pl-8 pr-2 text-sm font-semibold', className)}
|
75 |
-
{...props}
|
76 |
-
/>
|
77 |
-
))
|
78 |
-
SelectLabel.displayName = SelectPrimitive.Label.displayName
|
79 |
-
|
80 |
-
const SelectItem = React.forwardRef<
|
81 |
-
React.ElementRef<typeof SelectPrimitive.Item>,
|
82 |
-
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
|
83 |
-
>(({ className, children, ...props }, ref) => (
|
84 |
-
<SelectPrimitive.Item
|
85 |
-
ref={ref}
|
86 |
-
className={cn(
|
87 |
-
'relative flex w-full cursor-default select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50',
|
88 |
-
className
|
89 |
-
)}
|
90 |
-
{...props}
|
91 |
-
>
|
92 |
-
<span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
|
93 |
-
<SelectPrimitive.ItemIndicator>
|
94 |
-
<IconCheck className="h-4 w-4" />
|
95 |
-
</SelectPrimitive.ItemIndicator>
|
96 |
-
</span>
|
97 |
-
<SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>
|
98 |
-
</SelectPrimitive.Item>
|
99 |
-
))
|
100 |
-
SelectItem.displayName = SelectPrimitive.Item.displayName
|
101 |
-
|
102 |
-
const SelectSeparator = React.forwardRef<
|
103 |
-
React.ElementRef<typeof SelectPrimitive.Separator>,
|
104 |
-
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
|
105 |
-
>(({ className, ...props }, ref) => (
|
106 |
-
<SelectPrimitive.Separator
|
107 |
-
ref={ref}
|
108 |
-
className={cn('-mx-1 my-1 h-px bg-muted', className)}
|
109 |
-
{...props}
|
110 |
-
/>
|
111 |
-
))
|
112 |
-
SelectSeparator.displayName = SelectPrimitive.Separator.displayName
|
113 |
-
|
114 |
-
export {
|
115 |
-
Select,
|
116 |
-
SelectGroup,
|
117 |
-
SelectValue,
|
118 |
-
SelectTrigger,
|
119 |
-
SelectContent,
|
120 |
-
SelectLabel,
|
121 |
-
SelectItem,
|
122 |
-
SelectSeparator
|
123 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/utility/path_utility.py
DELETED
@@ -1,51 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import sys
|
3 |
-
import traceback
|
4 |
-
from pathlib import Path
|
5 |
-
|
6 |
-
from appdirs import user_data_dir
|
7 |
-
|
8 |
-
|
9 |
-
def engine_root() -> Path:
|
10 |
-
if is_development():
|
11 |
-
root_dir = Path(__file__).parents[2]
|
12 |
-
|
13 |
-
# Nuitka/Pyinstallerでビルドされている場合
|
14 |
-
else:
|
15 |
-
root_dir = Path(sys.argv[0]).parent
|
16 |
-
|
17 |
-
return root_dir.resolve(strict=True)
|
18 |
-
|
19 |
-
|
20 |
-
def is_development() -> bool:
|
21 |
-
"""
|
22 |
-
開発版かどうか判定する関数
|
23 |
-
Nuitka/Pyinstallerでコンパイルされていない場合は開発環境とする。
|
24 |
-
"""
|
25 |
-
# nuitkaビルドをした際はグローバルに__compiled__が含まれる
|
26 |
-
if "__compiled__" in globals():
|
27 |
-
return False
|
28 |
-
|
29 |
-
# pyinstallerでビルドをした際はsys.frozenが設定される
|
30 |
-
elif getattr(sys, "frozen", False):
|
31 |
-
return False
|
32 |
-
|
33 |
-
return True
|
34 |
-
|
35 |
-
|
36 |
-
def get_save_dir():
|
37 |
-
# FIXME: ファイル保存場所をエンジン固有のIDが入ったものにする
|
38 |
-
# FIXME: Windowsは`voicevox-engine/voicevox-engine`ディレクトリに保存されているので
|
39 |
-
# `VOICEVOX/voicevox-engine`に変更する
|
40 |
-
if is_development():
|
41 |
-
app_name = "voicevox-engine-dev"
|
42 |
-
else:
|
43 |
-
app_name = "voicevox-engine"
|
44 |
-
return Path(user_data_dir(app_name))
|
45 |
-
|
46 |
-
|
47 |
-
def delete_file(file_path: str) -> None:
|
48 |
-
try:
|
49 |
-
os.remove(file_path)
|
50 |
-
except OSError:
|
51 |
-
traceback.print_exc()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/tests/__init__.py
DELETED
File without changes
|
spaces/AILab-CVC/SEED-LLaMA/models/__init__.py
DELETED
File without changes
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/__init__.py
DELETED
File without changes
|
spaces/AUST001/ChatGPT/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: ChatGPT
|
3 |
-
emoji: 🐠
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.18.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: openrail
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aashir01/Live_Transcription/processing_whisper.py
DELETED
@@ -1,145 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
from transformers import WhisperProcessor
|
5 |
-
|
6 |
-
|
7 |
-
class WhisperPrePostProcessor(WhisperProcessor):
|
8 |
-
def chunk_iter_with_batch(self, inputs, chunk_len, stride_left, stride_right, batch_size):
|
9 |
-
inputs_len = inputs.shape[0]
|
10 |
-
step = chunk_len - stride_left - stride_right
|
11 |
-
|
12 |
-
all_chunk_start_idx = np.arange(0, inputs_len, step)
|
13 |
-
num_samples = len(all_chunk_start_idx)
|
14 |
-
|
15 |
-
num_batches = math.ceil(num_samples / batch_size)
|
16 |
-
batch_idx = np.array_split(np.arange(num_samples), num_batches)
|
17 |
-
|
18 |
-
for i, idx in enumerate(batch_idx):
|
19 |
-
chunk_start_idx = all_chunk_start_idx[idx]
|
20 |
-
|
21 |
-
chunk_end_idx = chunk_start_idx + chunk_len
|
22 |
-
|
23 |
-
chunks = [inputs[chunk_start:chunk_end] for chunk_start, chunk_end in zip(chunk_start_idx, chunk_end_idx)]
|
24 |
-
processed = self.feature_extractor(
|
25 |
-
chunks, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="np"
|
26 |
-
)
|
27 |
-
|
28 |
-
_stride_left = np.where(chunk_start_idx == 0, 0, stride_left)
|
29 |
-
is_last = np.where(stride_right > 0, chunk_end_idx > inputs_len, chunk_end_idx >= inputs_len)
|
30 |
-
_stride_right = np.where(is_last, 0, stride_right)
|
31 |
-
|
32 |
-
chunk_lens = [chunk.shape[0] for chunk in chunks]
|
33 |
-
strides = [
|
34 |
-
(int(chunk_l), int(_stride_l), int(_stride_r))
|
35 |
-
for chunk_l, _stride_l, _stride_r in zip(chunk_lens, _stride_left, _stride_right)
|
36 |
-
]
|
37 |
-
|
38 |
-
yield {"stride": strides, **processed}
|
39 |
-
|
40 |
-
def preprocess_batch(self, inputs, chunk_length_s=0, stride_length_s=None, batch_size=None):
|
41 |
-
stride = None
|
42 |
-
if isinstance(inputs, dict):
|
43 |
-
stride = inputs.pop("stride", None)
|
44 |
-
# Accepting `"array"` which is the key defined in `datasets` for
|
45 |
-
# better integration
|
46 |
-
if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
|
47 |
-
raise ValueError(
|
48 |
-
"When passing a dictionary to FlaxWhisperPipline, the dict needs to contain a "
|
49 |
-
'"raw" or "array" key containing the numpy array representing the audio, and a "sampling_rate" key '
|
50 |
-
"containing the sampling rate associated with the audio array."
|
51 |
-
)
|
52 |
-
|
53 |
-
_inputs = inputs.pop("raw", None)
|
54 |
-
if _inputs is None:
|
55 |
-
# Remove path which will not be used from `datasets`.
|
56 |
-
inputs.pop("path", None)
|
57 |
-
_inputs = inputs.pop("array", None)
|
58 |
-
in_sampling_rate = inputs.pop("sampling_rate")
|
59 |
-
inputs = _inputs
|
60 |
-
|
61 |
-
if in_sampling_rate != self.feature_extractor.sampling_rate:
|
62 |
-
try:
|
63 |
-
import librosa
|
64 |
-
except ImportError as err:
|
65 |
-
raise ImportError(
|
66 |
-
"To support resampling audio files, please install 'librosa' and 'soundfile'."
|
67 |
-
) from err
|
68 |
-
|
69 |
-
inputs = librosa.resample(
|
70 |
-
inputs, orig_sr=in_sampling_rate, target_sr=self.feature_extractor.sampling_rate
|
71 |
-
)
|
72 |
-
ratio = self.feature_extractor.sampling_rate / in_sampling_rate
|
73 |
-
else:
|
74 |
-
ratio = 1
|
75 |
-
|
76 |
-
if not isinstance(inputs, np.ndarray):
|
77 |
-
raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`.")
|
78 |
-
if len(inputs.shape) != 1:
|
79 |
-
raise ValueError(
|
80 |
-
f"We expect a single channel audio input for the Flax Whisper API, got {len(inputs.shape)} channels."
|
81 |
-
)
|
82 |
-
|
83 |
-
if stride is not None:
|
84 |
-
if stride[0] + stride[1] > inputs.shape[0]:
|
85 |
-
raise ValueError("Stride is too large for input.")
|
86 |
-
|
87 |
-
# Stride needs to get the chunk length here, it's going to get
|
88 |
-
# swallowed by the `feature_extractor` later, and then batching
|
89 |
-
# can add extra data in the inputs, so we need to keep track
|
90 |
-
# of the original length in the stride so we can cut properly.
|
91 |
-
stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
|
92 |
-
|
93 |
-
if chunk_length_s:
|
94 |
-
if stride_length_s is None:
|
95 |
-
stride_length_s = chunk_length_s / 6
|
96 |
-
|
97 |
-
if isinstance(stride_length_s, (int, float)):
|
98 |
-
stride_length_s = [stride_length_s, stride_length_s]
|
99 |
-
|
100 |
-
chunk_len = round(chunk_length_s * self.feature_extractor.sampling_rate)
|
101 |
-
stride_left = round(stride_length_s[0] * self.feature_extractor.sampling_rate)
|
102 |
-
stride_right = round(stride_length_s[1] * self.feature_extractor.sampling_rate)
|
103 |
-
|
104 |
-
if chunk_len < stride_left + stride_right:
|
105 |
-
raise ValueError("Chunk length must be superior to stride length.")
|
106 |
-
|
107 |
-
for item in self.chunk_iter_with_batch(
|
108 |
-
inputs,
|
109 |
-
chunk_len,
|
110 |
-
stride_left,
|
111 |
-
stride_right,
|
112 |
-
batch_size,
|
113 |
-
):
|
114 |
-
yield item
|
115 |
-
else:
|
116 |
-
processed = self.feature_extractor(
|
117 |
-
inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="np"
|
118 |
-
)
|
119 |
-
if stride is not None:
|
120 |
-
processed["stride"] = stride
|
121 |
-
yield processed
|
122 |
-
|
123 |
-
def postprocess(self, model_outputs, return_timestamps=None, return_language=None):
|
124 |
-
# unpack the outputs from list(dict(list)) to list(dict)
|
125 |
-
model_outputs = [dict(zip(output, t)) for output in model_outputs for t in zip(*output.values())]
|
126 |
-
|
127 |
-
time_precision = self.feature_extractor.chunk_length / 1500 # max source positions = 1500
|
128 |
-
# Send the chunking back to seconds, it's easier to handle in whisper
|
129 |
-
sampling_rate = self.feature_extractor.sampling_rate
|
130 |
-
for output in model_outputs:
|
131 |
-
if "stride" in output:
|
132 |
-
chunk_len, stride_left, stride_right = output["stride"]
|
133 |
-
# Go back in seconds
|
134 |
-
chunk_len /= sampling_rate
|
135 |
-
stride_left /= sampling_rate
|
136 |
-
stride_right /= sampling_rate
|
137 |
-
output["stride"] = chunk_len, stride_left, stride_right
|
138 |
-
|
139 |
-
text, optional = self.tokenizer._decode_asr(
|
140 |
-
model_outputs,
|
141 |
-
return_timestamps=return_timestamps,
|
142 |
-
return_language=return_language,
|
143 |
-
time_precision=time_precision,
|
144 |
-
)
|
145 |
-
return {"text": text, **optional}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/memory_manipulator/basic.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
from abc import abstractmethod
|
2 |
-
from typing import Dict, List
|
3 |
-
|
4 |
-
from pydantic import BaseModel, Field
|
5 |
-
|
6 |
-
from agentverse.message import Message
|
7 |
-
from agentverse.memory_manipulator import BaseMemoryManipulator
|
8 |
-
from . import memory_manipulator_registry
|
9 |
-
|
10 |
-
@memory_manipulator_registry.register("basic")
|
11 |
-
class BasicMemoryManipulator(BaseMemoryManipulator):
|
12 |
-
|
13 |
-
def manipulate_memory(self) -> None:
|
14 |
-
pass
|
15 |
-
|
16 |
-
def reset(self) -> None:
|
17 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/clock-plugin.js
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
import Clock from './clock.js';
|
2 |
-
|
3 |
-
class ClockPlugin extends Phaser.Plugins.BasePlugin {
|
4 |
-
|
5 |
-
constructor(pluginManager) {
|
6 |
-
super(pluginManager);
|
7 |
-
}
|
8 |
-
|
9 |
-
start() {
|
10 |
-
var eventEmitter = this.game.events;
|
11 |
-
eventEmitter.on('destroy', this.destroy, this);
|
12 |
-
}
|
13 |
-
|
14 |
-
add(scene, config) {
|
15 |
-
return new Clock(scene, config);
|
16 |
-
}
|
17 |
-
|
18 |
-
}
|
19 |
-
|
20 |
-
export default ClockPlugin;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ainterface/compare-gpt-models/app.py
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import requests
|
3 |
-
import openai
|
4 |
-
import logging
|
5 |
-
|
6 |
-
# 设置OpenAI API密钥
|
7 |
-
openai.api_key = st.secrets["OPENAI_API_KEY"]
|
8 |
-
WELM_SECRET = st.secrets["WELM_SECRET"]
|
9 |
-
|
10 |
-
# 准备一些 prompt 的例子
|
11 |
-
def examples():
|
12 |
-
st.write('''<style>
|
13 |
-
[data-testid="column"] {
|
14 |
-
min-width: 1rem !important;
|
15 |
-
}
|
16 |
-
</style>''', unsafe_allow_html=True)
|
17 |
-
columns = st.columns(4)
|
18 |
-
with columns[0]:
|
19 |
-
if st.button('个性对话生成'):
|
20 |
-
st.session_state['prompt'] = "李白,字太白,号⻘莲居士,又号“谪仙人”,唐代伟大的浪漫主义 诗人,被后人誉为“诗仙”。\n我:今天我们穿越时空连线李白,请问李白你爱喝酒吗? 李白:当然。花间一壶酒,独酌无相亲。举杯邀明月,对影成三人。 \n我:你觉得杜甫怎么样? \n李白:他很仰慕我,但他有属于自己的⻛采。 \n我:你为何能如此逍遥? \n李白:天生我材必有用,千金散尽还复来!\n我:你去过哪些地方?\n李白:"
|
21 |
-
with columns[1]:
|
22 |
-
if st.button('开放问题回答'):
|
23 |
-
st.session_state['prompt'] = '请根据所学知识回答下面这个问题\n问题:百年孤独的作者是?\n回答:加西亚·马尔克斯\n问题:二战转折点是?\n回答:'
|
24 |
-
with columns[2]:
|
25 |
-
if st.button('文本风格转换'):
|
26 |
-
st.session_state['prompt'] = "有这样一段文本,{医生微笑着递给小明棒棒糖,同时让小明服下了药。}\n改写这段话让它变得更加惊悚。{医生眼露凶光让小明服药,小明感到非常害怕}。\n\n有这样一段文本,{雨下得很大}\n改写这段话让它变得更加具体。{一霎时,雨点连成了线,大雨就像天塌了似的铺天盖地从空中倾泻下来。}。\n\n有这样一段文本,{王老师离开了电影院,外面已经天黑了}\n改写这段话让它包含更多电影信息。{这部电影比小王预想的时间要长,虽然口碑很好,但离开电影院时,小王还是有些失望。}\n\n有这样一段文本,{男人站在超市外面打电话}\n改写这段话来描述小丑。{男人站在马戏团外一边拿着气球一边打电话}\n\n有这样一段文本,{风铃声响起}\n改写这段话写的更加丰富。{我对这个风铃的感情是由它的铃声引起的。每当风吹来时,风铃发出非常动听的声音,听起来是那么乐观、豁达,像一个小女孩格格的笑声。}\n\n有这样一段文本,{我想家了}\n改写这段话包含更多悲伤的感情。{"
|
27 |
-
with columns[3]:
|
28 |
-
if st.button('文本续写'):
|
29 |
-
st.session_state['prompt'] = "中国地大物博,自然⻛光秀丽,大自然的⻤斧神工造就了许多动人心魄的美景,"
|
30 |
-
|
31 |
-
# 定义completion函数
|
32 |
-
def completion(model_engine, prompt, max_tokens, temperature, top_p, top_k, n, stop_tokens):
|
33 |
-
if model_engine == "davinci-003":
|
34 |
-
model = "text-davinci-003"
|
35 |
-
|
36 |
-
answer = openai.Completion.create(
|
37 |
-
model=model, prompt=prompt, temperature=temperature, max_tokens=max_tokens, top_p=top_p, n=n,
|
38 |
-
stop=[" Human:", " AI:"], frequency_penalty=0, presence_penalty=0.6,
|
39 |
-
)
|
40 |
-
for idx, choice in enumerate(answer['choices']):
|
41 |
-
text = choice['text']
|
42 |
-
st.success(f'生成结果#{idx}: ')
|
43 |
-
st.write(text)
|
44 |
-
st.json(answer, expanded=False)
|
45 |
-
|
46 |
-
elif model_engine == "WeLM":
|
47 |
-
resp = requests.post("https://welm.weixin.qq.com/v1/completions", json={
|
48 |
-
'prompt': prompt,
|
49 |
-
'max_tokens': max_tokens,
|
50 |
-
'temperature': temperature,
|
51 |
-
'top_p': top_p,
|
52 |
-
'top_k': top_k,
|
53 |
-
'n': n,
|
54 |
-
'model': 'xl',
|
55 |
-
"stop": stop_tokens,
|
56 |
-
}, headers={"Authorization": f"Bearer {WELM_SECRET}"})
|
57 |
-
answer = resp.json()
|
58 |
-
|
59 |
-
for idx, choice in enumerate(answer['choices']):
|
60 |
-
if choice.get("finish_reason", None) != "finished":
|
61 |
-
st.error(f'生成结果#{idx}出错: {choice["finish_reason"]}')
|
62 |
-
elif choice.get("text", None) is None:
|
63 |
-
st.error(f'生成结果#{idx}出错: internal error')
|
64 |
-
else:
|
65 |
-
text = choice.get("text", "")
|
66 |
-
# text = cut_message(text)
|
67 |
-
if len(text) == 0:
|
68 |
-
st.info(f'生成结果#{idx}: 结果为空,可能的原因:生成的第一个字符为stop字符,请合理配置prompt或stop。比如,在prompt后追加"某某:"')
|
69 |
-
else:
|
70 |
-
st.success(f'生成结果#{idx}: ')
|
71 |
-
st.write(text)
|
72 |
-
st.json(answer, expanded=False)
|
73 |
-
|
74 |
-
|
75 |
-
# Streamlit应用程序
|
76 |
-
def app():
|
77 |
-
# 左侧栏
|
78 |
-
st.sidebar.title("参数设置")
|
79 |
-
model_engine_list = st.sidebar.multiselect("请选择要对比的模型", ["davinci-003", "WeLM"], default=["davinci-003", "WeLM"])
|
80 |
-
default_top_p = 0.95
|
81 |
-
default_top_k = 0
|
82 |
-
default_temperature = 0.85
|
83 |
-
default_n = 3
|
84 |
-
default_tokens = 256
|
85 |
-
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, default_temperature, 0.01)
|
86 |
-
top_p = st.sidebar.slider('Top p', 0.0, 1.0, default_top_p)
|
87 |
-
top_k = st.sidebar.slider('Top k', 0, 100, default_top_k)
|
88 |
-
n = st.sidebar.slider('n', 1, 5, default_n)
|
89 |
-
max_tokens = st.sidebar.slider('max tokens', 4, 512, default_tokens)
|
90 |
-
stop_tokens = ""
|
91 |
-
if st.sidebar.checkbox("使用换行符作为截断", value=True):
|
92 |
-
stop_tokens = "\n"
|
93 |
-
|
94 |
-
# 主界面
|
95 |
-
st.title("对比不同模型生成文本的效果")
|
96 |
-
st.text('Tips: ')
|
97 |
-
st.text("* Davinci是是GPT-3语言生成模型,可以一定程度上理解用户的指令")
|
98 |
-
st.text("* WeLM不是一个直接的对话机器人,而是一个补全用户输入信息的生成模型")
|
99 |
-
st.text("* 因此 prompt 需要经过一定的设计,才能有比较好的效果")
|
100 |
-
st.text("* 修改Prompt可以更多参考 https://welm.weixin.qq.com/docs/introduction/ 或者使用下方的例子")
|
101 |
-
|
102 |
-
examples()
|
103 |
-
prompt = st.text_area("请输入Prompt:", key="prompt")
|
104 |
-
if st.button("生成"):
|
105 |
-
if prompt.strip() == '':
|
106 |
-
st.error("请输入内容")
|
107 |
-
st.stop()
|
108 |
-
columns = st.columns(len(model_engine_list))
|
109 |
-
for col, model_engine in zip(columns, model_engine_list):
|
110 |
-
with col:
|
111 |
-
st.subheader(model_engine)
|
112 |
-
with st.spinner("正在生成中..."):
|
113 |
-
completion(model_engine, prompt, max_tokens, temperature, top_p, top_k, n, stop_tokens)
|
114 |
-
pf = prompt.replace('\n','\\n')
|
115 |
-
logging.info(f"n={n},T={temperature},top_p={top_p},top_k={top_k},token={max_tokens},m={model_engine_list},p={pf}")
|
116 |
-
|
117 |
-
if __name__ == '__main__':
|
118 |
-
st.set_page_config(
|
119 |
-
page_title="对比不同模型生成文本的效果", layout="wide", initial_sidebar_state="auto",
|
120 |
-
)
|
121 |
-
app()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/finetune_speaker_v2.py
DELETED
@@ -1,321 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
-
import argparse
|
4 |
-
import itertools
|
5 |
-
import math
|
6 |
-
import torch
|
7 |
-
from torch import nn, optim
|
8 |
-
from torch.nn import functional as F
|
9 |
-
from torch.utils.data import DataLoader
|
10 |
-
from torch.utils.tensorboard import SummaryWriter
|
11 |
-
import torch.multiprocessing as mp
|
12 |
-
import torch.distributed as dist
|
13 |
-
from torch.nn.parallel import DistributedDataParallel as DDP
|
14 |
-
from torch.cuda.amp import autocast, GradScaler
|
15 |
-
from tqdm import tqdm
|
16 |
-
|
17 |
-
import librosa
|
18 |
-
import logging
|
19 |
-
|
20 |
-
logging.getLogger('numba').setLevel(logging.WARNING)
|
21 |
-
|
22 |
-
import commons
|
23 |
-
import utils
|
24 |
-
from data_utils import (
|
25 |
-
TextAudioSpeakerLoader,
|
26 |
-
TextAudioSpeakerCollate,
|
27 |
-
DistributedBucketSampler
|
28 |
-
)
|
29 |
-
from models import (
|
30 |
-
SynthesizerTrn,
|
31 |
-
MultiPeriodDiscriminator,
|
32 |
-
)
|
33 |
-
from losses import (
|
34 |
-
generator_loss,
|
35 |
-
discriminator_loss,
|
36 |
-
feature_loss,
|
37 |
-
kl_loss
|
38 |
-
)
|
39 |
-
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
|
40 |
-
|
41 |
-
|
42 |
-
torch.backends.cudnn.benchmark = True
|
43 |
-
global_step = 0
|
44 |
-
|
45 |
-
|
46 |
-
def main():
|
47 |
-
"""Assume Single Node Multi GPUs Training Only"""
|
48 |
-
assert torch.cuda.is_available(), "CPU training is not allowed."
|
49 |
-
|
50 |
-
n_gpus = torch.cuda.device_count()
|
51 |
-
os.environ['MASTER_ADDR'] = 'localhost'
|
52 |
-
os.environ['MASTER_PORT'] = '8000'
|
53 |
-
|
54 |
-
hps = utils.get_hparams()
|
55 |
-
mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
|
56 |
-
|
57 |
-
|
58 |
-
def run(rank, n_gpus, hps):
|
59 |
-
global global_step
|
60 |
-
symbols = hps['symbols']
|
61 |
-
if rank == 0:
|
62 |
-
logger = utils.get_logger(hps.model_dir)
|
63 |
-
logger.info(hps)
|
64 |
-
utils.check_git_hash(hps.model_dir)
|
65 |
-
writer = SummaryWriter(log_dir=hps.model_dir)
|
66 |
-
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
|
67 |
-
|
68 |
-
# Use gloo backend on Windows for Pytorch
|
69 |
-
dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
|
70 |
-
torch.manual_seed(hps.train.seed)
|
71 |
-
torch.cuda.set_device(rank)
|
72 |
-
|
73 |
-
train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data, symbols)
|
74 |
-
train_sampler = DistributedBucketSampler(
|
75 |
-
train_dataset,
|
76 |
-
hps.train.batch_size,
|
77 |
-
[32,300,400,500,600,700,800,900,1000],
|
78 |
-
num_replicas=n_gpus,
|
79 |
-
rank=rank,
|
80 |
-
shuffle=True)
|
81 |
-
collate_fn = TextAudioSpeakerCollate()
|
82 |
-
train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
|
83 |
-
collate_fn=collate_fn, batch_sampler=train_sampler)
|
84 |
-
# train_loader = DataLoader(train_dataset, batch_size=hps.train.batch_size, num_workers=2, shuffle=False, pin_memory=True,
|
85 |
-
# collate_fn=collate_fn)
|
86 |
-
if rank == 0:
|
87 |
-
eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, symbols)
|
88 |
-
eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
|
89 |
-
batch_size=hps.train.batch_size, pin_memory=True,
|
90 |
-
drop_last=False, collate_fn=collate_fn)
|
91 |
-
|
92 |
-
net_g = SynthesizerTrn(
|
93 |
-
len(symbols),
|
94 |
-
hps.data.filter_length // 2 + 1,
|
95 |
-
hps.train.segment_size // hps.data.hop_length,
|
96 |
-
n_speakers=hps.data.n_speakers,
|
97 |
-
**hps.model).cuda(rank)
|
98 |
-
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
|
99 |
-
|
100 |
-
# load existing model
|
101 |
-
_, _, _, _ = utils.load_checkpoint("./pretrained_models/G_0.pth", net_g, None, drop_speaker_emb=hps.drop_speaker_embed)
|
102 |
-
_, _, _, _ = utils.load_checkpoint("./pretrained_models/D_0.pth", net_d, None)
|
103 |
-
epoch_str = 1
|
104 |
-
global_step = 0
|
105 |
-
# freeze all other layers except speaker embedding
|
106 |
-
for p in net_g.parameters():
|
107 |
-
p.requires_grad = True
|
108 |
-
for p in net_d.parameters():
|
109 |
-
p.requires_grad = True
|
110 |
-
# for p in net_d.parameters():
|
111 |
-
# p.requires_grad = False
|
112 |
-
# net_g.emb_g.weight.requires_grad = True
|
113 |
-
optim_g = torch.optim.AdamW(
|
114 |
-
net_g.parameters(),
|
115 |
-
hps.train.learning_rate,
|
116 |
-
betas=hps.train.betas,
|
117 |
-
eps=hps.train.eps)
|
118 |
-
optim_d = torch.optim.AdamW(
|
119 |
-
net_d.parameters(),
|
120 |
-
hps.train.learning_rate,
|
121 |
-
betas=hps.train.betas,
|
122 |
-
eps=hps.train.eps)
|
123 |
-
# optim_d = None
|
124 |
-
net_g = DDP(net_g, device_ids=[rank])
|
125 |
-
net_d = DDP(net_d, device_ids=[rank])
|
126 |
-
|
127 |
-
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay)
|
128 |
-
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay)
|
129 |
-
|
130 |
-
scaler = GradScaler(enabled=hps.train.fp16_run)
|
131 |
-
|
132 |
-
for epoch in range(epoch_str, hps.train.epochs + 1):
|
133 |
-
if rank==0:
|
134 |
-
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
|
135 |
-
else:
|
136 |
-
train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
|
137 |
-
scheduler_g.step()
|
138 |
-
scheduler_d.step()
|
139 |
-
|
140 |
-
|
141 |
-
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
|
142 |
-
net_g, net_d = nets
|
143 |
-
optim_g, optim_d = optims
|
144 |
-
scheduler_g, scheduler_d = schedulers
|
145 |
-
train_loader, eval_loader = loaders
|
146 |
-
if writers is not None:
|
147 |
-
writer, writer_eval = writers
|
148 |
-
|
149 |
-
# train_loader.batch_sampler.set_epoch(epoch)
|
150 |
-
global global_step
|
151 |
-
|
152 |
-
net_g.train()
|
153 |
-
net_d.train()
|
154 |
-
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(tqdm(train_loader)):
|
155 |
-
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
|
156 |
-
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
|
157 |
-
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
|
158 |
-
speakers = speakers.cuda(rank, non_blocking=True)
|
159 |
-
|
160 |
-
with autocast(enabled=hps.train.fp16_run):
|
161 |
-
y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
|
162 |
-
(z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers)
|
163 |
-
|
164 |
-
mel = spec_to_mel_torch(
|
165 |
-
spec,
|
166 |
-
hps.data.filter_length,
|
167 |
-
hps.data.n_mel_channels,
|
168 |
-
hps.data.sampling_rate,
|
169 |
-
hps.data.mel_fmin,
|
170 |
-
hps.data.mel_fmax)
|
171 |
-
y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
|
172 |
-
y_hat_mel = mel_spectrogram_torch(
|
173 |
-
y_hat.squeeze(1),
|
174 |
-
hps.data.filter_length,
|
175 |
-
hps.data.n_mel_channels,
|
176 |
-
hps.data.sampling_rate,
|
177 |
-
hps.data.hop_length,
|
178 |
-
hps.data.win_length,
|
179 |
-
hps.data.mel_fmin,
|
180 |
-
hps.data.mel_fmax
|
181 |
-
)
|
182 |
-
|
183 |
-
y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
|
184 |
-
|
185 |
-
# Discriminator
|
186 |
-
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
|
187 |
-
with autocast(enabled=False):
|
188 |
-
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
|
189 |
-
loss_disc_all = loss_disc
|
190 |
-
optim_d.zero_grad()
|
191 |
-
scaler.scale(loss_disc_all).backward()
|
192 |
-
scaler.unscale_(optim_d)
|
193 |
-
grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
|
194 |
-
scaler.step(optim_d)
|
195 |
-
|
196 |
-
with autocast(enabled=hps.train.fp16_run):
|
197 |
-
# Generator
|
198 |
-
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
|
199 |
-
with autocast(enabled=False):
|
200 |
-
loss_dur = torch.sum(l_length.float())
|
201 |
-
loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
|
202 |
-
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
|
203 |
-
|
204 |
-
loss_fm = feature_loss(fmap_r, fmap_g)
|
205 |
-
loss_gen, losses_gen = generator_loss(y_d_hat_g)
|
206 |
-
loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
|
207 |
-
optim_g.zero_grad()
|
208 |
-
scaler.scale(loss_gen_all).backward()
|
209 |
-
scaler.unscale_(optim_g)
|
210 |
-
grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
|
211 |
-
scaler.step(optim_g)
|
212 |
-
scaler.update()
|
213 |
-
|
214 |
-
if rank==0:
|
215 |
-
if global_step % hps.train.log_interval == 0:
|
216 |
-
lr = optim_g.param_groups[0]['lr']
|
217 |
-
losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
|
218 |
-
logger.info('Train Epoch: {} [{:.0f}%]'.format(
|
219 |
-
epoch,
|
220 |
-
100. * batch_idx / len(train_loader)))
|
221 |
-
logger.info([x.item() for x in losses] + [global_step, lr])
|
222 |
-
|
223 |
-
scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_g": grad_norm_g}
|
224 |
-
scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
|
225 |
-
|
226 |
-
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
|
227 |
-
scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
|
228 |
-
scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
|
229 |
-
image_dict = {
|
230 |
-
"slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
|
231 |
-
"slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
|
232 |
-
"all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
|
233 |
-
"all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
|
234 |
-
}
|
235 |
-
utils.summarize(
|
236 |
-
writer=writer,
|
237 |
-
global_step=global_step,
|
238 |
-
images=image_dict,
|
239 |
-
scalars=scalar_dict)
|
240 |
-
|
241 |
-
if global_step % hps.train.eval_interval == 0:
|
242 |
-
evaluate(hps, net_g, eval_loader, writer_eval)
|
243 |
-
utils.save_checkpoint(net_g, None, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
|
244 |
-
utils.save_checkpoint(net_g, None, hps.train.learning_rate, epoch,
|
245 |
-
os.path.join(hps.model_dir, "G_latest.pth".format(global_step)))
|
246 |
-
# utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
|
247 |
-
old_g=os.path.join(hps.model_dir, "G_{}.pth".format(global_step-4000))
|
248 |
-
# old_d=os.path.join(hps.model_dir, "D_{}.pth".format(global_step-400))
|
249 |
-
if os.path.exists(old_g):
|
250 |
-
os.remove(old_g)
|
251 |
-
# if os.path.exists(old_d):
|
252 |
-
# os.remove(old_d)
|
253 |
-
global_step += 1
|
254 |
-
if epoch > hps.max_epochs:
|
255 |
-
print("Maximum epoch reached, closing training...")
|
256 |
-
exit()
|
257 |
-
|
258 |
-
if rank == 0:
|
259 |
-
logger.info('====> Epoch: {}'.format(epoch))
|
260 |
-
|
261 |
-
|
262 |
-
def evaluate(hps, generator, eval_loader, writer_eval):
|
263 |
-
generator.eval()
|
264 |
-
with torch.no_grad():
|
265 |
-
for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader):
|
266 |
-
x, x_lengths = x.cuda(0), x_lengths.cuda(0)
|
267 |
-
spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
|
268 |
-
y, y_lengths = y.cuda(0), y_lengths.cuda(0)
|
269 |
-
speakers = speakers.cuda(0)
|
270 |
-
|
271 |
-
# remove else
|
272 |
-
x = x[:1]
|
273 |
-
x_lengths = x_lengths[:1]
|
274 |
-
spec = spec[:1]
|
275 |
-
spec_lengths = spec_lengths[:1]
|
276 |
-
y = y[:1]
|
277 |
-
y_lengths = y_lengths[:1]
|
278 |
-
speakers = speakers[:1]
|
279 |
-
break
|
280 |
-
y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000)
|
281 |
-
y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
|
282 |
-
|
283 |
-
mel = spec_to_mel_torch(
|
284 |
-
spec,
|
285 |
-
hps.data.filter_length,
|
286 |
-
hps.data.n_mel_channels,
|
287 |
-
hps.data.sampling_rate,
|
288 |
-
hps.data.mel_fmin,
|
289 |
-
hps.data.mel_fmax)
|
290 |
-
y_hat_mel = mel_spectrogram_torch(
|
291 |
-
y_hat.squeeze(1).float(),
|
292 |
-
hps.data.filter_length,
|
293 |
-
hps.data.n_mel_channels,
|
294 |
-
hps.data.sampling_rate,
|
295 |
-
hps.data.hop_length,
|
296 |
-
hps.data.win_length,
|
297 |
-
hps.data.mel_fmin,
|
298 |
-
hps.data.mel_fmax
|
299 |
-
)
|
300 |
-
image_dict = {
|
301 |
-
"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
|
302 |
-
}
|
303 |
-
audio_dict = {
|
304 |
-
"gen/audio": y_hat[0,:,:y_hat_lengths[0]]
|
305 |
-
}
|
306 |
-
if global_step == 0:
|
307 |
-
image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
|
308 |
-
audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
|
309 |
-
|
310 |
-
utils.summarize(
|
311 |
-
writer=writer_eval,
|
312 |
-
global_step=global_step,
|
313 |
-
images=image_dict,
|
314 |
-
audios=audio_dict,
|
315 |
-
audio_sampling_rate=hps.data.sampling_rate
|
316 |
-
)
|
317 |
-
generator.train()
|
318 |
-
|
319 |
-
|
320 |
-
if __name__ == "__main__":
|
321 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/losses.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import commons
|
5 |
-
|
6 |
-
|
7 |
-
def feature_loss(fmap_r, fmap_g):
|
8 |
-
loss = 0
|
9 |
-
for dr, dg in zip(fmap_r, fmap_g):
|
10 |
-
for rl, gl in zip(dr, dg):
|
11 |
-
rl = rl.float().detach()
|
12 |
-
gl = gl.float()
|
13 |
-
loss += torch.mean(torch.abs(rl - gl))
|
14 |
-
|
15 |
-
return loss * 2
|
16 |
-
|
17 |
-
|
18 |
-
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
|
19 |
-
loss = 0
|
20 |
-
r_losses = []
|
21 |
-
g_losses = []
|
22 |
-
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
|
23 |
-
dr = dr.float()
|
24 |
-
dg = dg.float()
|
25 |
-
r_loss = torch.mean((1-dr)**2)
|
26 |
-
g_loss = torch.mean(dg**2)
|
27 |
-
loss += (r_loss + g_loss)
|
28 |
-
r_losses.append(r_loss.item())
|
29 |
-
g_losses.append(g_loss.item())
|
30 |
-
|
31 |
-
return loss, r_losses, g_losses
|
32 |
-
|
33 |
-
|
34 |
-
def generator_loss(disc_outputs):
|
35 |
-
loss = 0
|
36 |
-
gen_losses = []
|
37 |
-
for dg in disc_outputs:
|
38 |
-
dg = dg.float()
|
39 |
-
l = torch.mean((1-dg)**2)
|
40 |
-
gen_losses.append(l)
|
41 |
-
loss += l
|
42 |
-
|
43 |
-
return loss, gen_losses
|
44 |
-
|
45 |
-
|
46 |
-
def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
|
47 |
-
"""
|
48 |
-
z_p, logs_q: [b, h, t_t]
|
49 |
-
m_p, logs_p: [b, h, t_t]
|
50 |
-
"""
|
51 |
-
z_p = z_p.float()
|
52 |
-
logs_q = logs_q.float()
|
53 |
-
m_p = m_p.float()
|
54 |
-
logs_p = logs_p.float()
|
55 |
-
z_mask = z_mask.float()
|
56 |
-
|
57 |
-
kl = logs_p - logs_q - 0.5
|
58 |
-
kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
|
59 |
-
kl = torch.sum(kl * z_mask)
|
60 |
-
l = kl / torch.sum(z_mask)
|
61 |
-
return l
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/launcher.py
DELETED
@@ -1,204 +0,0 @@
|
|
1 |
-
# this scripts installs necessary requirements and launches main program in webui.py
|
2 |
-
# borrow from : https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/launch.py
|
3 |
-
import subprocess
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
import importlib.util
|
7 |
-
import shlex
|
8 |
-
import platform
|
9 |
-
import json
|
10 |
-
|
11 |
-
python = sys.executable
|
12 |
-
git = os.environ.get('GIT', "git")
|
13 |
-
index_url = os.environ.get('INDEX_URL', "")
|
14 |
-
stored_commit_hash = None
|
15 |
-
skip_install = False
|
16 |
-
dir_repos = "repositories"
|
17 |
-
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
18 |
-
|
19 |
-
if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
|
20 |
-
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
|
21 |
-
|
22 |
-
|
23 |
-
def check_python_version():
|
24 |
-
is_windows = platform.system() == "Windows"
|
25 |
-
major = sys.version_info.major
|
26 |
-
minor = sys.version_info.minor
|
27 |
-
micro = sys.version_info.micro
|
28 |
-
|
29 |
-
if is_windows:
|
30 |
-
supported_minors = [10]
|
31 |
-
else:
|
32 |
-
supported_minors = [7, 8, 9, 10, 11]
|
33 |
-
|
34 |
-
if not (major == 3 and minor in supported_minors):
|
35 |
-
|
36 |
-
raise (f"""
|
37 |
-
INCOMPATIBLE PYTHON VERSION
|
38 |
-
This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
|
39 |
-
If you encounter an error with "RuntimeError: Couldn't install torch." message,
|
40 |
-
or any other error regarding unsuccessful package (library) installation,
|
41 |
-
please downgrade (or upgrade) to the latest version of 3.10 Python
|
42 |
-
and delete current Python and "venv" folder in WebUI's directory.
|
43 |
-
You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/
|
44 |
-
{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
|
45 |
-
Use --skip-python-version-check to suppress this warning.
|
46 |
-
""")
|
47 |
-
|
48 |
-
|
49 |
-
def commit_hash():
|
50 |
-
global stored_commit_hash
|
51 |
-
|
52 |
-
if stored_commit_hash is not None:
|
53 |
-
return stored_commit_hash
|
54 |
-
|
55 |
-
try:
|
56 |
-
stored_commit_hash = run(f"{git} rev-parse HEAD").strip()
|
57 |
-
except Exception:
|
58 |
-
stored_commit_hash = "<none>"
|
59 |
-
|
60 |
-
return stored_commit_hash
|
61 |
-
|
62 |
-
|
63 |
-
def run(command, desc=None, errdesc=None, custom_env=None, live=False):
|
64 |
-
if desc is not None:
|
65 |
-
print(desc)
|
66 |
-
|
67 |
-
if live:
|
68 |
-
result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
|
69 |
-
if result.returncode != 0:
|
70 |
-
raise RuntimeError(f"""{errdesc or 'Error running command'}.
|
71 |
-
Command: {command}
|
72 |
-
Error code: {result.returncode}""")
|
73 |
-
|
74 |
-
return ""
|
75 |
-
|
76 |
-
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
|
77 |
-
|
78 |
-
if result.returncode != 0:
|
79 |
-
|
80 |
-
message = f"""{errdesc or 'Error running command'}.
|
81 |
-
Command: {command}
|
82 |
-
Error code: {result.returncode}
|
83 |
-
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
|
84 |
-
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
|
85 |
-
"""
|
86 |
-
raise RuntimeError(message)
|
87 |
-
|
88 |
-
return result.stdout.decode(encoding="utf8", errors="ignore")
|
89 |
-
|
90 |
-
|
91 |
-
def check_run(command):
|
92 |
-
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
93 |
-
return result.returncode == 0
|
94 |
-
|
95 |
-
|
96 |
-
def is_installed(package):
|
97 |
-
try:
|
98 |
-
spec = importlib.util.find_spec(package)
|
99 |
-
except ModuleNotFoundError:
|
100 |
-
return False
|
101 |
-
|
102 |
-
return spec is not None
|
103 |
-
|
104 |
-
|
105 |
-
def repo_dir(name):
|
106 |
-
return os.path.join(script_path, dir_repos, name)
|
107 |
-
|
108 |
-
|
109 |
-
def run_python(code, desc=None, errdesc=None):
|
110 |
-
return run(f'"{python}" -c "{code}"', desc, errdesc)
|
111 |
-
|
112 |
-
|
113 |
-
def run_pip(args, desc=None):
|
114 |
-
if skip_install:
|
115 |
-
return
|
116 |
-
|
117 |
-
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
|
118 |
-
return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
|
119 |
-
|
120 |
-
|
121 |
-
def check_run_python(code):
|
122 |
-
return check_run(f'"{python}" -c "{code}"')
|
123 |
-
|
124 |
-
|
125 |
-
def git_clone(url, dir, name, commithash=None):
|
126 |
-
# TODO clone into temporary dir and move if successful
|
127 |
-
|
128 |
-
if os.path.exists(dir):
|
129 |
-
if commithash is None:
|
130 |
-
return
|
131 |
-
|
132 |
-
current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
|
133 |
-
if current_hash == commithash:
|
134 |
-
return
|
135 |
-
|
136 |
-
run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
|
137 |
-
run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
|
138 |
-
return
|
139 |
-
|
140 |
-
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
|
141 |
-
|
142 |
-
if commithash is not None:
|
143 |
-
run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
|
144 |
-
|
145 |
-
|
146 |
-
def git_pull_recursive(dir):
|
147 |
-
for subdir, _, _ in os.walk(dir):
|
148 |
-
if os.path.exists(os.path.join(subdir, '.git')):
|
149 |
-
try:
|
150 |
-
output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
|
151 |
-
print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
|
152 |
-
except subprocess.CalledProcessError as e:
|
153 |
-
print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
|
154 |
-
|
155 |
-
|
156 |
-
def run_extension_installer(extension_dir):
|
157 |
-
path_installer = os.path.join(extension_dir, "install.py")
|
158 |
-
if not os.path.isfile(path_installer):
|
159 |
-
return
|
160 |
-
|
161 |
-
try:
|
162 |
-
env = os.environ.copy()
|
163 |
-
env['PYTHONPATH'] = os.path.abspath(".")
|
164 |
-
|
165 |
-
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
|
166 |
-
except Exception as e:
|
167 |
-
print(e, file=sys.stderr)
|
168 |
-
|
169 |
-
|
170 |
-
def prepare_environment():
|
171 |
-
global skip_install
|
172 |
-
|
173 |
-
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113")
|
174 |
-
|
175 |
-
## check windows
|
176 |
-
if sys.platform != 'win32':
|
177 |
-
requirements_file = os.environ.get('REQS_FILE', "req.txt")
|
178 |
-
else:
|
179 |
-
requirements_file = os.environ.get('REQS_FILE', "requirements.txt")
|
180 |
-
|
181 |
-
commit = commit_hash()
|
182 |
-
|
183 |
-
print(f"Python {sys.version}")
|
184 |
-
print(f"Commit hash: {commit}")
|
185 |
-
|
186 |
-
if not is_installed("torch") or not is_installed("torchvision"):
|
187 |
-
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
|
188 |
-
|
189 |
-
run_pip(f"install -r \"{requirements_file}\"", "requirements for SadTalker WebUI (may take longer time in first time)")
|
190 |
-
|
191 |
-
if sys.platform != 'win32' and not is_installed('tts'):
|
192 |
-
run_pip(f"install TTS", "install TTS individually in SadTalker, which might not work on windows.")
|
193 |
-
|
194 |
-
|
195 |
-
def start():
|
196 |
-
print(f"Launching SadTalker Web UI")
|
197 |
-
from app_sadtalker import sadtalker_demo
|
198 |
-
demo = sadtalker_demo()
|
199 |
-
demo.queue()
|
200 |
-
demo.launch()
|
201 |
-
|
202 |
-
if __name__ == "__main__":
|
203 |
-
prepare_environment()
|
204 |
-
start()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlphaGPT/PaperSummary/app.py
DELETED
@@ -1,865 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import base64
|
3 |
-
import configparser
|
4 |
-
import datetime
|
5 |
-
import json
|
6 |
-
import os
|
7 |
-
import re
|
8 |
-
from collections import namedtuple
|
9 |
-
|
10 |
-
import arxiv
|
11 |
-
import numpy as np
|
12 |
-
import openai
|
13 |
-
import requests
|
14 |
-
import tenacity
|
15 |
-
import tiktoken
|
16 |
-
|
17 |
-
import fitz, io, os
|
18 |
-
from PIL import Image
|
19 |
-
|
20 |
-
|
21 |
-
class Paper:
|
22 |
-
def __init__(self, path, title='', url='', abs='', authers=[]):
|
23 |
-
# 初始化函数,根据pdf路径初始化Paper对象
|
24 |
-
self.url = url # 文章链接
|
25 |
-
self.path = path # pdf路径
|
26 |
-
self.section_names = [] # 段落标题
|
27 |
-
self.section_texts = {} # 段落内容
|
28 |
-
self.abs = abs
|
29 |
-
self.title_page = 0
|
30 |
-
if title == '':
|
31 |
-
self.pdf = fitz.open(self.path) # pdf文档
|
32 |
-
self.title = self.get_title()
|
33 |
-
self.parse_pdf()
|
34 |
-
else:
|
35 |
-
self.title = title
|
36 |
-
self.authers = authers
|
37 |
-
self.roman_num = ["I", "II", 'III', "IV", "V", "VI", "VII", "VIII", "IIX", "IX", "X"]
|
38 |
-
self.digit_num = [str(d+1) for d in range(10)]
|
39 |
-
self.first_image = ''
|
40 |
-
|
41 |
-
def parse_pdf(self):
|
42 |
-
self.pdf = fitz.open(self.path) # pdf文档
|
43 |
-
self.text_list = [page.get_text() for page in self.pdf]
|
44 |
-
self.all_text = ' '.join(self.text_list)
|
45 |
-
self.section_page_dict = self._get_all_page_index() # 段落与页码的对应字典
|
46 |
-
print("section_page_dict", self.section_page_dict)
|
47 |
-
self.section_text_dict = self._get_all_page() # 段落与内容的对应字典
|
48 |
-
self.section_text_dict.update({"title": self.title})
|
49 |
-
self.section_text_dict.update({"paper_info": self.get_paper_info()})
|
50 |
-
self.pdf.close()
|
51 |
-
|
52 |
-
def get_paper_info(self):
|
53 |
-
first_page_text = self.pdf[self.title_page].get_text()
|
54 |
-
if "Abstract" in self.section_text_dict.keys():
|
55 |
-
abstract_text = self.section_text_dict['Abstract']
|
56 |
-
else:
|
57 |
-
abstract_text = self.abs
|
58 |
-
first_page_text = first_page_text.replace(abstract_text, "")
|
59 |
-
return first_page_text
|
60 |
-
|
61 |
-
def get_image_path(self, image_path=''):
|
62 |
-
"""
|
63 |
-
将PDF中的第一张图保存到image.png里面,存到本地目录,返回文件名称,供gitee读取
|
64 |
-
:param filename: 图片所在路径,"C:\\Users\\Administrator\\Desktop\\nwd.pdf"
|
65 |
-
:param image_path: 图片提取后的保存路径
|
66 |
-
:return:
|
67 |
-
"""
|
68 |
-
# open file
|
69 |
-
max_size = 0
|
70 |
-
image_list = []
|
71 |
-
with fitz.Document(self.path) as my_pdf_file:
|
72 |
-
# 遍历所有页面
|
73 |
-
for page_number in range(1, len(my_pdf_file) + 1):
|
74 |
-
# 查看独立页面
|
75 |
-
page = my_pdf_file[page_number - 1]
|
76 |
-
# 查看当前页所有图片
|
77 |
-
images = page.get_images()
|
78 |
-
# 遍历当前页面所有图片
|
79 |
-
for image_number, image in enumerate(page.get_images(), start=1):
|
80 |
-
# 访问图片xref
|
81 |
-
xref_value = image[0]
|
82 |
-
# 提取图片信息
|
83 |
-
base_image = my_pdf_file.extract_image(xref_value)
|
84 |
-
# 访问图片
|
85 |
-
image_bytes = base_image["image"]
|
86 |
-
# 获取图片扩展名
|
87 |
-
ext = base_image["ext"]
|
88 |
-
# 加载图片
|
89 |
-
image = Image.open(io.BytesIO(image_bytes))
|
90 |
-
image_size = image.size[0] * image.size[1]
|
91 |
-
if image_size > max_size:
|
92 |
-
max_size = image_size
|
93 |
-
image_list.append(image)
|
94 |
-
for image in image_list:
|
95 |
-
image_size = image.size[0] * image.size[1]
|
96 |
-
if image_size == max_size:
|
97 |
-
image_name = f"image.{ext}"
|
98 |
-
im_path = os.path.join(image_path, image_name)
|
99 |
-
print("im_path:", im_path)
|
100 |
-
|
101 |
-
max_pix = 480
|
102 |
-
origin_min_pix = min(image.size[0], image.size[1])
|
103 |
-
|
104 |
-
if image.size[0] > image.size[1]:
|
105 |
-
min_pix = int(image.size[1] * (max_pix/image.size[0]))
|
106 |
-
newsize = (max_pix, min_pix)
|
107 |
-
else:
|
108 |
-
min_pix = int(image.size[0] * (max_pix/image.size[1]))
|
109 |
-
newsize = (min_pix, max_pix)
|
110 |
-
image = image.resize(newsize)
|
111 |
-
|
112 |
-
image.save(open(im_path, "wb"))
|
113 |
-
return im_path, ext
|
114 |
-
return None, None
|
115 |
-
|
116 |
-
# 定义一个函数,根据字体的大小,识别每个章节名称,并返回一个列表
|
117 |
-
def get_chapter_names(self,):
|
118 |
-
# # 打开一个pdf文件
|
119 |
-
doc = fitz.open(self.path) # pdf文档
|
120 |
-
text_list = [page.get_text() for page in doc]
|
121 |
-
all_text = ''
|
122 |
-
for text in text_list:
|
123 |
-
all_text += text
|
124 |
-
# # 创建一个空列表,用于存储章节名称
|
125 |
-
chapter_names = []
|
126 |
-
for line in all_text.split('\n'):
|
127 |
-
line_list = line.split(' ')
|
128 |
-
if '.' in line:
|
129 |
-
point_split_list = line.split('.')
|
130 |
-
space_split_list = line.split(' ')
|
131 |
-
if 1 < len(space_split_list) < 5:
|
132 |
-
if 1 < len(point_split_list) < 5 and (point_split_list[0] in self.roman_num or point_split_list[0] in self.digit_num):
|
133 |
-
print("line:", line)
|
134 |
-
chapter_names.append(line)
|
135 |
-
# 这段代码可能会有新的bug,本意是为了消除"Introduction"的问题的!
|
136 |
-
elif 1 < len(point_split_list) < 5:
|
137 |
-
print("line:", line)
|
138 |
-
chapter_names.append(line)
|
139 |
-
|
140 |
-
return chapter_names
|
141 |
-
|
142 |
-
def get_title(self):
|
143 |
-
doc = self.pdf # 打开pdf文件
|
144 |
-
max_font_size = 0 # 初始化最大字体大小为0
|
145 |
-
max_string = "" # 初始化最大字体大小对应的字符串为空
|
146 |
-
max_font_sizes = [0]
|
147 |
-
for page_index, page in enumerate(doc): # 遍历每一页
|
148 |
-
text = page.get_text("dict") # 获取页面上的文本信息
|
149 |
-
blocks = text["blocks"] # 获取文本块列表
|
150 |
-
for block in blocks: # 遍历每个文本块
|
151 |
-
if block["type"] == 0 and len(block['lines']): # 如果是文字类型
|
152 |
-
if len(block["lines"][0]["spans"]):
|
153 |
-
font_size = block["lines"][0]["spans"][0]["size"] # 获取第一行第一段文字的字体大小
|
154 |
-
max_font_sizes.append(font_size)
|
155 |
-
if font_size > max_font_size: # 如果字体大小大于当前最大值
|
156 |
-
max_font_size = font_size # 更新最大值
|
157 |
-
max_string = block["lines"][0]["spans"][0]["text"] # 更新最大值对应的字符串
|
158 |
-
max_font_sizes.sort()
|
159 |
-
print("max_font_sizes", max_font_sizes[-10:])
|
160 |
-
cur_title = ''
|
161 |
-
for page_index, page in enumerate(doc): # 遍历每一页
|
162 |
-
text = page.get_text("dict") # 获取页面上的文本信息
|
163 |
-
blocks = text["blocks"] # 获取文本块列表
|
164 |
-
for block in blocks: # 遍历每个文本块
|
165 |
-
if block["type"] == 0 and len(block['lines']): # 如果是文字类型
|
166 |
-
if len(block["lines"][0]["spans"]):
|
167 |
-
cur_string = block["lines"][0]["spans"][0]["text"] # 更新最大值对应的字符串
|
168 |
-
font_flags = block["lines"][0]["spans"][0]["flags"] # 获取第一行第一段文字的字体特征
|
169 |
-
font_size = block["lines"][0]["spans"][0]["size"] # 获取第一行第一段文字的字体大小
|
170 |
-
# print(font_size)
|
171 |
-
if abs(font_size - max_font_sizes[-1]) < 0.3 or abs(font_size - max_font_sizes[-2]) < 0.3:
|
172 |
-
# print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
|
173 |
-
if len(cur_string) > 4 and "arXiv" not in cur_string:
|
174 |
-
# print("The string is bold.", max_string, "font_size:", font_size, "font_flags:", font_flags)
|
175 |
-
if cur_title == '' :
|
176 |
-
cur_title += cur_string
|
177 |
-
else:
|
178 |
-
cur_title += ' ' + cur_string
|
179 |
-
self.title_page = page_index
|
180 |
-
# break
|
181 |
-
title = cur_title.replace('\n', ' ')
|
182 |
-
return title
|
183 |
-
|
184 |
-
|
185 |
-
def _get_all_page_index(self):
|
186 |
-
# 定义需要寻找的章节名称列表
|
187 |
-
section_list = ["Abstract",
|
188 |
-
'Introduction', 'Related Work', 'Background',
|
189 |
-
"Preliminary", "Problem Formulation",
|
190 |
-
'Methods', 'Methodology', "Method", 'Approach', 'Approaches',
|
191 |
-
# exp
|
192 |
-
"Materials and Methods", "Experiment Settings",
|
193 |
-
'Experiment', "Experimental Results", "Evaluation", "Experiments",
|
194 |
-
"Results", 'Findings', 'Data Analysis',
|
195 |
-
"Discussion", "Results and Discussion", "Conclusion",
|
196 |
-
'References']
|
197 |
-
# 初始化一个字典来存储找到的章节和它们在文档中出现的页码
|
198 |
-
section_page_dict = {}
|
199 |
-
# 遍历每一页文档
|
200 |
-
for page_index, page in enumerate(self.pdf):
|
201 |
-
# 获取当前页面的文本内容
|
202 |
-
cur_text = page.get_text()
|
203 |
-
# ���历需要寻找的章节名称列表
|
204 |
-
for section_name in section_list:
|
205 |
-
# 将章节名称转换成大写形式
|
206 |
-
section_name_upper = section_name.upper()
|
207 |
-
# 如果当前页面包含"Abstract"这个关键词
|
208 |
-
if "Abstract" == section_name and section_name in cur_text:
|
209 |
-
# 将"Abstract"和它所在的页码加入字典中
|
210 |
-
section_page_dict[section_name] = page_index
|
211 |
-
# 如果当前页面包含章节名称,则将章节名称和它所在的页码加入字典中
|
212 |
-
else:
|
213 |
-
if section_name + '\n' in cur_text:
|
214 |
-
section_page_dict[section_name] = page_index
|
215 |
-
elif section_name_upper + '\n' in cur_text:
|
216 |
-
section_page_dict[section_name] = page_index
|
217 |
-
# 返回所有找到的章节名称及它们在文档中出现的页码
|
218 |
-
return section_page_dict
|
219 |
-
|
220 |
-
def _get_all_page(self):
|
221 |
-
"""
|
222 |
-
获取PDF文件中每个页面的文本信息,并将文本信息按照章节组织成字典返回。
|
223 |
-
|
224 |
-
Returns:
|
225 |
-
section_dict (dict): 每个章节的文本信息字典,key为章节名,value为章节文本。
|
226 |
-
"""
|
227 |
-
text = ''
|
228 |
-
text_list = []
|
229 |
-
section_dict = {}
|
230 |
-
|
231 |
-
# 再处理其他章节:
|
232 |
-
text_list = [page.get_text() for page in self.pdf]
|
233 |
-
for sec_index, sec_name in enumerate(self.section_page_dict):
|
234 |
-
print(sec_index, sec_name, self.section_page_dict[sec_name])
|
235 |
-
if sec_index <= 0 and self.abs:
|
236 |
-
continue
|
237 |
-
else:
|
238 |
-
# 直接考虑后面的内容:
|
239 |
-
start_page = self.section_page_dict[sec_name]
|
240 |
-
if sec_index < len(list(self.section_page_dict.keys()))-1:
|
241 |
-
end_page = self.section_page_dict[list(self.section_page_dict.keys())[sec_index+1]]
|
242 |
-
else:
|
243 |
-
end_page = len(text_list)
|
244 |
-
print("start_page, end_page:", start_page, end_page)
|
245 |
-
cur_sec_text = ''
|
246 |
-
if end_page - start_page == 0:
|
247 |
-
if sec_index < len(list(self.section_page_dict.keys()))-1:
|
248 |
-
next_sec = list(self.section_page_dict.keys())[sec_index+1]
|
249 |
-
if text_list[start_page].find(sec_name) == -1:
|
250 |
-
start_i = text_list[start_page].find(sec_name.upper())
|
251 |
-
else:
|
252 |
-
start_i = text_list[start_page].find(sec_name)
|
253 |
-
if text_list[start_page].find(next_sec) == -1:
|
254 |
-
end_i = text_list[start_page].find(next_sec.upper())
|
255 |
-
else:
|
256 |
-
end_i = text_list[start_page].find(next_sec)
|
257 |
-
cur_sec_text += text_list[start_page][start_i:end_i]
|
258 |
-
else:
|
259 |
-
for page_i in range(start_page, end_page):
|
260 |
-
# print("page_i:", page_i)
|
261 |
-
if page_i == start_page:
|
262 |
-
if text_list[start_page].find(sec_name) == -1:
|
263 |
-
start_i = text_list[start_page].find(sec_name.upper())
|
264 |
-
else:
|
265 |
-
start_i = text_list[start_page].find(sec_name)
|
266 |
-
cur_sec_text += text_list[page_i][start_i:]
|
267 |
-
elif page_i < end_page:
|
268 |
-
cur_sec_text += text_list[page_i]
|
269 |
-
elif page_i == end_page:
|
270 |
-
if sec_index < len(list(self.section_page_dict.keys()))-1:
|
271 |
-
next_sec = list(self.section_page_dict.keys())[sec_index+1]
|
272 |
-
if text_list[start_page].find(next_sec) == -1:
|
273 |
-
end_i = text_list[start_page].find(next_sec.upper())
|
274 |
-
else:
|
275 |
-
end_i = text_list[start_page].find(next_sec)
|
276 |
-
cur_sec_text += text_list[page_i][:end_i]
|
277 |
-
section_dict[sec_name] = cur_sec_text.replace('-\n', '').replace('\n', ' ')
|
278 |
-
return section_dict
|
279 |
-
|
280 |
-
|
281 |
-
# 定义Reader类
|
282 |
-
class Reader:
|
283 |
-
# 初始化方法,设置属性
|
284 |
-
def __init__(self, key_word, query, filter_keys,
|
285 |
-
root_path='./',
|
286 |
-
gitee_key='',
|
287 |
-
sort=arxiv.SortCriterion.SubmittedDate, user_name='defualt', args=None):
|
288 |
-
self.user_name = user_name # 读者姓名
|
289 |
-
self.key_word = key_word # 读者感兴趣的关键词
|
290 |
-
self.query = query # 读者输入的搜索查询
|
291 |
-
self.sort = sort # 读者选择的排序方式
|
292 |
-
if args.language == 'en':
|
293 |
-
self.language = 'English'
|
294 |
-
elif args.language == 'zh':
|
295 |
-
self.language = 'Chinese'
|
296 |
-
else:
|
297 |
-
self.language = 'Chinese'
|
298 |
-
self.filter_keys = filter_keys # 用于在摘要中筛选的关键词
|
299 |
-
self.root_path = root_path
|
300 |
-
# 创建一个ConfigParser对象
|
301 |
-
self.config = configparser.ConfigParser()
|
302 |
-
# 读取配置文件
|
303 |
-
self.config.read('apikey.ini')
|
304 |
-
OPENAI_KEY = os.environ.get("OPENAI_KEY", "")
|
305 |
-
# 获取某个键对应的值
|
306 |
-
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
|
307 |
-
self.chat_api_list.append(OPENAI_KEY)
|
308 |
-
|
309 |
-
# prevent short strings from being incorrectly used as API keys.
|
310 |
-
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 20]
|
311 |
-
self.chatgpt_model = self.config.get('OpenAI', 'CHATGPT_MODEL')
|
312 |
-
|
313 |
-
# 如果已经设置了OpenAI key, 则不使用Azure Interface
|
314 |
-
if not self.chat_api_list:
|
315 |
-
self.chat_api_list.append(self.config.get('AzureOPenAI', 'OPENAI_API_KEYS'))
|
316 |
-
self.chatgpt_model = self.config.get('AzureOPenAI', 'CHATGPT_MODEL')
|
317 |
-
|
318 |
-
openai.api_base = self.config.get('AzureOPenAI', 'OPENAI_API_BASE')
|
319 |
-
openai.api_type = 'azure'
|
320 |
-
openai.api_version = self.config.get('AzureOPenAI', 'OPENAI_API_VERSION')
|
321 |
-
|
322 |
-
self.cur_api = 0
|
323 |
-
self.file_format = args.file_format
|
324 |
-
if args.save_image:
|
325 |
-
self.gitee_key = self.config.get('Gitee', 'api')
|
326 |
-
else:
|
327 |
-
self.gitee_key = ''
|
328 |
-
self.max_token_num = 4096
|
329 |
-
self.encoding = tiktoken.get_encoding("gpt2")
|
330 |
-
|
331 |
-
def get_arxiv(self, max_results=30):
|
332 |
-
search = arxiv.Search(query=self.query,
|
333 |
-
max_results=max_results,
|
334 |
-
sort_by=self.sort,
|
335 |
-
sort_order=arxiv.SortOrder.Descending,
|
336 |
-
)
|
337 |
-
return search
|
338 |
-
|
339 |
-
def filter_arxiv(self, max_results=30):
|
340 |
-
search = self.get_arxiv(max_results=max_results)
|
341 |
-
print("all search:")
|
342 |
-
for index, result in enumerate(search.results()):
|
343 |
-
print(index, result.title, result.updated)
|
344 |
-
|
345 |
-
filter_results = []
|
346 |
-
filter_keys = self.filter_keys
|
347 |
-
|
348 |
-
print("filter_keys:", self.filter_keys)
|
349 |
-
# 确保每个关键词都能在摘要中找到,才算是目标论文
|
350 |
-
for index, result in enumerate(search.results()):
|
351 |
-
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
|
352 |
-
meet_num = 0
|
353 |
-
for f_key in filter_keys.split(" "):
|
354 |
-
if f_key.lower() in abs_text.lower():
|
355 |
-
meet_num += 1
|
356 |
-
if meet_num == len(filter_keys.split(" ")):
|
357 |
-
filter_results.append(result)
|
358 |
-
# break
|
359 |
-
print("筛选后剩下的论文数量:")
|
360 |
-
print("filter_results:", len(filter_results))
|
361 |
-
print("filter_papers:")
|
362 |
-
for index, result in enumerate(filter_results):
|
363 |
-
print(index, result.title, result.updated)
|
364 |
-
return filter_results
|
365 |
-
|
366 |
-
def validateTitle(self, title):
|
367 |
-
# 将论文的乱七八糟的路径格式修正
|
368 |
-
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
|
369 |
-
new_title = re.sub(rstr, "_", title) # 替换为下划线
|
370 |
-
return new_title
|
371 |
-
|
372 |
-
def download_pdf(self, filter_results):
|
373 |
-
# 先创建文件夹
|
374 |
-
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
|
375 |
-
key_word = str(self.key_word.replace(':', ' '))
|
376 |
-
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ',
|
377 |
-
'').replace(
|
378 |
-
':', ' ')[:25] + '-' + date_str
|
379 |
-
try:
|
380 |
-
os.makedirs(path)
|
381 |
-
except:
|
382 |
-
pass
|
383 |
-
print("All_paper:", len(filter_results))
|
384 |
-
# 开始下载:
|
385 |
-
paper_list = []
|
386 |
-
for r_index, result in enumerate(filter_results):
|
387 |
-
try:
|
388 |
-
title_str = self.validateTitle(result.title)
|
389 |
-
pdf_name = title_str + '.pdf'
|
390 |
-
# result.download_pdf(path, filename=pdf_name)
|
391 |
-
self.try_download_pdf(result, path, pdf_name)
|
392 |
-
paper_path = os.path.join(path, pdf_name)
|
393 |
-
print("paper_path:", paper_path)
|
394 |
-
paper = Paper(path=paper_path,
|
395 |
-
url=result.entry_id,
|
396 |
-
title=result.title,
|
397 |
-
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
|
398 |
-
authers=[str(aut) for aut in result.authors],
|
399 |
-
)
|
400 |
-
# 下载完毕,开始解析:
|
401 |
-
paper.parse_pdf()
|
402 |
-
paper_list.append(paper)
|
403 |
-
except Exception as e:
|
404 |
-
print("download_error:", e)
|
405 |
-
pass
|
406 |
-
return paper_list
|
407 |
-
|
408 |
-
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
|
409 |
-
stop=tenacity.stop_after_attempt(5),
|
410 |
-
reraise=True)
|
411 |
-
def try_download_pdf(self, result, path, pdf_name):
|
412 |
-
result.download_pdf(path, filename=pdf_name)
|
413 |
-
|
414 |
-
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
|
415 |
-
stop=tenacity.stop_after_attempt(5),
|
416 |
-
reraise=True)
|
417 |
-
def upload_gitee(self, image_path, image_name='', ext='png'):
|
418 |
-
"""
|
419 |
-
上传到码云
|
420 |
-
:return:
|
421 |
-
"""
|
422 |
-
with open(image_path, 'rb') as f:
|
423 |
-
base64_data = base64.b64encode(f.read())
|
424 |
-
base64_content = base64_data.decode()
|
425 |
-
|
426 |
-
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
|
427 |
-
path = image_name + '-' + date_str
|
428 |
-
|
429 |
-
payload = {
|
430 |
-
"access_token": self.gitee_key,
|
431 |
-
"owner": self.config.get('Gitee', 'owner'),
|
432 |
-
"repo": self.config.get('Gitee', 'repo'),
|
433 |
-
"path": self.config.get('Gitee', 'path'),
|
434 |
-
"content": base64_content,
|
435 |
-
"message": "upload image"
|
436 |
-
}
|
437 |
-
# 这里需要修改成你的gitee的账户和仓库名,以及文件夹的名字:
|
438 |
-
url = f'https://gitee.com/api/v5/repos/' + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee',
|
439 |
-
'repo') + '/contents/' + self.config.get(
|
440 |
-
'Gitee', 'path') + '/' + path
|
441 |
-
rep = requests.post(url, json=payload).json()
|
442 |
-
print("rep:", rep)
|
443 |
-
if 'content' in rep.keys():
|
444 |
-
image_url = rep['content']['download_url']
|
445 |
-
else:
|
446 |
-
image_url = r"https://gitee.com/api/v5/repos/" + self.config.get('Gitee', 'owner') + '/' + self.config.get(
|
447 |
-
'Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path
|
448 |
-
|
449 |
-
return image_url
|
450 |
-
|
451 |
-
def summary_with_chat(self, paper_list):
|
452 |
-
htmls = []
|
453 |
-
for paper_index, paper in enumerate(paper_list):
|
454 |
-
# 第一步先用title,abs,和introduction进行总结。
|
455 |
-
text = ''
|
456 |
-
text += 'Title:' + paper.title
|
457 |
-
text += 'Url:' + paper.url
|
458 |
-
text += 'Abstract:' + paper.abs
|
459 |
-
text += 'Paper_info:' + paper.section_text_dict['paper_info']
|
460 |
-
# intro
|
461 |
-
text += list(paper.section_text_dict.values())[0]
|
462 |
-
chat_summary_text = ""
|
463 |
-
try:
|
464 |
-
chat_summary_text = self.chat_summary(text=text)
|
465 |
-
except Exception as e:
|
466 |
-
print("summary_error:", e)
|
467 |
-
import sys
|
468 |
-
exc_type, exc_obj, exc_tb = sys.exc_info()
|
469 |
-
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
|
470 |
-
print(exc_type, fname, exc_tb.tb_lineno)
|
471 |
-
if "maximum context" in str(e):
|
472 |
-
current_tokens_index = str(e).find("your messages resulted in") + len(
|
473 |
-
"your messages resulted in") + 1
|
474 |
-
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
|
475 |
-
summary_prompt_token = offset + 1000 + 150
|
476 |
-
chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
|
477 |
-
|
478 |
-
htmls.append(chat_summary_text)
|
479 |
-
|
480 |
-
# 第二步总结方法:
|
481 |
-
# TODO,由于有些文章的方法章节名是算法名,所以简单的通过关键词来筛选,很难获取,后面需要用其他的方案去优化。
|
482 |
-
method_key = ''
|
483 |
-
for parse_key in paper.section_text_dict.keys():
|
484 |
-
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
|
485 |
-
method_key = parse_key
|
486 |
-
break
|
487 |
-
|
488 |
-
if method_key != '':
|
489 |
-
text = ''
|
490 |
-
method_text = ''
|
491 |
-
summary_text = ''
|
492 |
-
summary_text += "<summary>" + "\n[$TODO$]\n"
|
493 |
-
# methods
|
494 |
-
method_text += paper.section_text_dict[method_key]
|
495 |
-
text = summary_text + "\n\n<Methods>:\n\n" + method_text
|
496 |
-
chat_method_text = ""
|
497 |
-
try:
|
498 |
-
chat_method_text = self.chat_method(text=text)
|
499 |
-
except Exception as e:
|
500 |
-
print("method_error:", e)
|
501 |
-
import sys
|
502 |
-
exc_type, exc_obj, exc_tb = sys.exc_info()
|
503 |
-
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
|
504 |
-
print(exc_type, fname, exc_tb.tb_lineno)
|
505 |
-
if "maximum context" in str(e):
|
506 |
-
current_tokens_index = str(e).find("your messages resulted in") + len(
|
507 |
-
"your messages resulted in") + 1
|
508 |
-
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
|
509 |
-
method_prompt_token = offset + 800 + 150
|
510 |
-
chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
|
511 |
-
htmls.append(chat_method_text)
|
512 |
-
else:
|
513 |
-
chat_method_text = ''
|
514 |
-
|
515 |
-
# 第三步总结全文,并打分:
|
516 |
-
conclusion_key = ''
|
517 |
-
for parse_key in paper.section_text_dict.keys():
|
518 |
-
if 'conclu' in parse_key.lower():
|
519 |
-
conclusion_key = parse_key
|
520 |
-
break
|
521 |
-
|
522 |
-
text = ''
|
523 |
-
conclusion_text = ''
|
524 |
-
summary_text = ''
|
525 |
-
summary_text += "<summary>" + "\n[$TODO$]\n" + "\n <Method summary>:\n" + "\n[$TODO$]\n"
|
526 |
-
if conclusion_key != '':
|
527 |
-
# conclusion
|
528 |
-
conclusion_text += paper.section_text_dict[conclusion_key]
|
529 |
-
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
|
530 |
-
else:
|
531 |
-
text = summary_text
|
532 |
-
chat_conclusion_text = ""
|
533 |
-
try:
|
534 |
-
chat_conclusion_text = self.chat_conclusion(text=text)
|
535 |
-
except Exception as e:
|
536 |
-
print("conclusion_error:", e)
|
537 |
-
import sys
|
538 |
-
exc_type, exc_obj, exc_tb = sys.exc_info()
|
539 |
-
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
|
540 |
-
print(exc_type, fname, exc_tb.tb_lineno)
|
541 |
-
if "maximum context" in str(e):
|
542 |
-
current_tokens_index = str(e).find("your messages resulted in") + len(
|
543 |
-
"your messages resulted in") + 1
|
544 |
-
offset = int(str(e)[current_tokens_index:current_tokens_index + 4])
|
545 |
-
conclusion_prompt_token = offset + 800 + 150
|
546 |
-
chat_conclusion_text = self.chat_conclusion(text=text,
|
547 |
-
conclusion_prompt_token=conclusion_prompt_token)
|
548 |
-
htmls.append(chat_conclusion_text)
|
549 |
-
|
550 |
-
for i, content in enumerate(htmls):
|
551 |
-
with open(f'{i}.txt', 'w') as f:
|
552 |
-
for c in content:
|
553 |
-
f.write(f'{c["content"]}\n')
|
554 |
-
|
555 |
-
# # 整合成一个文件,打包保存下来。
|
556 |
-
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
|
557 |
-
export_path = os.path.join(self.root_path, 'export')
|
558 |
-
if not os.path.exists(export_path):
|
559 |
-
os.makedirs(export_path)
|
560 |
-
mode = 'w' if paper_index == 0 else 'a'
|
561 |
-
file_name = os.path.join(export_path,
|
562 |
-
date_str + '-' + self.validateTitle(paper.title[:80]) + "." + self.file_format)
|
563 |
-
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
|
564 |
-
|
565 |
-
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
|
566 |
-
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
|
567 |
-
htmls = []
|
568 |
-
|
569 |
-
def summary(self, paper_list):
|
570 |
-
paper = paper_list[0]
|
571 |
-
# 第一步先用title,abs,和introduction进行总结。
|
572 |
-
text = ''
|
573 |
-
text += 'Title:' + paper.title
|
574 |
-
text += 'Url:' + paper.url
|
575 |
-
text += 'Abstract:' + paper.abs
|
576 |
-
text += 'Paper_info:' + paper.section_text_dict['paper_info']
|
577 |
-
# intro
|
578 |
-
text += list(paper.section_text_dict.values())[0]
|
579 |
-
content = self.chat_summary(text=text)
|
580 |
-
|
581 |
-
result = ''
|
582 |
-
for c in content:
|
583 |
-
result += f'{c["content"]}\n'
|
584 |
-
return result
|
585 |
-
|
586 |
-
def method(self, paper_list, summary_content):
|
587 |
-
paper = paper_list[0]
|
588 |
-
# 第二步总结方法:
|
589 |
-
# TODO,由于有些文章的方法章节名是算法名,所以简单的通过关键词来筛选,很难获取,后面需要用其他的方案去优化。
|
590 |
-
method_key = ''
|
591 |
-
for parse_key in paper.section_text_dict.keys():
|
592 |
-
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
|
593 |
-
method_key = parse_key
|
594 |
-
break
|
595 |
-
|
596 |
-
if method_key != '':
|
597 |
-
method_text = ''
|
598 |
-
summary_text = ''
|
599 |
-
summary_text += "<summary>" + f"\n{summary_content}\n"
|
600 |
-
# methods
|
601 |
-
method_text += paper.section_text_dict[method_key]
|
602 |
-
text = summary_text + "\n\n<Methods>:\n\n" + method_text
|
603 |
-
content = self.chat_method(text=text)
|
604 |
-
|
605 |
-
result = ''
|
606 |
-
for c in content:
|
607 |
-
result += f'{c["content"]}\n'
|
608 |
-
return result
|
609 |
-
else:
|
610 |
-
return 'Method not found!'
|
611 |
-
|
612 |
-
def conclusion(self, paper_list, summary, method):
|
613 |
-
paper = paper_list[0]
|
614 |
-
# 第三步总结全文,并打分:
|
615 |
-
conclusion_key = ''
|
616 |
-
for parse_key in paper.section_text_dict.keys():
|
617 |
-
if 'conclu' in parse_key.lower():
|
618 |
-
conclusion_key = parse_key
|
619 |
-
break
|
620 |
-
|
621 |
-
text = ''
|
622 |
-
conclusion_text = ''
|
623 |
-
summary_text = ''
|
624 |
-
summary_text += "<summary>" + f"\n{summary}\n" + "\n <Method summary>:\n" + f"\n{method}\n"
|
625 |
-
if conclusion_key != '':
|
626 |
-
# conclusion
|
627 |
-
conclusion_text += paper.section_text_dict[conclusion_key]
|
628 |
-
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
|
629 |
-
else:
|
630 |
-
text = summary_text
|
631 |
-
content = self.chat_conclusion(text=text)
|
632 |
-
result = ''
|
633 |
-
for c in content:
|
634 |
-
result += f'{c["content"]}\n'
|
635 |
-
return result
|
636 |
-
|
637 |
-
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
|
638 |
-
stop=tenacity.stop_after_attempt(5),
|
639 |
-
reraise=True)
|
640 |
-
def chat_conclusion(self, text, conclusion_prompt_token=800):
|
641 |
-
openai.api_key = self.chat_api_list[self.cur_api]
|
642 |
-
self.cur_api += 1
|
643 |
-
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
|
644 |
-
text_token = len(self.encoding.encode(text))
|
645 |
-
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
|
646 |
-
clip_text = text[:clip_text_index]
|
647 |
-
|
648 |
-
messages = [
|
649 |
-
{"role": "system",
|
650 |
-
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
|
651 |
-
# chatgpt 角色
|
652 |
-
{"role": "assistant",
|
653 |
-
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:" + clip_text},
|
654 |
-
# 背景知识,可以参考OpenReview的审稿流程
|
655 |
-
{"role": "user", "content": """
|
656 |
-
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
|
657 |
-
- (1):What is the significance of this piece of work?
|
658 |
-
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
|
659 |
-
.......
|
660 |
-
Follow the format of the output later:
|
661 |
-
8. Conclusion: \n\n
|
662 |
-
- (1):xxx;\n
|
663 |
-
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
|
664 |
-
|
665 |
-
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
|
666 |
-
""".format(self.language, self.language)},
|
667 |
-
]
|
668 |
-
|
669 |
-
return messages
|
670 |
-
|
671 |
-
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
|
672 |
-
stop=tenacity.stop_after_attempt(5),
|
673 |
-
reraise=True)
|
674 |
-
def chat_method(self, text, method_prompt_token=800):
|
675 |
-
openai.api_key = self.chat_api_list[self.cur_api]
|
676 |
-
self.cur_api += 1
|
677 |
-
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
|
678 |
-
text_token = len(self.encoding.encode(text))
|
679 |
-
clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
|
680 |
-
clip_text = text[:clip_text_index]
|
681 |
-
messages = [
|
682 |
-
{"role": "system",
|
683 |
-
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
|
684 |
-
# chatgpt 角色
|
685 |
-
{"role": "assistant",
|
686 |
-
"content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions." + clip_text},
|
687 |
-
# 背景知识
|
688 |
-
{"role": "user", "content": """
|
689 |
-
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
|
690 |
-
- (1):...
|
691 |
-
- (2):...
|
692 |
-
- (3):...
|
693 |
-
- .......
|
694 |
-
Follow the format of the output that follows:
|
695 |
-
7. Methods: \n\n
|
696 |
-
- (1):xxx;\n
|
697 |
-
- (2):xxx;\n
|
698 |
-
- (3):xxx;\n
|
699 |
-
....... \n\n
|
700 |
-
|
701 |
-
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
|
702 |
-
""".format(self.language, self.language)},
|
703 |
-
]
|
704 |
-
return messages
|
705 |
-
|
706 |
-
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
|
707 |
-
stop=tenacity.stop_after_attempt(5),
|
708 |
-
reraise=True)
|
709 |
-
def chat_summary(self, text, summary_prompt_token=1100):
|
710 |
-
openai.api_key = self.chat_api_list[self.cur_api]
|
711 |
-
self.cur_api += 1
|
712 |
-
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
|
713 |
-
text_token = len(self.encoding.encode(text))
|
714 |
-
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
|
715 |
-
clip_text = text[:clip_text_index]
|
716 |
-
messages = [
|
717 |
-
{"role": "system",
|
718 |
-
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
|
719 |
-
{"role": "assistant",
|
720 |
-
"content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
|
721 |
-
{"role": "user", "content": """
|
722 |
-
1. Mark the title of the paper (with Chinese translation)
|
723 |
-
2. list all the authors' names (use English)
|
724 |
-
3. mark the first author's affiliation (output {} translation only)
|
725 |
-
4. mark the keywords of this article (use English)
|
726 |
-
5. link to the paper, Github code link (if available, fill in Github:None if not)
|
727 |
-
6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
|
728 |
-
- (1):What is the research background of this article?
|
729 |
-
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
|
730 |
-
- (3):What is the research methodology proposed in this paper?
|
731 |
-
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
|
732 |
-
Follow the format of the output that follows:
|
733 |
-
1. Title: xxx\n\n
|
734 |
-
2. Authors: xxx\n\n
|
735 |
-
3. Affiliation: xxx\n\n
|
736 |
-
4. Keywords: xxx\n\n
|
737 |
-
5. Urls: xxx or xxx , xxx \n\n
|
738 |
-
6. Summary: \n\n
|
739 |
-
- (1):xxx;\n
|
740 |
-
- (2):xxx;\n
|
741 |
-
- (3):xxx;\n
|
742 |
-
- (4):xxx.\n\n
|
743 |
-
|
744 |
-
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
|
745 |
-
""".format(self.language, self.language, self.language)},
|
746 |
-
]
|
747 |
-
|
748 |
-
return messages
|
749 |
-
|
750 |
-
def export_to_markdown(self, text, file_name, mode='w'):
|
751 |
-
# 使用markdown模块的convert方法,将文本转换为html格式
|
752 |
-
# html = markdown.markdown(text)
|
753 |
-
# 打开一个文件,以写入模式
|
754 |
-
with open(file_name, mode, encoding="utf-8") as f:
|
755 |
-
# 将html格式的内容写入文件
|
756 |
-
f.write(text)
|
757 |
-
|
758 |
-
# 定义一个方法,打印出读者信息
|
759 |
-
|
760 |
-
def show_info(self):
|
761 |
-
print(f"Key word: {self.key_word}")
|
762 |
-
print(f"Query: {self.query}")
|
763 |
-
print(f"Sort: {self.sort}")
|
764 |
-
|
765 |
-
|
766 |
-
def chat_paper_main(args):
|
767 |
-
# 创建一个Reader对象,并调用show_info方法
|
768 |
-
if args.sort == 'Relevance':
|
769 |
-
sort = arxiv.SortCriterion.Relevance
|
770 |
-
elif args.sort == 'LastUpdatedDate':
|
771 |
-
sort = arxiv.SortCriterion.LastUpdatedDate
|
772 |
-
else:
|
773 |
-
sort = arxiv.SortCriterion.Relevance
|
774 |
-
|
775 |
-
if args.pdf_path:
|
776 |
-
reader1 = Reader(key_word=args.key_word,
|
777 |
-
query=args.query,
|
778 |
-
filter_keys=args.filter_keys,
|
779 |
-
sort=sort,
|
780 |
-
args=args
|
781 |
-
)
|
782 |
-
reader1.show_info()
|
783 |
-
# 开始判断是路径还是文件:
|
784 |
-
paper_list = []
|
785 |
-
if args.pdf_path.endswith(".pdf"):
|
786 |
-
paper_list.append(Paper(path=args.pdf_path))
|
787 |
-
else:
|
788 |
-
for root, dirs, files in os.walk(args.pdf_path):
|
789 |
-
print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
|
790 |
-
for filename in files:
|
791 |
-
# 如果找到PDF文件,则将其复制到目标文件夹中
|
792 |
-
if filename.endswith(".pdf"):
|
793 |
-
paper_list.append(Paper(path=os.path.join(root, filename)))
|
794 |
-
print("------------------paper_num: {}------------------".format(len(paper_list)))
|
795 |
-
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
|
796 |
-
return reader1, paper_list
|
797 |
-
else:
|
798 |
-
reader1 = Reader(key_word=args.key_word,
|
799 |
-
query=args.query,
|
800 |
-
filter_keys=args.filter_keys,
|
801 |
-
sort=sort,
|
802 |
-
args=args
|
803 |
-
)
|
804 |
-
reader1.show_info()
|
805 |
-
filter_results = reader1.filter_arxiv(max_results=args.max_results)
|
806 |
-
paper_list = reader1.download_pdf(filter_results)
|
807 |
-
reader1.summary_with_chat(paper_list=paper_list)
|
808 |
-
|
809 |
-
|
810 |
-
def upload(topic_str, file):
|
811 |
-
global reader1, paper_list
|
812 |
-
|
813 |
-
args = parser.parse_args()
|
814 |
-
args.pdf_path = file.name
|
815 |
-
args.key_word = 'education' if len(topic_str) < 1 else topic_str
|
816 |
-
reader1, paper_list = chat_paper_main(args=args)
|
817 |
-
|
818 |
-
return reader1.summary(paper_list)
|
819 |
-
|
820 |
-
|
821 |
-
if __name__ == '__main__':
|
822 |
-
parser = argparse.ArgumentParser()
|
823 |
-
parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
|
824 |
-
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
|
825 |
-
# parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
|
826 |
-
parser.add_argument("--query", type=str, default='all: ChatGPT robot',
|
827 |
-
help="the query string, ti: xx, au: xx, all: xx,")
|
828 |
-
parser.add_argument("--key_word", type=str, default='reinforcement learning',
|
829 |
-
help="the key word of user research fields")
|
830 |
-
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot',
|
831 |
-
help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
|
832 |
-
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
|
833 |
-
# arxiv.SortCriterion.Relevance
|
834 |
-
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
|
835 |
-
parser.add_argument("--save_image", default=False,
|
836 |
-
help="save image? It takes a minute or two to save a picture! But pretty")
|
837 |
-
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
|
838 |
-
parser.add_argument("--language", type=str, default='en', help="The other output lauguage is English, is en")
|
839 |
-
import time
|
840 |
-
|
841 |
-
start_time = time.time()
|
842 |
-
reader1, paper_list = None, None
|
843 |
-
|
844 |
-
import gradio as gr
|
845 |
-
|
846 |
-
with gr.Blocks(css=".output-image {height: 800px !important}") as demo:
|
847 |
-
with gr.Column():
|
848 |
-
topic = gr.Text(label='Topic')
|
849 |
-
pdf_file = gr.File(label="上传论文(必须为PDF)")
|
850 |
-
summary_input = gr.Text(label='Summary Input')
|
851 |
-
summary_output = gr.Text(label='Summary Output')
|
852 |
-
method_input_button = gr.Button(value='Next')
|
853 |
-
method_input = gr.Text(label='Method Input', value='')
|
854 |
-
method_output = gr.Text(label='Method Output')
|
855 |
-
conclusion_input_button = gr.Button(value='Next')
|
856 |
-
conclusion_input = gr.Text(label='Conclusion Input', value='')
|
857 |
-
conclusion_output = gr.Text(label='Conclusion Output')
|
858 |
-
result_button = gr.Button(value='Next')
|
859 |
-
result = gr.Text(label='Result')
|
860 |
-
pdf_file.upload(upload, [topic, pdf_file], [summary_input])
|
861 |
-
method_input_button.click(lambda x: reader1.method(paper_list, x), [summary_output], [method_input])
|
862 |
-
conclusion_input_button.click(lambda x, y: reader1.conclusion(paper_list, x, y), [summary_output, method_output], [conclusion_input])
|
863 |
-
result_button.click(lambda x, y, z: '\n'.join([x, y, z]), [summary_output, method_output, conclusion_output], [result])
|
864 |
-
|
865 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/insetgan.py
DELETED
@@ -1,448 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
|
4 |
-
import torch
|
5 |
-
import torch.nn.functional as F
|
6 |
-
from tqdm import tqdm
|
7 |
-
from lpips import LPIPS
|
8 |
-
import numpy as np
|
9 |
-
from torch_utils.models import Generator as bodyGAN
|
10 |
-
from torch_utils.models_face import Generator as FaceGAN
|
11 |
-
import dlib
|
12 |
-
from utils.face_alignment import align_face_for_insetgan
|
13 |
-
from utils.util import visual, tensor_to_numpy, numpy_to_tensor
|
14 |
-
import legacy
|
15 |
-
import os
|
16 |
-
import click
|
17 |
-
|
18 |
-
|
19 |
-
class InsetGAN(torch.nn.Module):
|
20 |
-
def __init__(self, stylebody_ckpt, styleface_ckpt):
|
21 |
-
super().__init__()
|
22 |
-
|
23 |
-
# convert pkl to pth
|
24 |
-
if not os.path.exists(stylebody_ckpt.replace('.pkl', '.pth')):
|
25 |
-
legacy.convert(
|
26 |
-
stylebody_ckpt, stylebody_ckpt.replace('.pkl', '.pth'))
|
27 |
-
stylebody_ckpt = stylebody_ckpt.replace('.pkl', '.pth')
|
28 |
-
|
29 |
-
if not os.path.exists(styleface_ckpt.replace('.pkl', '.pth')):
|
30 |
-
legacy.convert(
|
31 |
-
styleface_ckpt, styleface_ckpt.replace('.pkl', '.pth'))
|
32 |
-
styleface_ckpt = styleface_ckpt.replace('.pkl', '.pth')
|
33 |
-
|
34 |
-
# dual generator
|
35 |
-
config = {"latent": 512, "n_mlp": 8, "channel_multiplier": 2}
|
36 |
-
self.body_generator = bodyGAN(
|
37 |
-
size=1024,
|
38 |
-
style_dim=config["latent"],
|
39 |
-
n_mlp=config["n_mlp"],
|
40 |
-
channel_multiplier=config["channel_multiplier"]
|
41 |
-
)
|
42 |
-
self.body_generator.load_state_dict(
|
43 |
-
torch.load(stylebody_ckpt)['g_ema'])
|
44 |
-
self.body_generator.eval().requires_grad_(False).cuda()
|
45 |
-
|
46 |
-
self.face_generator = FaceGAN(
|
47 |
-
size=1024,
|
48 |
-
style_dim=config["latent"],
|
49 |
-
n_mlp=config["n_mlp"],
|
50 |
-
channel_multiplier=config["channel_multiplier"]
|
51 |
-
)
|
52 |
-
self.face_generator.load_state_dict(
|
53 |
-
torch.load(styleface_ckpt)['g_ema'])
|
54 |
-
self.face_generator.eval().requires_grad_(False).cuda()
|
55 |
-
# crop function
|
56 |
-
self.dlib_predictor = dlib.shape_predictor(
|
57 |
-
'./pretrained_models/shape_predictor_68_face_landmarks.dat')
|
58 |
-
self.dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1(
|
59 |
-
"pretrained_models/mmod_human_face_detector.dat")
|
60 |
-
|
61 |
-
# criterion
|
62 |
-
self.lpips_loss = LPIPS(net='alex').cuda().eval()
|
63 |
-
self.l1_loss = torch.nn.L1Loss(reduction='mean')
|
64 |
-
|
65 |
-
def loss_coarse(self, A_face, B, p1=500, p2=0.05):
|
66 |
-
A_face = F.interpolate(A_face, size=(64, 64), mode='area')
|
67 |
-
B = F.interpolate(B, size=(64, 64), mode='area')
|
68 |
-
loss_l1 = p1 * self.l1_loss(A_face, B)
|
69 |
-
loss_lpips = p2 * self.lpips_loss(A_face, B)
|
70 |
-
return loss_l1 + loss_lpips
|
71 |
-
|
72 |
-
@staticmethod
|
73 |
-
def get_border_mask(A, x, spec):
|
74 |
-
mask = torch.zeros_like(A)
|
75 |
-
mask[:, :, :x, ] = 1
|
76 |
-
mask[:, :, -x:, ] = 1
|
77 |
-
mask[:, :, :, :x] = 1
|
78 |
-
mask[:, :, :, -x:] = 1
|
79 |
-
return mask
|
80 |
-
|
81 |
-
@staticmethod
|
82 |
-
def get_body_mask(A, crop, padding=4):
|
83 |
-
mask = torch.ones_like(A)
|
84 |
-
mask[:, :, crop[1]-padding:crop[3]+padding,
|
85 |
-
crop[0]-padding:crop[2]+padding] = 0
|
86 |
-
return mask
|
87 |
-
|
88 |
-
def loss_border(self, A_face, B, p1=10000, p2=2, spec=None):
|
89 |
-
mask = self.get_border_mask(A_face, 8, spec)
|
90 |
-
loss_l1 = p1 * self.l1_loss(A_face*mask, B*mask)
|
91 |
-
loss_lpips = p2 * self.lpips_loss(A_face*mask, B*mask)
|
92 |
-
return loss_l1 + loss_lpips
|
93 |
-
|
94 |
-
def loss_body(self, A, B, crop, p1=9000, p2=0.1):
|
95 |
-
padding = int((crop[3] - crop[1]) / 20)
|
96 |
-
mask = self.get_body_mask(A, crop, padding)
|
97 |
-
loss_l1 = p1 * self.l1_loss(A*mask, B*mask)
|
98 |
-
loss_lpips = p2 * self.lpips_loss(A*mask, B*mask)
|
99 |
-
return loss_l1+loss_lpips
|
100 |
-
|
101 |
-
def loss_face(self, A, B, crop, p1=5000, p2=1.75):
|
102 |
-
mask = 1 - self.get_body_mask(A, crop)
|
103 |
-
loss_l1 = p1 * self.l1_loss(A*mask, B*mask)
|
104 |
-
loss_lpips = p2 * self.lpips_loss(A*mask, B*mask)
|
105 |
-
return loss_l1+loss_lpips
|
106 |
-
|
107 |
-
def loss_reg(self, w, w_mean, p1, w_plus_delta=None, p2=None):
|
108 |
-
return p1 * torch.mean(((w - w_mean) ** 2)) + p2 * torch.mean(w_plus_delta ** 2)
|
109 |
-
|
110 |
-
# FFHQ type
|
111 |
-
def detect_face_dlib(self, img):
|
112 |
-
# tensor to numpy array rgb uint8
|
113 |
-
img = tensor_to_numpy(img)
|
114 |
-
aligned_image, crop, rect = align_face_for_insetgan(img=img,
|
115 |
-
detector=self.dlib_cnn_face_detector,
|
116 |
-
predictor=self.dlib_predictor,
|
117 |
-
output_size=256)
|
118 |
-
|
119 |
-
aligned_image = np.array(aligned_image)
|
120 |
-
aligned_image = numpy_to_tensor(aligned_image)
|
121 |
-
return aligned_image, crop, rect
|
122 |
-
|
123 |
-
# joint optimization
|
124 |
-
def dual_optimizer(self,
|
125 |
-
face_w,
|
126 |
-
body_w,
|
127 |
-
joint_steps=500,
|
128 |
-
face_initial_learning_rate=0.02,
|
129 |
-
body_initial_learning_rate=0.05,
|
130 |
-
lr_rampdown_length=0.25,
|
131 |
-
lr_rampup_length=0.05,
|
132 |
-
seed=None,
|
133 |
-
output_path=None,
|
134 |
-
video=0):
|
135 |
-
'''
|
136 |
-
Given a face_w, optimize a body_w with suitable body pose & shape for face_w
|
137 |
-
'''
|
138 |
-
def visual_(path, synth_body, synth_face, body_crop, step, both=False, init_body_with_face=None):
|
139 |
-
tmp = synth_body.clone().detach()
|
140 |
-
tmp[:, :, body_crop[1]:body_crop[3],
|
141 |
-
body_crop[0]:body_crop[2]] = synth_face
|
142 |
-
if both:
|
143 |
-
tmp = torch.cat([synth_body, tmp], dim=3)
|
144 |
-
save_path = os.path.join(path, f"{step:04d}.jpg")
|
145 |
-
visual(tmp, save_path)
|
146 |
-
|
147 |
-
def forward(face_w_opt,
|
148 |
-
body_w_opt,
|
149 |
-
face_w_delta,
|
150 |
-
body_w_delta,
|
151 |
-
body_crop,
|
152 |
-
update_crop=False
|
153 |
-
):
|
154 |
-
if face_w_opt.shape[1] != 18:
|
155 |
-
face_ws = (face_w_opt).repeat([1, 18, 1])
|
156 |
-
else:
|
157 |
-
face_ws = face_w_opt.clone()
|
158 |
-
face_ws = face_ws + face_w_delta
|
159 |
-
synth_face, _ = self.face_generator(
|
160 |
-
[face_ws], input_is_latent=True, randomize_noise=False)
|
161 |
-
|
162 |
-
body_ws = (body_w_opt).repeat([1, 18, 1])
|
163 |
-
body_ws = body_ws + body_w_delta
|
164 |
-
synth_body, _ = self.body_generator(
|
165 |
-
[body_ws], input_is_latent=True, randomize_noise=False)
|
166 |
-
|
167 |
-
if update_crop:
|
168 |
-
old_r = (body_crop[3]-body_crop[1]
|
169 |
-
) // 2, (body_crop[2]-body_crop[0]) // 2
|
170 |
-
_, body_crop, _ = self.detect_face_dlib(synth_body)
|
171 |
-
center = (body_crop[1] + body_crop[3]
|
172 |
-
) // 2, (body_crop[0] + body_crop[2]) // 2
|
173 |
-
body_crop = (center[1] - old_r[1], center[0] - old_r[0],
|
174 |
-
center[1] + old_r[1], center[0] + old_r[0])
|
175 |
-
|
176 |
-
synth_body_face = synth_body[:, :, body_crop[1]:body_crop[3], body_crop[0]:body_crop[2]]
|
177 |
-
|
178 |
-
if synth_face.shape[2] > body_crop[3]-body_crop[1]:
|
179 |
-
synth_face_resize = F.interpolate(synth_face, size=(
|
180 |
-
body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area')
|
181 |
-
|
182 |
-
return synth_body, synth_body_face, synth_face, synth_face_resize, body_crop
|
183 |
-
|
184 |
-
def update_lr(init_lr, step, num_steps, lr_rampdown_length, lr_rampup_length):
|
185 |
-
t = step / num_steps
|
186 |
-
lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
|
187 |
-
lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
|
188 |
-
lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
|
189 |
-
lr = init_lr * lr_ramp
|
190 |
-
return lr
|
191 |
-
|
192 |
-
# update output_path
|
193 |
-
output_path = os.path.join(output_path, seed)
|
194 |
-
os.makedirs(output_path, exist_ok=True)
|
195 |
-
|
196 |
-
# define optimized params
|
197 |
-
body_w_mean = self.body_generator.mean_latent(10000).detach()
|
198 |
-
face_w_opt = face_w.clone().detach().requires_grad_(True)
|
199 |
-
body_w_opt = body_w.clone().detach().requires_grad_(True)
|
200 |
-
face_w_delta = torch.zeros_like(
|
201 |
-
face_w.repeat([1, 18, 1])).requires_grad_(True)
|
202 |
-
body_w_delta = torch.zeros_like(
|
203 |
-
body_w.repeat([1, 18, 1])).requires_grad_(True)
|
204 |
-
# generate ref face & body
|
205 |
-
ref_body, _ = self.body_generator(
|
206 |
-
[body_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False)
|
207 |
-
# for inversion
|
208 |
-
ref_face, _ = self.face_generator(
|
209 |
-
[face_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False)
|
210 |
-
# get initilized crop
|
211 |
-
_, body_crop, _ = self.detect_face_dlib(ref_body)
|
212 |
-
# NOTE: this is face rect only. no FFHQ type.
|
213 |
-
_, _, face_crop = self.detect_face_dlib(ref_face)
|
214 |
-
# create optimizer
|
215 |
-
face_optimizer = torch.optim.Adam([face_w_opt, face_w_delta], betas=(
|
216 |
-
0.9, 0.999), lr=face_initial_learning_rate)
|
217 |
-
body_optimizer = torch.optim.Adam([body_w_opt, body_w_delta], betas=(
|
218 |
-
0.9, 0.999), lr=body_initial_learning_rate)
|
219 |
-
|
220 |
-
global_step = 0
|
221 |
-
# Stage1: remove background of face image
|
222 |
-
face_steps = 25
|
223 |
-
pbar = tqdm(range(face_steps))
|
224 |
-
for step in pbar:
|
225 |
-
face_lr = update_lr(face_initial_learning_rate / 2, step,
|
226 |
-
face_steps, lr_rampdown_length, lr_rampup_length)
|
227 |
-
for param_group in face_optimizer.param_groups:
|
228 |
-
param_group['lr'] = face_lr
|
229 |
-
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
|
230 |
-
body_w_opt,
|
231 |
-
face_w_delta,
|
232 |
-
body_w_delta,
|
233 |
-
body_crop)
|
234 |
-
loss_face = self.loss_face(
|
235 |
-
synth_face_raw, ref_face, face_crop, 5000, 1.75)
|
236 |
-
loss_coarse = self.loss_coarse(
|
237 |
-
synth_face, synth_body_face, 50, 0.05)
|
238 |
-
loss_border = self.loss_border(
|
239 |
-
synth_face, synth_body_face, 1000, 0.1)
|
240 |
-
loss = loss_coarse + loss_border + loss_face
|
241 |
-
face_optimizer.zero_grad()
|
242 |
-
loss.backward()
|
243 |
-
face_optimizer.step()
|
244 |
-
# visualization
|
245 |
-
if video:
|
246 |
-
visual_(output_path, synth_body,
|
247 |
-
synth_face, body_crop, global_step)
|
248 |
-
pbar.set_description(
|
249 |
-
(
|
250 |
-
f"face: {step:.4f}, lr: {face_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
|
251 |
-
f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};"
|
252 |
-
)
|
253 |
-
)
|
254 |
-
global_step += 1
|
255 |
-
|
256 |
-
# Stage2: find a suitable body
|
257 |
-
body_steps = 150
|
258 |
-
pbar = tqdm(range(body_steps))
|
259 |
-
for step in pbar:
|
260 |
-
body_lr = update_lr(body_initial_learning_rate, step,
|
261 |
-
body_steps, lr_rampdown_length, lr_rampup_length)
|
262 |
-
update_crop = True if (step % 50 == 0) else False
|
263 |
-
# update_crop = False
|
264 |
-
for param_group in body_optimizer.param_groups:
|
265 |
-
param_group['lr'] = body_lr
|
266 |
-
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
|
267 |
-
body_w_opt,
|
268 |
-
face_w_delta,
|
269 |
-
body_w_delta,
|
270 |
-
body_crop,
|
271 |
-
update_crop=update_crop)
|
272 |
-
loss_coarse = self.loss_coarse(
|
273 |
-
synth_face, synth_body_face, 500, 0.05)
|
274 |
-
loss_border = self.loss_border(
|
275 |
-
synth_face, synth_body_face, 2500, 0)
|
276 |
-
loss_body = self.loss_body(
|
277 |
-
synth_body, ref_body, body_crop, 9000, 0.1)
|
278 |
-
loss_reg = self.loss_reg(
|
279 |
-
body_w_opt, body_w_mean, 15000, body_w_delta, 0)
|
280 |
-
loss = loss_coarse + loss_border + loss_body + loss_reg
|
281 |
-
body_optimizer.zero_grad()
|
282 |
-
loss.backward()
|
283 |
-
body_optimizer.step()
|
284 |
-
|
285 |
-
# visualization
|
286 |
-
if video:
|
287 |
-
visual_(output_path, synth_body,
|
288 |
-
synth_face, body_crop, global_step)
|
289 |
-
pbar.set_description(
|
290 |
-
(
|
291 |
-
f"body: {step:.4f}, lr: {body_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
|
292 |
-
f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}"
|
293 |
-
)
|
294 |
-
)
|
295 |
-
global_step += 1
|
296 |
-
|
297 |
-
# Stage3: joint optimization
|
298 |
-
interval = 50
|
299 |
-
joint_face_steps = joint_steps // 2
|
300 |
-
joint_body_steps = joint_steps // 2
|
301 |
-
face_step = 0
|
302 |
-
body_step = 0
|
303 |
-
pbar = tqdm(range(joint_steps))
|
304 |
-
flag = -1
|
305 |
-
for step in pbar:
|
306 |
-
if step % interval == 0:
|
307 |
-
flag += 1
|
308 |
-
text_flag = 'optimize_face' if flag % 2 == 0 else 'optimize_body'
|
309 |
-
synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt,
|
310 |
-
body_w_opt,
|
311 |
-
face_w_delta,
|
312 |
-
body_w_delta,
|
313 |
-
body_crop)
|
314 |
-
if text_flag == 'optimize_face':
|
315 |
-
face_lr = update_lr(face_initial_learning_rate, face_step,
|
316 |
-
joint_face_steps, lr_rampdown_length, lr_rampup_length)
|
317 |
-
for param_group in face_optimizer.param_groups:
|
318 |
-
param_group['lr'] = face_lr
|
319 |
-
loss_face = self.loss_face(
|
320 |
-
synth_face_raw, ref_face, face_crop, 5000, 1.75)
|
321 |
-
loss_coarse = self.loss_coarse(
|
322 |
-
synth_face, synth_body_face, 500, 0.05)
|
323 |
-
loss_border = self.loss_border(
|
324 |
-
synth_face, synth_body_face, 25000, 0)
|
325 |
-
loss = loss_coarse + loss_border + loss_face
|
326 |
-
face_optimizer.zero_grad()
|
327 |
-
loss.backward()
|
328 |
-
face_optimizer.step()
|
329 |
-
pbar.set_description(
|
330 |
-
(
|
331 |
-
f"face: {step}, lr: {face_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
|
332 |
-
f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};"
|
333 |
-
)
|
334 |
-
)
|
335 |
-
face_step += 1
|
336 |
-
else:
|
337 |
-
body_lr = update_lr(body_initial_learning_rate, body_step,
|
338 |
-
joint_body_steps, lr_rampdown_length, lr_rampup_length)
|
339 |
-
for param_group in body_optimizer.param_groups:
|
340 |
-
param_group['lr'] = body_lr
|
341 |
-
loss_coarse = self.loss_coarse(
|
342 |
-
synth_face, synth_body_face, 500, 0.05)
|
343 |
-
loss_border = self.loss_border(
|
344 |
-
synth_face, synth_body_face, 2500, 0)
|
345 |
-
loss_body = self.loss_body(
|
346 |
-
synth_body, ref_body, body_crop, 9000, 0.1)
|
347 |
-
loss_reg = self.loss_reg(
|
348 |
-
body_w_opt, body_w_mean, 25000, body_w_delta, 0)
|
349 |
-
loss = loss_coarse + loss_border + loss_body + loss_reg
|
350 |
-
body_optimizer.zero_grad()
|
351 |
-
loss.backward()
|
352 |
-
body_optimizer.step()
|
353 |
-
pbar.set_description(
|
354 |
-
(
|
355 |
-
f"body: {step}, lr: {body_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};"
|
356 |
-
f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}"
|
357 |
-
)
|
358 |
-
)
|
359 |
-
body_step += 1
|
360 |
-
if video:
|
361 |
-
visual_(output_path, synth_body,
|
362 |
-
synth_face, body_crop, global_step)
|
363 |
-
global_step += 1
|
364 |
-
return face_w_opt.repeat([1, 18, 1])+face_w_delta, body_w_opt.repeat([1, 18, 1])+body_w_delta, body_crop
|
365 |
-
|
366 |
-
|
367 |
-
"""
|
368 |
-
Jointly combine and optimize generated faces and bodies .
|
369 |
-
Examples:
|
370 |
-
|
371 |
-
\b
|
372 |
-
# Combine the generate human full-body image from the provided StyleGAN-Human pre-trained model
|
373 |
-
# and the generated face image from FFHQ model, optimize both latent codes to produce the coherent face-body image
|
374 |
-
python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\
|
375 |
-
--body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1
|
376 |
-
"""
|
377 |
-
|
378 |
-
|
379 |
-
@click.command()
|
380 |
-
@click.pass_context
|
381 |
-
@click.option('--face_network', default="./pretrained_models/ffhq.pkl", help='Network pickle filename', required=True)
|
382 |
-
@click.option('--body_network', default='./pretrained_models/stylegan2_1024.pkl', help='Network pickle filename', required=True)
|
383 |
-
@click.option('--face_seed', type=int, default=82, help='selected random seed')
|
384 |
-
@click.option('--body_seed', type=int, default=43, help='selected random seed')
|
385 |
-
@click.option('--joint_steps', type=int, default=500, help='num steps for joint optimization')
|
386 |
-
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.6, show_default=True)
|
387 |
-
@click.option('--outdir', help='Where to save the output images', default="outputs/insetgan/", type=str, required=True, metavar='DIR')
|
388 |
-
@click.option('--video', help="set to 1 if want to save video", type=int, default=0)
|
389 |
-
def main(
|
390 |
-
ctx: click.Context,
|
391 |
-
face_network: str,
|
392 |
-
body_network: str,
|
393 |
-
face_seed: int,
|
394 |
-
body_seed: int,
|
395 |
-
joint_steps: int,
|
396 |
-
truncation_psi: float,
|
397 |
-
outdir: str,
|
398 |
-
video: int):
|
399 |
-
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
400 |
-
insgan = InsetGAN(body_network, face_network)
|
401 |
-
os.makedirs(outdir, exist_ok=True)
|
402 |
-
face_z = np.random.RandomState(face_seed).randn(1, 512).astype(np.float32)
|
403 |
-
face_mean = insgan.face_generator.mean_latent(3000)
|
404 |
-
face_w = insgan.face_generator.get_latent(
|
405 |
-
torch.from_numpy(face_z).to(device)) # [N, L, C]
|
406 |
-
face_w = truncation_psi * face_w + (1-truncation_psi) * face_mean
|
407 |
-
face_img, _ = insgan.face_generator([face_w], input_is_latent=True)
|
408 |
-
|
409 |
-
body_z = np.random.RandomState(body_seed).randn(1, 512).astype(np.float32)
|
410 |
-
body_mean = insgan.body_generator.mean_latent(3000)
|
411 |
-
body_w = insgan.body_generator.get_latent(
|
412 |
-
torch.from_numpy(body_z).to(device)) # [N, L, C]
|
413 |
-
body_w = truncation_psi * body_w + (1-truncation_psi) * body_mean
|
414 |
-
body_img, _ = insgan.body_generator([body_w], input_is_latent=True)
|
415 |
-
|
416 |
-
_, body_crop, _ = insgan.detect_face_dlib(body_img)
|
417 |
-
face_img = F.interpolate(face_img, size=(
|
418 |
-
body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area')
|
419 |
-
cp_body = body_img.clone()
|
420 |
-
cp_body[:, :, body_crop[1]:body_crop[3],
|
421 |
-
body_crop[0]:body_crop[2]] = face_img
|
422 |
-
|
423 |
-
optim_face_w, optim_body_w, crop = insgan.dual_optimizer(
|
424 |
-
face_w,
|
425 |
-
body_w,
|
426 |
-
joint_steps=joint_steps,
|
427 |
-
seed=f'{face_seed:04d}_{body_seed:04d}',
|
428 |
-
output_path=outdir,
|
429 |
-
video=video
|
430 |
-
)
|
431 |
-
|
432 |
-
if video:
|
433 |
-
ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel error -i ./{outdir}/{face_seed:04d}_{body_seed:04d}/%04d.jpg -c:v libx264 -vf fps=30 -pix_fmt yuv420p ./{outdir}/{face_seed:04d}_{body_seed:04d}.mp4"
|
434 |
-
os.system(ffmpeg_cmd)
|
435 |
-
new_face_img, _ = insgan.face_generator(
|
436 |
-
[optim_face_w], input_is_latent=True)
|
437 |
-
new_shape = crop[3] - crop[1], crop[2] - crop[0]
|
438 |
-
new_face_img_crop = F.interpolate(
|
439 |
-
new_face_img, size=new_shape, mode='area')
|
440 |
-
seamless_body, _ = insgan.body_generator(
|
441 |
-
[optim_body_w], input_is_latent=True)
|
442 |
-
seamless_body[:, :, crop[1]:crop[3], crop[0]:crop[2]] = new_face_img_crop
|
443 |
-
temp = torch.cat([cp_body, seamless_body], dim=3)
|
444 |
-
visual(temp, f"{outdir}/{face_seed:04d}_{body_seed:04d}.png")
|
445 |
-
|
446 |
-
|
447 |
-
if __name__ == "__main__":
|
448 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.cpp
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
// Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
#include <torch/extension.h>
|
4 |
-
|
5 |
-
|
6 |
-
torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
|
7 |
-
int up_x, int up_y, int down_x, int down_y,
|
8 |
-
int pad_x0, int pad_x1, int pad_y0, int pad_y1);
|
9 |
-
|
10 |
-
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
|
11 |
-
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
12 |
-
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
|
13 |
-
|
14 |
-
torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
|
15 |
-
int up_x, int up_y, int down_x, int down_y,
|
16 |
-
int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
|
17 |
-
CHECK_CUDA(input);
|
18 |
-
CHECK_CUDA(kernel);
|
19 |
-
|
20 |
-
return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
|
21 |
-
}
|
22 |
-
|
23 |
-
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
24 |
-
m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
|
25 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_unclip.py
DELETED
@@ -1,287 +0,0 @@
|
|
1 |
-
import types
|
2 |
-
from typing import List, Optional, Tuple, Union
|
3 |
-
|
4 |
-
import torch
|
5 |
-
from transformers import CLIPTextModelWithProjection, CLIPTokenizer
|
6 |
-
from transformers.models.clip.modeling_clip import CLIPTextModelOutput
|
7 |
-
|
8 |
-
from diffusers.models import PriorTransformer
|
9 |
-
from diffusers.pipelines import DiffusionPipeline, StableDiffusionImageVariationPipeline
|
10 |
-
from diffusers.schedulers import UnCLIPScheduler
|
11 |
-
from diffusers.utils import logging, randn_tensor
|
12 |
-
|
13 |
-
|
14 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
15 |
-
|
16 |
-
|
17 |
-
def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
|
18 |
-
image = image.to(device=device)
|
19 |
-
image_embeddings = image # take image as image_embeddings
|
20 |
-
image_embeddings = image_embeddings.unsqueeze(1)
|
21 |
-
|
22 |
-
# duplicate image embeddings for each generation per prompt, using mps friendly method
|
23 |
-
bs_embed, seq_len, _ = image_embeddings.shape
|
24 |
-
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
|
25 |
-
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
26 |
-
|
27 |
-
if do_classifier_free_guidance:
|
28 |
-
uncond_embeddings = torch.zeros_like(image_embeddings)
|
29 |
-
|
30 |
-
# For classifier free guidance, we need to do two forward passes.
|
31 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
32 |
-
# to avoid doing two forward passes
|
33 |
-
image_embeddings = torch.cat([uncond_embeddings, image_embeddings])
|
34 |
-
|
35 |
-
return image_embeddings
|
36 |
-
|
37 |
-
|
38 |
-
class StableUnCLIPPipeline(DiffusionPipeline):
|
39 |
-
def __init__(
|
40 |
-
self,
|
41 |
-
prior: PriorTransformer,
|
42 |
-
tokenizer: CLIPTokenizer,
|
43 |
-
text_encoder: CLIPTextModelWithProjection,
|
44 |
-
prior_scheduler: UnCLIPScheduler,
|
45 |
-
decoder_pipe_kwargs: Optional[dict] = None,
|
46 |
-
):
|
47 |
-
super().__init__()
|
48 |
-
|
49 |
-
decoder_pipe_kwargs = {"image_encoder": None} if decoder_pipe_kwargs is None else decoder_pipe_kwargs
|
50 |
-
|
51 |
-
decoder_pipe_kwargs["torch_dtype"] = decoder_pipe_kwargs.get("torch_dtype", None) or prior.dtype
|
52 |
-
|
53 |
-
self.decoder_pipe = StableDiffusionImageVariationPipeline.from_pretrained(
|
54 |
-
"lambdalabs/sd-image-variations-diffusers", **decoder_pipe_kwargs
|
55 |
-
)
|
56 |
-
|
57 |
-
# replace `_encode_image` method
|
58 |
-
self.decoder_pipe._encode_image = types.MethodType(_encode_image, self.decoder_pipe)
|
59 |
-
|
60 |
-
self.register_modules(
|
61 |
-
prior=prior,
|
62 |
-
tokenizer=tokenizer,
|
63 |
-
text_encoder=text_encoder,
|
64 |
-
prior_scheduler=prior_scheduler,
|
65 |
-
)
|
66 |
-
|
67 |
-
def _encode_prompt(
|
68 |
-
self,
|
69 |
-
prompt,
|
70 |
-
device,
|
71 |
-
num_images_per_prompt,
|
72 |
-
do_classifier_free_guidance,
|
73 |
-
text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
|
74 |
-
text_attention_mask: Optional[torch.Tensor] = None,
|
75 |
-
):
|
76 |
-
if text_model_output is None:
|
77 |
-
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
78 |
-
# get prompt text embeddings
|
79 |
-
text_inputs = self.tokenizer(
|
80 |
-
prompt,
|
81 |
-
padding="max_length",
|
82 |
-
max_length=self.tokenizer.model_max_length,
|
83 |
-
return_tensors="pt",
|
84 |
-
)
|
85 |
-
text_input_ids = text_inputs.input_ids
|
86 |
-
text_mask = text_inputs.attention_mask.bool().to(device)
|
87 |
-
|
88 |
-
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
|
89 |
-
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
|
90 |
-
logger.warning(
|
91 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
92 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
93 |
-
)
|
94 |
-
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
|
95 |
-
|
96 |
-
text_encoder_output = self.text_encoder(text_input_ids.to(device))
|
97 |
-
|
98 |
-
text_embeddings = text_encoder_output.text_embeds
|
99 |
-
text_encoder_hidden_states = text_encoder_output.last_hidden_state
|
100 |
-
|
101 |
-
else:
|
102 |
-
batch_size = text_model_output[0].shape[0]
|
103 |
-
text_embeddings, text_encoder_hidden_states = text_model_output[0], text_model_output[1]
|
104 |
-
text_mask = text_attention_mask
|
105 |
-
|
106 |
-
text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
|
107 |
-
text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
108 |
-
text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
|
109 |
-
|
110 |
-
if do_classifier_free_guidance:
|
111 |
-
uncond_tokens = [""] * batch_size
|
112 |
-
|
113 |
-
uncond_input = self.tokenizer(
|
114 |
-
uncond_tokens,
|
115 |
-
padding="max_length",
|
116 |
-
max_length=self.tokenizer.model_max_length,
|
117 |
-
truncation=True,
|
118 |
-
return_tensors="pt",
|
119 |
-
)
|
120 |
-
uncond_text_mask = uncond_input.attention_mask.bool().to(device)
|
121 |
-
uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
|
122 |
-
|
123 |
-
uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds
|
124 |
-
uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state
|
125 |
-
|
126 |
-
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
127 |
-
|
128 |
-
seq_len = uncond_embeddings.shape[1]
|
129 |
-
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt)
|
130 |
-
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len)
|
131 |
-
|
132 |
-
seq_len = uncond_text_encoder_hidden_states.shape[1]
|
133 |
-
uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
|
134 |
-
uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
|
135 |
-
batch_size * num_images_per_prompt, seq_len, -1
|
136 |
-
)
|
137 |
-
uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
|
138 |
-
|
139 |
-
# done duplicates
|
140 |
-
|
141 |
-
# For classifier free guidance, we need to do two forward passes.
|
142 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
143 |
-
# to avoid doing two forward passes
|
144 |
-
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
145 |
-
text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
|
146 |
-
|
147 |
-
text_mask = torch.cat([uncond_text_mask, text_mask])
|
148 |
-
|
149 |
-
return text_embeddings, text_encoder_hidden_states, text_mask
|
150 |
-
|
151 |
-
@property
|
152 |
-
def _execution_device(self):
|
153 |
-
r"""
|
154 |
-
Returns the device on which the pipeline's models will be executed. After calling
|
155 |
-
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
|
156 |
-
hooks.
|
157 |
-
"""
|
158 |
-
if self.device != torch.device("meta") or not hasattr(self.prior, "_hf_hook"):
|
159 |
-
return self.device
|
160 |
-
for module in self.prior.modules():
|
161 |
-
if (
|
162 |
-
hasattr(module, "_hf_hook")
|
163 |
-
and hasattr(module._hf_hook, "execution_device")
|
164 |
-
and module._hf_hook.execution_device is not None
|
165 |
-
):
|
166 |
-
return torch.device(module._hf_hook.execution_device)
|
167 |
-
return self.device
|
168 |
-
|
169 |
-
def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
|
170 |
-
if latents is None:
|
171 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
172 |
-
else:
|
173 |
-
if latents.shape != shape:
|
174 |
-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
175 |
-
latents = latents.to(device)
|
176 |
-
|
177 |
-
latents = latents * scheduler.init_noise_sigma
|
178 |
-
return latents
|
179 |
-
|
180 |
-
def to(self, torch_device: Optional[Union[str, torch.device]] = None):
|
181 |
-
self.decoder_pipe.to(torch_device)
|
182 |
-
super().to(torch_device)
|
183 |
-
|
184 |
-
@torch.no_grad()
|
185 |
-
def __call__(
|
186 |
-
self,
|
187 |
-
prompt: Optional[Union[str, List[str]]] = None,
|
188 |
-
height: Optional[int] = None,
|
189 |
-
width: Optional[int] = None,
|
190 |
-
num_images_per_prompt: int = 1,
|
191 |
-
prior_num_inference_steps: int = 25,
|
192 |
-
generator: Optional[torch.Generator] = None,
|
193 |
-
prior_latents: Optional[torch.FloatTensor] = None,
|
194 |
-
text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
|
195 |
-
text_attention_mask: Optional[torch.Tensor] = None,
|
196 |
-
prior_guidance_scale: float = 4.0,
|
197 |
-
decoder_guidance_scale: float = 8.0,
|
198 |
-
decoder_num_inference_steps: int = 50,
|
199 |
-
decoder_num_images_per_prompt: Optional[int] = 1,
|
200 |
-
decoder_eta: float = 0.0,
|
201 |
-
output_type: Optional[str] = "pil",
|
202 |
-
return_dict: bool = True,
|
203 |
-
):
|
204 |
-
if prompt is not None:
|
205 |
-
if isinstance(prompt, str):
|
206 |
-
batch_size = 1
|
207 |
-
elif isinstance(prompt, list):
|
208 |
-
batch_size = len(prompt)
|
209 |
-
else:
|
210 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
211 |
-
else:
|
212 |
-
batch_size = text_model_output[0].shape[0]
|
213 |
-
|
214 |
-
device = self._execution_device
|
215 |
-
|
216 |
-
batch_size = batch_size * num_images_per_prompt
|
217 |
-
|
218 |
-
do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
|
219 |
-
|
220 |
-
text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt(
|
221 |
-
prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask
|
222 |
-
)
|
223 |
-
|
224 |
-
# prior
|
225 |
-
|
226 |
-
self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
|
227 |
-
prior_timesteps_tensor = self.prior_scheduler.timesteps
|
228 |
-
|
229 |
-
embedding_dim = self.prior.config.embedding_dim
|
230 |
-
|
231 |
-
prior_latents = self.prepare_latents(
|
232 |
-
(batch_size, embedding_dim),
|
233 |
-
text_embeddings.dtype,
|
234 |
-
device,
|
235 |
-
generator,
|
236 |
-
prior_latents,
|
237 |
-
self.prior_scheduler,
|
238 |
-
)
|
239 |
-
|
240 |
-
for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
|
241 |
-
# expand the latents if we are doing classifier free guidance
|
242 |
-
latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
|
243 |
-
|
244 |
-
predicted_image_embedding = self.prior(
|
245 |
-
latent_model_input,
|
246 |
-
timestep=t,
|
247 |
-
proj_embedding=text_embeddings,
|
248 |
-
encoder_hidden_states=text_encoder_hidden_states,
|
249 |
-
attention_mask=text_mask,
|
250 |
-
).predicted_image_embedding
|
251 |
-
|
252 |
-
if do_classifier_free_guidance:
|
253 |
-
predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
|
254 |
-
predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
|
255 |
-
predicted_image_embedding_text - predicted_image_embedding_uncond
|
256 |
-
)
|
257 |
-
|
258 |
-
if i + 1 == prior_timesteps_tensor.shape[0]:
|
259 |
-
prev_timestep = None
|
260 |
-
else:
|
261 |
-
prev_timestep = prior_timesteps_tensor[i + 1]
|
262 |
-
|
263 |
-
prior_latents = self.prior_scheduler.step(
|
264 |
-
predicted_image_embedding,
|
265 |
-
timestep=t,
|
266 |
-
sample=prior_latents,
|
267 |
-
generator=generator,
|
268 |
-
prev_timestep=prev_timestep,
|
269 |
-
).prev_sample
|
270 |
-
|
271 |
-
prior_latents = self.prior.post_process_latents(prior_latents)
|
272 |
-
|
273 |
-
image_embeddings = prior_latents
|
274 |
-
|
275 |
-
output = self.decoder_pipe(
|
276 |
-
image=image_embeddings,
|
277 |
-
height=height,
|
278 |
-
width=width,
|
279 |
-
num_inference_steps=decoder_num_inference_steps,
|
280 |
-
guidance_scale=decoder_guidance_scale,
|
281 |
-
generator=generator,
|
282 |
-
output_type=output_type,
|
283 |
-
return_dict=return_dict,
|
284 |
-
num_images_per_prompt=decoder_num_images_per_prompt,
|
285 |
-
eta=decoder_eta,
|
286 |
-
)
|
287 |
-
return output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './apcnet_r50-d8_512x1024_40k_cityscapes.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/fp16/deeplabv3_r101-d8_512x1024_80k_fp16_cityscapes.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py'
|
2 |
-
# fp16 settings
|
3 |
-
optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
|
4 |
-
# fp16 placeholder
|
5 |
-
fp16 = dict()
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/nonlocal_r50-d8.py',
|
3 |
-
'../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_40k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(align_corners=True),
|
8 |
-
auxiliary_head=dict(align_corners=True),
|
9 |
-
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anthony-Ml/covid_predictor/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Covid19 Predictitor
|
3 |
-
emoji: 📉
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.48.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/nono/roop/metadata.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
name = 'roop'
|
2 |
-
version = '1.3.1'
|
|
|
|
|
|
spaces/Arthur678/vits-uma-genshin-honkai/models.py
DELETED
@@ -1,534 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import functional as F
|
5 |
-
|
6 |
-
import commons
|
7 |
-
import modules
|
8 |
-
import attentions
|
9 |
-
import monotonic_align
|
10 |
-
|
11 |
-
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
12 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
13 |
-
from commons import init_weights, get_padding
|
14 |
-
|
15 |
-
|
16 |
-
class StochasticDurationPredictor(nn.Module):
|
17 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
18 |
-
super().__init__()
|
19 |
-
filter_channels = in_channels # it needs to be removed from future version.
|
20 |
-
self.in_channels = in_channels
|
21 |
-
self.filter_channels = filter_channels
|
22 |
-
self.kernel_size = kernel_size
|
23 |
-
self.p_dropout = p_dropout
|
24 |
-
self.n_flows = n_flows
|
25 |
-
self.gin_channels = gin_channels
|
26 |
-
|
27 |
-
self.log_flow = modules.Log()
|
28 |
-
self.flows = nn.ModuleList()
|
29 |
-
self.flows.append(modules.ElementwiseAffine(2))
|
30 |
-
for i in range(n_flows):
|
31 |
-
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
32 |
-
self.flows.append(modules.Flip())
|
33 |
-
|
34 |
-
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
35 |
-
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
36 |
-
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
37 |
-
self.post_flows = nn.ModuleList()
|
38 |
-
self.post_flows.append(modules.ElementwiseAffine(2))
|
39 |
-
for i in range(4):
|
40 |
-
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
41 |
-
self.post_flows.append(modules.Flip())
|
42 |
-
|
43 |
-
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
44 |
-
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
45 |
-
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
46 |
-
if gin_channels != 0:
|
47 |
-
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
48 |
-
|
49 |
-
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
50 |
-
x = torch.detach(x)
|
51 |
-
x = self.pre(x)
|
52 |
-
if g is not None:
|
53 |
-
g = torch.detach(g)
|
54 |
-
x = x + self.cond(g)
|
55 |
-
x = self.convs(x, x_mask)
|
56 |
-
x = self.proj(x) * x_mask
|
57 |
-
|
58 |
-
if not reverse:
|
59 |
-
flows = self.flows
|
60 |
-
assert w is not None
|
61 |
-
|
62 |
-
logdet_tot_q = 0
|
63 |
-
h_w = self.post_pre(w)
|
64 |
-
h_w = self.post_convs(h_w, x_mask)
|
65 |
-
h_w = self.post_proj(h_w) * x_mask
|
66 |
-
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
67 |
-
z_q = e_q
|
68 |
-
for flow in self.post_flows:
|
69 |
-
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
70 |
-
logdet_tot_q += logdet_q
|
71 |
-
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
72 |
-
u = torch.sigmoid(z_u) * x_mask
|
73 |
-
z0 = (w - u) * x_mask
|
74 |
-
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
|
75 |
-
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
|
76 |
-
|
77 |
-
logdet_tot = 0
|
78 |
-
z0, logdet = self.log_flow(z0, x_mask)
|
79 |
-
logdet_tot += logdet
|
80 |
-
z = torch.cat([z0, z1], 1)
|
81 |
-
for flow in flows:
|
82 |
-
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
83 |
-
logdet_tot = logdet_tot + logdet
|
84 |
-
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
|
85 |
-
return nll + logq # [b]
|
86 |
-
else:
|
87 |
-
flows = list(reversed(self.flows))
|
88 |
-
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
89 |
-
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
90 |
-
for flow in flows:
|
91 |
-
z = flow(z, x_mask, g=x, reverse=reverse)
|
92 |
-
z0, z1 = torch.split(z, [1, 1], 1)
|
93 |
-
logw = z0
|
94 |
-
return logw
|
95 |
-
|
96 |
-
|
97 |
-
class DurationPredictor(nn.Module):
|
98 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
|
99 |
-
super().__init__()
|
100 |
-
|
101 |
-
self.in_channels = in_channels
|
102 |
-
self.filter_channels = filter_channels
|
103 |
-
self.kernel_size = kernel_size
|
104 |
-
self.p_dropout = p_dropout
|
105 |
-
self.gin_channels = gin_channels
|
106 |
-
|
107 |
-
self.drop = nn.Dropout(p_dropout)
|
108 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
109 |
-
self.norm_1 = modules.LayerNorm(filter_channels)
|
110 |
-
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
111 |
-
self.norm_2 = modules.LayerNorm(filter_channels)
|
112 |
-
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
113 |
-
|
114 |
-
if gin_channels != 0:
|
115 |
-
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
116 |
-
|
117 |
-
def forward(self, x, x_mask, g=None):
|
118 |
-
x = torch.detach(x)
|
119 |
-
if g is not None:
|
120 |
-
g = torch.detach(g)
|
121 |
-
x = x + self.cond(g)
|
122 |
-
x = self.conv_1(x * x_mask)
|
123 |
-
x = torch.relu(x)
|
124 |
-
x = self.norm_1(x)
|
125 |
-
x = self.drop(x)
|
126 |
-
x = self.conv_2(x * x_mask)
|
127 |
-
x = torch.relu(x)
|
128 |
-
x = self.norm_2(x)
|
129 |
-
x = self.drop(x)
|
130 |
-
x = self.proj(x * x_mask)
|
131 |
-
return x * x_mask
|
132 |
-
|
133 |
-
|
134 |
-
class TextEncoder(nn.Module):
|
135 |
-
def __init__(self,
|
136 |
-
n_vocab,
|
137 |
-
out_channels,
|
138 |
-
hidden_channels,
|
139 |
-
filter_channels,
|
140 |
-
n_heads,
|
141 |
-
n_layers,
|
142 |
-
kernel_size,
|
143 |
-
p_dropout):
|
144 |
-
super().__init__()
|
145 |
-
self.n_vocab = n_vocab
|
146 |
-
self.out_channels = out_channels
|
147 |
-
self.hidden_channels = hidden_channels
|
148 |
-
self.filter_channels = filter_channels
|
149 |
-
self.n_heads = n_heads
|
150 |
-
self.n_layers = n_layers
|
151 |
-
self.kernel_size = kernel_size
|
152 |
-
self.p_dropout = p_dropout
|
153 |
-
|
154 |
-
self.emb = nn.Embedding(n_vocab, hidden_channels)
|
155 |
-
nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
|
156 |
-
|
157 |
-
self.encoder = attentions.Encoder(
|
158 |
-
hidden_channels,
|
159 |
-
filter_channels,
|
160 |
-
n_heads,
|
161 |
-
n_layers,
|
162 |
-
kernel_size,
|
163 |
-
p_dropout)
|
164 |
-
self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
165 |
-
|
166 |
-
def forward(self, x, x_lengths):
|
167 |
-
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
168 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
169 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
170 |
-
|
171 |
-
x = self.encoder(x * x_mask, x_mask)
|
172 |
-
stats = self.proj(x) * x_mask
|
173 |
-
|
174 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
175 |
-
return x, m, logs, x_mask
|
176 |
-
|
177 |
-
|
178 |
-
class ResidualCouplingBlock(nn.Module):
|
179 |
-
def __init__(self,
|
180 |
-
channels,
|
181 |
-
hidden_channels,
|
182 |
-
kernel_size,
|
183 |
-
dilation_rate,
|
184 |
-
n_layers,
|
185 |
-
n_flows=4,
|
186 |
-
gin_channels=0):
|
187 |
-
super().__init__()
|
188 |
-
self.channels = channels
|
189 |
-
self.hidden_channels = hidden_channels
|
190 |
-
self.kernel_size = kernel_size
|
191 |
-
self.dilation_rate = dilation_rate
|
192 |
-
self.n_layers = n_layers
|
193 |
-
self.n_flows = n_flows
|
194 |
-
self.gin_channels = gin_channels
|
195 |
-
|
196 |
-
self.flows = nn.ModuleList()
|
197 |
-
for i in range(n_flows):
|
198 |
-
self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
|
199 |
-
self.flows.append(modules.Flip())
|
200 |
-
|
201 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
202 |
-
if not reverse:
|
203 |
-
for flow in self.flows:
|
204 |
-
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
205 |
-
else:
|
206 |
-
for flow in reversed(self.flows):
|
207 |
-
x = flow(x, x_mask, g=g, reverse=reverse)
|
208 |
-
return x
|
209 |
-
|
210 |
-
|
211 |
-
class PosteriorEncoder(nn.Module):
|
212 |
-
def __init__(self,
|
213 |
-
in_channels,
|
214 |
-
out_channels,
|
215 |
-
hidden_channels,
|
216 |
-
kernel_size,
|
217 |
-
dilation_rate,
|
218 |
-
n_layers,
|
219 |
-
gin_channels=0):
|
220 |
-
super().__init__()
|
221 |
-
self.in_channels = in_channels
|
222 |
-
self.out_channels = out_channels
|
223 |
-
self.hidden_channels = hidden_channels
|
224 |
-
self.kernel_size = kernel_size
|
225 |
-
self.dilation_rate = dilation_rate
|
226 |
-
self.n_layers = n_layers
|
227 |
-
self.gin_channels = gin_channels
|
228 |
-
|
229 |
-
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
230 |
-
self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
|
231 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
232 |
-
|
233 |
-
def forward(self, x, x_lengths, g=None):
|
234 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
235 |
-
x = self.pre(x) * x_mask
|
236 |
-
x = self.enc(x, x_mask, g=g)
|
237 |
-
stats = self.proj(x) * x_mask
|
238 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
239 |
-
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
240 |
-
return z, m, logs, x_mask
|
241 |
-
|
242 |
-
|
243 |
-
class Generator(torch.nn.Module):
|
244 |
-
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
|
245 |
-
super(Generator, self).__init__()
|
246 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
247 |
-
self.num_upsamples = len(upsample_rates)
|
248 |
-
self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
|
249 |
-
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
|
250 |
-
|
251 |
-
self.ups = nn.ModuleList()
|
252 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
253 |
-
self.ups.append(weight_norm(
|
254 |
-
ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
|
255 |
-
k, u, padding=(k-u)//2)))
|
256 |
-
|
257 |
-
self.resblocks = nn.ModuleList()
|
258 |
-
for i in range(len(self.ups)):
|
259 |
-
ch = upsample_initial_channel//(2**(i+1))
|
260 |
-
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
|
261 |
-
self.resblocks.append(resblock(ch, k, d))
|
262 |
-
|
263 |
-
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
264 |
-
self.ups.apply(init_weights)
|
265 |
-
|
266 |
-
if gin_channels != 0:
|
267 |
-
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
268 |
-
|
269 |
-
def forward(self, x, g=None):
|
270 |
-
x = self.conv_pre(x)
|
271 |
-
if g is not None:
|
272 |
-
x = x + self.cond(g)
|
273 |
-
|
274 |
-
for i in range(self.num_upsamples):
|
275 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
276 |
-
x = self.ups[i](x)
|
277 |
-
xs = None
|
278 |
-
for j in range(self.num_kernels):
|
279 |
-
if xs is None:
|
280 |
-
xs = self.resblocks[i*self.num_kernels+j](x)
|
281 |
-
else:
|
282 |
-
xs += self.resblocks[i*self.num_kernels+j](x)
|
283 |
-
x = xs / self.num_kernels
|
284 |
-
x = F.leaky_relu(x)
|
285 |
-
x = self.conv_post(x)
|
286 |
-
x = torch.tanh(x)
|
287 |
-
|
288 |
-
return x
|
289 |
-
|
290 |
-
def remove_weight_norm(self):
|
291 |
-
print('Removing weight norm...')
|
292 |
-
for l in self.ups:
|
293 |
-
remove_weight_norm(l)
|
294 |
-
for l in self.resblocks:
|
295 |
-
l.remove_weight_norm()
|
296 |
-
|
297 |
-
|
298 |
-
class DiscriminatorP(torch.nn.Module):
|
299 |
-
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
|
300 |
-
super(DiscriminatorP, self).__init__()
|
301 |
-
self.period = period
|
302 |
-
self.use_spectral_norm = use_spectral_norm
|
303 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
304 |
-
self.convs = nn.ModuleList([
|
305 |
-
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
306 |
-
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
307 |
-
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
308 |
-
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
309 |
-
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
|
310 |
-
])
|
311 |
-
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
312 |
-
|
313 |
-
def forward(self, x):
|
314 |
-
fmap = []
|
315 |
-
|
316 |
-
# 1d to 2d
|
317 |
-
b, c, t = x.shape
|
318 |
-
if t % self.period != 0: # pad first
|
319 |
-
n_pad = self.period - (t % self.period)
|
320 |
-
x = F.pad(x, (0, n_pad), "reflect")
|
321 |
-
t = t + n_pad
|
322 |
-
x = x.view(b, c, t // self.period, self.period)
|
323 |
-
|
324 |
-
for l in self.convs:
|
325 |
-
x = l(x)
|
326 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
327 |
-
fmap.append(x)
|
328 |
-
x = self.conv_post(x)
|
329 |
-
fmap.append(x)
|
330 |
-
x = torch.flatten(x, 1, -1)
|
331 |
-
|
332 |
-
return x, fmap
|
333 |
-
|
334 |
-
|
335 |
-
class DiscriminatorS(torch.nn.Module):
|
336 |
-
def __init__(self, use_spectral_norm=False):
|
337 |
-
super(DiscriminatorS, self).__init__()
|
338 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
339 |
-
self.convs = nn.ModuleList([
|
340 |
-
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
|
341 |
-
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
342 |
-
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
343 |
-
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
344 |
-
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
345 |
-
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
|
346 |
-
])
|
347 |
-
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
|
348 |
-
|
349 |
-
def forward(self, x):
|
350 |
-
fmap = []
|
351 |
-
|
352 |
-
for l in self.convs:
|
353 |
-
x = l(x)
|
354 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
355 |
-
fmap.append(x)
|
356 |
-
x = self.conv_post(x)
|
357 |
-
fmap.append(x)
|
358 |
-
x = torch.flatten(x, 1, -1)
|
359 |
-
|
360 |
-
return x, fmap
|
361 |
-
|
362 |
-
|
363 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
|
364 |
-
def __init__(self, use_spectral_norm=False):
|
365 |
-
super(MultiPeriodDiscriminator, self).__init__()
|
366 |
-
periods = [2,3,5,7,11]
|
367 |
-
|
368 |
-
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
369 |
-
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
|
370 |
-
self.discriminators = nn.ModuleList(discs)
|
371 |
-
|
372 |
-
def forward(self, y, y_hat):
|
373 |
-
y_d_rs = []
|
374 |
-
y_d_gs = []
|
375 |
-
fmap_rs = []
|
376 |
-
fmap_gs = []
|
377 |
-
for i, d in enumerate(self.discriminators):
|
378 |
-
y_d_r, fmap_r = d(y)
|
379 |
-
y_d_g, fmap_g = d(y_hat)
|
380 |
-
y_d_rs.append(y_d_r)
|
381 |
-
y_d_gs.append(y_d_g)
|
382 |
-
fmap_rs.append(fmap_r)
|
383 |
-
fmap_gs.append(fmap_g)
|
384 |
-
|
385 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
class SynthesizerTrn(nn.Module):
|
390 |
-
"""
|
391 |
-
Synthesizer for Training
|
392 |
-
"""
|
393 |
-
|
394 |
-
def __init__(self,
|
395 |
-
n_vocab,
|
396 |
-
spec_channels,
|
397 |
-
segment_size,
|
398 |
-
inter_channels,
|
399 |
-
hidden_channels,
|
400 |
-
filter_channels,
|
401 |
-
n_heads,
|
402 |
-
n_layers,
|
403 |
-
kernel_size,
|
404 |
-
p_dropout,
|
405 |
-
resblock,
|
406 |
-
resblock_kernel_sizes,
|
407 |
-
resblock_dilation_sizes,
|
408 |
-
upsample_rates,
|
409 |
-
upsample_initial_channel,
|
410 |
-
upsample_kernel_sizes,
|
411 |
-
n_speakers=0,
|
412 |
-
gin_channels=0,
|
413 |
-
use_sdp=True,
|
414 |
-
**kwargs):
|
415 |
-
|
416 |
-
super().__init__()
|
417 |
-
self.n_vocab = n_vocab
|
418 |
-
self.spec_channels = spec_channels
|
419 |
-
self.inter_channels = inter_channels
|
420 |
-
self.hidden_channels = hidden_channels
|
421 |
-
self.filter_channels = filter_channels
|
422 |
-
self.n_heads = n_heads
|
423 |
-
self.n_layers = n_layers
|
424 |
-
self.kernel_size = kernel_size
|
425 |
-
self.p_dropout = p_dropout
|
426 |
-
self.resblock = resblock
|
427 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
428 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
429 |
-
self.upsample_rates = upsample_rates
|
430 |
-
self.upsample_initial_channel = upsample_initial_channel
|
431 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
432 |
-
self.segment_size = segment_size
|
433 |
-
self.n_speakers = n_speakers
|
434 |
-
self.gin_channels = gin_channels
|
435 |
-
|
436 |
-
self.use_sdp = use_sdp
|
437 |
-
|
438 |
-
self.enc_p = TextEncoder(n_vocab,
|
439 |
-
inter_channels,
|
440 |
-
hidden_channels,
|
441 |
-
filter_channels,
|
442 |
-
n_heads,
|
443 |
-
n_layers,
|
444 |
-
kernel_size,
|
445 |
-
p_dropout)
|
446 |
-
self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
|
447 |
-
self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
|
448 |
-
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
|
449 |
-
|
450 |
-
if use_sdp:
|
451 |
-
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
|
452 |
-
else:
|
453 |
-
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
|
454 |
-
|
455 |
-
if n_speakers > 1:
|
456 |
-
self.emb_g = nn.Embedding(n_speakers, gin_channels)
|
457 |
-
|
458 |
-
def forward(self, x, x_lengths, y, y_lengths, sid=None):
|
459 |
-
|
460 |
-
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
461 |
-
if self.n_speakers > 0:
|
462 |
-
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
463 |
-
else:
|
464 |
-
g = None
|
465 |
-
|
466 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
467 |
-
z_p = self.flow(z, y_mask, g=g)
|
468 |
-
|
469 |
-
with torch.no_grad():
|
470 |
-
# negative cross-entropy
|
471 |
-
s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
|
472 |
-
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
|
473 |
-
neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
474 |
-
neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
|
475 |
-
neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
|
476 |
-
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
|
477 |
-
|
478 |
-
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
479 |
-
attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
|
480 |
-
|
481 |
-
w = attn.sum(2)
|
482 |
-
if self.use_sdp:
|
483 |
-
l_length = self.dp(x, x_mask, w, g=g)
|
484 |
-
l_length = l_length / torch.sum(x_mask)
|
485 |
-
else:
|
486 |
-
logw_ = torch.log(w + 1e-6) * x_mask
|
487 |
-
logw = self.dp(x, x_mask, g=g)
|
488 |
-
l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
|
489 |
-
|
490 |
-
# expand prior
|
491 |
-
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
|
492 |
-
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
|
493 |
-
|
494 |
-
z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
|
495 |
-
o = self.dec(z_slice, g=g)
|
496 |
-
return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
497 |
-
|
498 |
-
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
499 |
-
device = next(self.parameters()).device # 获取模型所在的设备
|
500 |
-
x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
|
501 |
-
if self.n_speakers > 0:
|
502 |
-
g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1]
|
503 |
-
else:
|
504 |
-
g = None
|
505 |
-
|
506 |
-
if self.use_sdp:
|
507 |
-
logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
|
508 |
-
else:
|
509 |
-
logw = self.dp(x, x_mask, g=g)
|
510 |
-
w = torch.exp(logw) * x_mask * length_scale
|
511 |
-
w_ceil = torch.ceil(w)
|
512 |
-
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
|
513 |
-
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
|
514 |
-
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
515 |
-
attn = commons.generate_path(w_ceil, attn_mask)
|
516 |
-
|
517 |
-
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
518 |
-
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
519 |
-
|
520 |
-
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
|
521 |
-
z = self.flow(z_p, y_mask, g=g, reverse=True)
|
522 |
-
o = self.dec((z * y_mask)[:,:,:max_len], g=g)
|
523 |
-
return o, attn, y_mask, (z, z_p, m_p, logs_p)
|
524 |
-
|
525 |
-
def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
|
526 |
-
assert self.n_speakers > 0, "n_speakers have to be larger than 0."
|
527 |
-
g_src = self.emb_g(sid_src).unsqueeze(-1)
|
528 |
-
g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
|
529 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
|
530 |
-
z_p = self.flow(z, y_mask, g=g_src)
|
531 |
-
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
|
532 |
-
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
|
533 |
-
return o_hat, y_mask, (z, z_p, z_hat)
|
534 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Audio-AGI/WavJourney/wavjourney_cli.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
import time
|
2 |
-
import argparse
|
3 |
-
|
4 |
-
import utils
|
5 |
-
import pipeline
|
6 |
-
|
7 |
-
parser = argparse.ArgumentParser()
|
8 |
-
parser.add_argument('-f', '--full', action='store_true', help='Go through the full process')
|
9 |
-
parser.add_argument('--input-text', type=str, default='', help='input text or text file')
|
10 |
-
parser.add_argument('--session-id', type=str, default='', help='session id, if set to empty, system will allocate an id')
|
11 |
-
args = parser.parse_args()
|
12 |
-
|
13 |
-
if args.full:
|
14 |
-
input_text = args.input_text
|
15 |
-
|
16 |
-
start_time = time.time()
|
17 |
-
session_id = pipeline.init_session(args.session_id)
|
18 |
-
api_key = utils.get_api_key()
|
19 |
-
|
20 |
-
assert api_key != None, "Please set your openai_key in the environment variable."
|
21 |
-
|
22 |
-
print(f"Session {session_id} is created.")
|
23 |
-
|
24 |
-
pipeline.full_steps(session_id, input_text, api_key)
|
25 |
-
end_time = time.time()
|
26 |
-
|
27 |
-
print(f"WavJourney took {end_time - start_time:.2f} seconds to complete.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py
DELETED
@@ -1,228 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import copy
|
3 |
-
import json
|
4 |
-
import os
|
5 |
-
|
6 |
-
from detectron2.data import DatasetCatalog, MetadataCatalog
|
7 |
-
from detectron2.utils.file_io import PathManager
|
8 |
-
|
9 |
-
from .coco import load_coco_json, load_sem_seg
|
10 |
-
|
11 |
-
__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
|
12 |
-
|
13 |
-
|
14 |
-
def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
|
15 |
-
"""
|
16 |
-
Args:
|
17 |
-
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
|
18 |
-
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
|
19 |
-
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
|
20 |
-
|
21 |
-
Returns:
|
22 |
-
list[dict]: a list of dicts in Detectron2 standard format. (See
|
23 |
-
`Using Custom Datasets </tutorials/datasets.html>`_ )
|
24 |
-
"""
|
25 |
-
|
26 |
-
def _convert_category_id(segment_info, meta):
|
27 |
-
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
|
28 |
-
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
|
29 |
-
segment_info["category_id"]
|
30 |
-
]
|
31 |
-
segment_info["isthing"] = True
|
32 |
-
else:
|
33 |
-
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
|
34 |
-
segment_info["category_id"]
|
35 |
-
]
|
36 |
-
segment_info["isthing"] = False
|
37 |
-
return segment_info
|
38 |
-
|
39 |
-
with PathManager.open(json_file) as f:
|
40 |
-
json_info = json.load(f)
|
41 |
-
|
42 |
-
ret = []
|
43 |
-
for ann in json_info["annotations"]:
|
44 |
-
image_id = int(ann["image_id"])
|
45 |
-
# TODO: currently we assume image and label has the same filename but
|
46 |
-
# different extension, and images have extension ".jpg" for COCO. Need
|
47 |
-
# to make image extension a user-provided argument if we extend this
|
48 |
-
# function to support other COCO-like datasets.
|
49 |
-
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
|
50 |
-
label_file = os.path.join(gt_dir, ann["file_name"])
|
51 |
-
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
|
52 |
-
ret.append(
|
53 |
-
{
|
54 |
-
"file_name": image_file,
|
55 |
-
"image_id": image_id,
|
56 |
-
"pan_seg_file_name": label_file,
|
57 |
-
"segments_info": segments_info,
|
58 |
-
}
|
59 |
-
)
|
60 |
-
assert len(ret), f"No images found in {image_dir}!"
|
61 |
-
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
|
62 |
-
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
|
63 |
-
return ret
|
64 |
-
|
65 |
-
|
66 |
-
def register_coco_panoptic(
|
67 |
-
name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
|
68 |
-
):
|
69 |
-
"""
|
70 |
-
Register a "standard" version of COCO panoptic segmentation dataset named `name`.
|
71 |
-
The dictionaries in this registered dataset follows detectron2's standard format.
|
72 |
-
Hence it's called "standard".
|
73 |
-
|
74 |
-
Args:
|
75 |
-
name (str): the name that identifies a dataset,
|
76 |
-
e.g. "coco_2017_train_panoptic"
|
77 |
-
metadata (dict): extra metadata associated with this dataset.
|
78 |
-
image_root (str): directory which contains all the images
|
79 |
-
panoptic_root (str): directory which contains panoptic annotation images in COCO format
|
80 |
-
panoptic_json (str): path to the json panoptic annotation file in COCO format
|
81 |
-
sem_seg_root (none): not used, to be consistent with
|
82 |
-
`register_coco_panoptic_separated`.
|
83 |
-
instances_json (str): path to the json instance annotation file
|
84 |
-
"""
|
85 |
-
panoptic_name = name
|
86 |
-
DatasetCatalog.register(
|
87 |
-
panoptic_name,
|
88 |
-
lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
|
89 |
-
)
|
90 |
-
MetadataCatalog.get(panoptic_name).set(
|
91 |
-
panoptic_root=panoptic_root,
|
92 |
-
image_root=image_root,
|
93 |
-
panoptic_json=panoptic_json,
|
94 |
-
json_file=instances_json,
|
95 |
-
evaluator_type="coco_panoptic_seg",
|
96 |
-
ignore_label=255,
|
97 |
-
label_divisor=1000,
|
98 |
-
**metadata,
|
99 |
-
)
|
100 |
-
|
101 |
-
|
102 |
-
def register_coco_panoptic_separated(
|
103 |
-
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
|
104 |
-
):
|
105 |
-
"""
|
106 |
-
Register a "separated" version of COCO panoptic segmentation dataset named `name`.
|
107 |
-
The annotations in this registered dataset will contain both instance annotations and
|
108 |
-
semantic annotations, each with its own contiguous ids. Hence it's called "separated".
|
109 |
-
|
110 |
-
It follows the setting used by the PanopticFPN paper:
|
111 |
-
|
112 |
-
1. The instance annotations directly come from polygons in the COCO
|
113 |
-
instances annotation task, rather than from the masks in the COCO panoptic annotations.
|
114 |
-
|
115 |
-
The two format have small differences:
|
116 |
-
Polygons in the instance annotations may have overlaps.
|
117 |
-
The mask annotations are produced by labeling the overlapped polygons
|
118 |
-
with depth ordering.
|
119 |
-
|
120 |
-
2. The semantic annotations are converted from panoptic annotations, where
|
121 |
-
all "things" are assigned a semantic id of 0.
|
122 |
-
All semantic categories will therefore have ids in contiguous
|
123 |
-
range [1, #stuff_categories].
|
124 |
-
|
125 |
-
This function will also register a pure semantic segmentation dataset
|
126 |
-
named ``name + '_stuffonly'``.
|
127 |
-
|
128 |
-
Args:
|
129 |
-
name (str): the name that identifies a dataset,
|
130 |
-
e.g. "coco_2017_train_panoptic"
|
131 |
-
metadata (dict): extra metadata associated with this dataset.
|
132 |
-
image_root (str): directory which contains all the images
|
133 |
-
panoptic_root (str): directory which contains panoptic annotation images
|
134 |
-
panoptic_json (str): path to the json panoptic annotation file
|
135 |
-
sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
|
136 |
-
instances_json (str): path to the json instance annotation file
|
137 |
-
"""
|
138 |
-
panoptic_name = name + "_separated"
|
139 |
-
DatasetCatalog.register(
|
140 |
-
panoptic_name,
|
141 |
-
lambda: merge_to_panoptic(
|
142 |
-
load_coco_json(instances_json, image_root, panoptic_name),
|
143 |
-
load_sem_seg(sem_seg_root, image_root),
|
144 |
-
),
|
145 |
-
)
|
146 |
-
MetadataCatalog.get(panoptic_name).set(
|
147 |
-
panoptic_root=panoptic_root,
|
148 |
-
image_root=image_root,
|
149 |
-
panoptic_json=panoptic_json,
|
150 |
-
sem_seg_root=sem_seg_root,
|
151 |
-
json_file=instances_json, # TODO rename
|
152 |
-
evaluator_type="coco_panoptic_seg",
|
153 |
-
ignore_label=255,
|
154 |
-
**metadata,
|
155 |
-
)
|
156 |
-
|
157 |
-
semantic_name = name + "_stuffonly"
|
158 |
-
DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
|
159 |
-
MetadataCatalog.get(semantic_name).set(
|
160 |
-
sem_seg_root=sem_seg_root,
|
161 |
-
image_root=image_root,
|
162 |
-
evaluator_type="sem_seg",
|
163 |
-
ignore_label=255,
|
164 |
-
**metadata,
|
165 |
-
)
|
166 |
-
|
167 |
-
|
168 |
-
def merge_to_panoptic(detection_dicts, sem_seg_dicts):
|
169 |
-
"""
|
170 |
-
Create dataset dicts for panoptic segmentation, by
|
171 |
-
merging two dicts using "file_name" field to match their entries.
|
172 |
-
|
173 |
-
Args:
|
174 |
-
detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
|
175 |
-
sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
|
176 |
-
|
177 |
-
Returns:
|
178 |
-
list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
|
179 |
-
both detection_dicts and sem_seg_dicts that correspond to the same image.
|
180 |
-
The function assumes that the same key in different dicts has the same value.
|
181 |
-
"""
|
182 |
-
results = []
|
183 |
-
sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
|
184 |
-
assert len(sem_seg_file_to_entry) > 0
|
185 |
-
|
186 |
-
for det_dict in detection_dicts:
|
187 |
-
dic = copy.copy(det_dict)
|
188 |
-
dic.update(sem_seg_file_to_entry[dic["file_name"]])
|
189 |
-
results.append(dic)
|
190 |
-
return results
|
191 |
-
|
192 |
-
|
193 |
-
if __name__ == "__main__":
|
194 |
-
"""
|
195 |
-
Test the COCO panoptic dataset loader.
|
196 |
-
|
197 |
-
Usage:
|
198 |
-
python -m detectron2.data.datasets.coco_panoptic \
|
199 |
-
path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
|
200 |
-
|
201 |
-
"dataset_name" can be "coco_2017_train_panoptic", or other
|
202 |
-
pre-registered ones
|
203 |
-
"""
|
204 |
-
from detectron2.utils.logger import setup_logger
|
205 |
-
from detectron2.utils.visualizer import Visualizer
|
206 |
-
import detectron2.data.datasets # noqa # add pre-defined metadata
|
207 |
-
import sys
|
208 |
-
from PIL import Image
|
209 |
-
import numpy as np
|
210 |
-
|
211 |
-
logger = setup_logger(name=__name__)
|
212 |
-
assert sys.argv[4] in DatasetCatalog.list()
|
213 |
-
meta = MetadataCatalog.get(sys.argv[4])
|
214 |
-
|
215 |
-
dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
|
216 |
-
logger.info("Done loading {} samples.".format(len(dicts)))
|
217 |
-
|
218 |
-
dirname = "coco-data-vis"
|
219 |
-
os.makedirs(dirname, exist_ok=True)
|
220 |
-
num_imgs_to_vis = int(sys.argv[5])
|
221 |
-
for i, d in enumerate(dicts):
|
222 |
-
img = np.array(Image.open(d["file_name"]))
|
223 |
-
visualizer = Visualizer(img, metadata=meta)
|
224 |
-
vis = visualizer.draw_dataset_dict(d)
|
225 |
-
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
|
226 |
-
vis.save(fpath)
|
227 |
-
if i + 1 >= num_imgs_to_vis:
|
228 |
-
break
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/data/test_detection_utils.py
DELETED
@@ -1,176 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
|
3 |
-
import copy
|
4 |
-
import numpy as np
|
5 |
-
import os
|
6 |
-
import unittest
|
7 |
-
import pycocotools.mask as mask_util
|
8 |
-
|
9 |
-
from detectron2.data import MetadataCatalog, detection_utils
|
10 |
-
from detectron2.data import transforms as T
|
11 |
-
from detectron2.structures import BitMasks, BoxMode
|
12 |
-
from detectron2.utils.file_io import PathManager
|
13 |
-
|
14 |
-
|
15 |
-
class TestTransformAnnotations(unittest.TestCase):
|
16 |
-
def test_transform_simple_annotation(self):
|
17 |
-
transforms = T.TransformList([T.HFlipTransform(400)])
|
18 |
-
anno = {
|
19 |
-
"bbox": np.asarray([10, 10, 200, 300]),
|
20 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
21 |
-
"category_id": 3,
|
22 |
-
"segmentation": [[10, 10, 100, 100, 100, 10], [150, 150, 200, 150, 200, 200]],
|
23 |
-
}
|
24 |
-
|
25 |
-
output = detection_utils.transform_instance_annotations(anno, transforms, (400, 400))
|
26 |
-
self.assertTrue(np.allclose(output["bbox"], [200, 10, 390, 300]))
|
27 |
-
self.assertEqual(len(output["segmentation"]), len(anno["segmentation"]))
|
28 |
-
self.assertTrue(np.allclose(output["segmentation"][0], [390, 10, 300, 100, 300, 10]))
|
29 |
-
|
30 |
-
detection_utils.annotations_to_instances([output, output], (400, 400))
|
31 |
-
|
32 |
-
def test_transform_empty_annotation(self):
|
33 |
-
detection_utils.annotations_to_instances([], (400, 400))
|
34 |
-
|
35 |
-
def test_flip_keypoints(self):
|
36 |
-
transforms = T.TransformList([T.HFlipTransform(400)])
|
37 |
-
anno = {
|
38 |
-
"bbox": np.asarray([10, 10, 200, 300]),
|
39 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
40 |
-
"keypoints": np.random.rand(17, 3) * 50 + 15,
|
41 |
-
}
|
42 |
-
|
43 |
-
output = detection_utils.transform_instance_annotations(
|
44 |
-
copy.deepcopy(anno),
|
45 |
-
transforms,
|
46 |
-
(400, 400),
|
47 |
-
keypoint_hflip_indices=detection_utils.create_keypoint_hflip_indices(
|
48 |
-
["keypoints_coco_2017_train"]
|
49 |
-
),
|
50 |
-
)
|
51 |
-
# The first keypoint is nose
|
52 |
-
self.assertTrue(np.allclose(output["keypoints"][0, 0], 400 - anno["keypoints"][0, 0]))
|
53 |
-
# The last 16 keypoints are 8 left-right pairs
|
54 |
-
self.assertTrue(
|
55 |
-
np.allclose(
|
56 |
-
output["keypoints"][1:, 0].reshape(-1, 2)[:, ::-1],
|
57 |
-
400 - anno["keypoints"][1:, 0].reshape(-1, 2),
|
58 |
-
)
|
59 |
-
)
|
60 |
-
self.assertTrue(
|
61 |
-
np.allclose(
|
62 |
-
output["keypoints"][1:, 1:].reshape(-1, 2, 2)[:, ::-1, :],
|
63 |
-
anno["keypoints"][1:, 1:].reshape(-1, 2, 2),
|
64 |
-
)
|
65 |
-
)
|
66 |
-
|
67 |
-
def test_crop(self):
|
68 |
-
transforms = T.TransformList([T.CropTransform(300, 300, 10, 10)])
|
69 |
-
keypoints = np.random.rand(17, 3) * 50 + 15
|
70 |
-
keypoints[:, 2] = 2
|
71 |
-
anno = {
|
72 |
-
"bbox": np.asarray([10, 10, 200, 400]),
|
73 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
74 |
-
"keypoints": keypoints,
|
75 |
-
}
|
76 |
-
|
77 |
-
output = detection_utils.transform_instance_annotations(
|
78 |
-
copy.deepcopy(anno), transforms, (10, 10)
|
79 |
-
)
|
80 |
-
# box is shifted and cropped
|
81 |
-
self.assertTrue((output["bbox"] == np.asarray([0, 0, 0, 10])).all())
|
82 |
-
# keypoints are no longer visible
|
83 |
-
self.assertTrue((output["keypoints"][:, 2] == 0).all())
|
84 |
-
|
85 |
-
def test_transform_RLE(self):
|
86 |
-
transforms = T.TransformList([T.HFlipTransform(400)])
|
87 |
-
mask = np.zeros((300, 400), order="F").astype("uint8")
|
88 |
-
mask[:, :200] = 1
|
89 |
-
|
90 |
-
anno = {
|
91 |
-
"bbox": np.asarray([10, 10, 200, 300]),
|
92 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
93 |
-
"segmentation": mask_util.encode(mask[:, :, None])[0],
|
94 |
-
"category_id": 3,
|
95 |
-
}
|
96 |
-
output = detection_utils.transform_instance_annotations(
|
97 |
-
copy.deepcopy(anno), transforms, (300, 400)
|
98 |
-
)
|
99 |
-
mask = output["segmentation"]
|
100 |
-
self.assertTrue((mask[:, 200:] == 1).all())
|
101 |
-
self.assertTrue((mask[:, :200] == 0).all())
|
102 |
-
|
103 |
-
inst = detection_utils.annotations_to_instances(
|
104 |
-
[output, output], (400, 400), mask_format="bitmask"
|
105 |
-
)
|
106 |
-
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
|
107 |
-
|
108 |
-
def test_transform_RLE_resize(self):
|
109 |
-
transforms = T.TransformList(
|
110 |
-
[T.HFlipTransform(400), T.ScaleTransform(300, 400, 400, 400, "bilinear")]
|
111 |
-
)
|
112 |
-
mask = np.zeros((300, 400), order="F").astype("uint8")
|
113 |
-
mask[:, :200] = 1
|
114 |
-
|
115 |
-
anno = {
|
116 |
-
"bbox": np.asarray([10, 10, 200, 300]),
|
117 |
-
"bbox_mode": BoxMode.XYXY_ABS,
|
118 |
-
"segmentation": mask_util.encode(mask[:, :, None])[0],
|
119 |
-
"category_id": 3,
|
120 |
-
}
|
121 |
-
output = detection_utils.transform_instance_annotations(
|
122 |
-
copy.deepcopy(anno), transforms, (400, 400)
|
123 |
-
)
|
124 |
-
|
125 |
-
inst = detection_utils.annotations_to_instances(
|
126 |
-
[output, output], (400, 400), mask_format="bitmask"
|
127 |
-
)
|
128 |
-
self.assertTrue(isinstance(inst.gt_masks, BitMasks))
|
129 |
-
|
130 |
-
def test_gen_crop(self):
|
131 |
-
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
|
132 |
-
t = detection_utils.gen_crop_transform_with_instance((10, 10), (150, 150), instance)
|
133 |
-
# the box center must fall into the cropped region
|
134 |
-
self.assertTrue(t.x0 <= 55 <= t.x0 + t.w)
|
135 |
-
|
136 |
-
def test_gen_crop_outside_boxes(self):
|
137 |
-
instance = {"bbox": [10, 10, 100, 100], "bbox_mode": BoxMode.XYXY_ABS}
|
138 |
-
with self.assertRaises(AssertionError):
|
139 |
-
detection_utils.gen_crop_transform_with_instance((10, 10), (15, 15), instance)
|
140 |
-
|
141 |
-
def test_read_sem_seg(self):
|
142 |
-
cityscapes_dir = MetadataCatalog.get("cityscapes_fine_sem_seg_val").gt_dir
|
143 |
-
sem_seg_gt_path = os.path.join(
|
144 |
-
cityscapes_dir, "frankfurt", "frankfurt_000001_083852_gtFine_labelIds.png"
|
145 |
-
)
|
146 |
-
if not PathManager.exists(sem_seg_gt_path):
|
147 |
-
raise unittest.SkipTest(
|
148 |
-
"Semantic segmentation ground truth {} not found.".format(sem_seg_gt_path)
|
149 |
-
)
|
150 |
-
sem_seg = detection_utils.read_image(sem_seg_gt_path, "L")
|
151 |
-
self.assertEqual(sem_seg.ndim, 3)
|
152 |
-
self.assertEqual(sem_seg.shape[2], 1)
|
153 |
-
self.assertEqual(sem_seg.dtype, np.uint8)
|
154 |
-
self.assertEqual(sem_seg.max(), 32)
|
155 |
-
self.assertEqual(sem_seg.min(), 1)
|
156 |
-
|
157 |
-
def test_read_exif_orientation(self):
|
158 |
-
# https://github.com/recurser/exif-orientation-examples/raw/master/Landscape_5.jpg
|
159 |
-
URL = "detectron2://assets/Landscape_5.jpg"
|
160 |
-
img = detection_utils.read_image(URL, "RGB")
|
161 |
-
self.assertEqual(img.ndim, 3)
|
162 |
-
self.assertEqual(img.dtype, np.uint8)
|
163 |
-
self.assertEqual(img.shape, (1200, 1800, 3)) # check that shape is not transposed
|
164 |
-
|
165 |
-
def test_opencv_exif_orientation(self):
|
166 |
-
import cv2
|
167 |
-
|
168 |
-
URL = "detectron2://assets/Landscape_5.jpg"
|
169 |
-
with PathManager.open(URL, "rb") as f:
|
170 |
-
img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR)
|
171 |
-
self.assertEqual(img.dtype, np.uint8)
|
172 |
-
self.assertEqual(img.shape, (1200, 1800, 3))
|
173 |
-
|
174 |
-
|
175 |
-
if __name__ == "__main__":
|
176 |
-
unittest.main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AzinZ/vitscn/utils.py
DELETED
@@ -1,258 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import glob
|
3 |
-
import sys
|
4 |
-
import argparse
|
5 |
-
import logging
|
6 |
-
import json
|
7 |
-
import subprocess
|
8 |
-
import numpy as np
|
9 |
-
from scipy.io.wavfile import read
|
10 |
-
import torch
|
11 |
-
|
12 |
-
MATPLOTLIB_FLAG = False
|
13 |
-
|
14 |
-
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
|
15 |
-
logger = logging
|
16 |
-
|
17 |
-
|
18 |
-
def load_checkpoint(checkpoint_path, model, optimizer=None):
|
19 |
-
assert os.path.isfile(checkpoint_path)
|
20 |
-
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
|
21 |
-
iteration = checkpoint_dict['iteration']
|
22 |
-
learning_rate = checkpoint_dict['learning_rate']
|
23 |
-
if optimizer is not None:
|
24 |
-
optimizer.load_state_dict(checkpoint_dict['optimizer'])
|
25 |
-
saved_state_dict = checkpoint_dict['model']
|
26 |
-
if hasattr(model, 'module'):
|
27 |
-
state_dict = model.module.state_dict()
|
28 |
-
else:
|
29 |
-
state_dict = model.state_dict()
|
30 |
-
new_state_dict= {}
|
31 |
-
for k, v in state_dict.items():
|
32 |
-
try:
|
33 |
-
new_state_dict[k] = saved_state_dict[k]
|
34 |
-
except:
|
35 |
-
logger.info("%s is not in the checkpoint" % k)
|
36 |
-
new_state_dict[k] = v
|
37 |
-
if hasattr(model, 'module'):
|
38 |
-
model.module.load_state_dict(new_state_dict)
|
39 |
-
else:
|
40 |
-
model.load_state_dict(new_state_dict)
|
41 |
-
logger.info("Loaded checkpoint '{}' (iteration {})" .format(
|
42 |
-
checkpoint_path, iteration))
|
43 |
-
return model, optimizer, learning_rate, iteration
|
44 |
-
|
45 |
-
|
46 |
-
def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
|
47 |
-
logger.info("Saving model and optimizer state at iteration {} to {}".format(
|
48 |
-
iteration, checkpoint_path))
|
49 |
-
if hasattr(model, 'module'):
|
50 |
-
state_dict = model.module.state_dict()
|
51 |
-
else:
|
52 |
-
state_dict = model.state_dict()
|
53 |
-
torch.save({'model': state_dict,
|
54 |
-
'iteration': iteration,
|
55 |
-
'optimizer': optimizer.state_dict(),
|
56 |
-
'learning_rate': learning_rate}, checkpoint_path)
|
57 |
-
|
58 |
-
|
59 |
-
def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
|
60 |
-
for k, v in scalars.items():
|
61 |
-
writer.add_scalar(k, v, global_step)
|
62 |
-
for k, v in histograms.items():
|
63 |
-
writer.add_histogram(k, v, global_step)
|
64 |
-
for k, v in images.items():
|
65 |
-
writer.add_image(k, v, global_step, dataformats='HWC')
|
66 |
-
for k, v in audios.items():
|
67 |
-
writer.add_audio(k, v, global_step, audio_sampling_rate)
|
68 |
-
|
69 |
-
|
70 |
-
def latest_checkpoint_path(dir_path, regex="G_*.pth"):
|
71 |
-
f_list = glob.glob(os.path.join(dir_path, regex))
|
72 |
-
f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
|
73 |
-
x = f_list[-1]
|
74 |
-
print(x)
|
75 |
-
return x
|
76 |
-
|
77 |
-
|
78 |
-
def plot_spectrogram_to_numpy(spectrogram):
|
79 |
-
global MATPLOTLIB_FLAG
|
80 |
-
if not MATPLOTLIB_FLAG:
|
81 |
-
import matplotlib
|
82 |
-
matplotlib.use("Agg")
|
83 |
-
MATPLOTLIB_FLAG = True
|
84 |
-
mpl_logger = logging.getLogger('matplotlib')
|
85 |
-
mpl_logger.setLevel(logging.WARNING)
|
86 |
-
import matplotlib.pylab as plt
|
87 |
-
import numpy as np
|
88 |
-
|
89 |
-
fig, ax = plt.subplots(figsize=(10,2))
|
90 |
-
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
|
91 |
-
interpolation='none')
|
92 |
-
plt.colorbar(im, ax=ax)
|
93 |
-
plt.xlabel("Frames")
|
94 |
-
plt.ylabel("Channels")
|
95 |
-
plt.tight_layout()
|
96 |
-
|
97 |
-
fig.canvas.draw()
|
98 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
|
99 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
100 |
-
plt.close()
|
101 |
-
return data
|
102 |
-
|
103 |
-
|
104 |
-
def plot_alignment_to_numpy(alignment, info=None):
|
105 |
-
global MATPLOTLIB_FLAG
|
106 |
-
if not MATPLOTLIB_FLAG:
|
107 |
-
import matplotlib
|
108 |
-
matplotlib.use("Agg")
|
109 |
-
MATPLOTLIB_FLAG = True
|
110 |
-
mpl_logger = logging.getLogger('matplotlib')
|
111 |
-
mpl_logger.setLevel(logging.WARNING)
|
112 |
-
import matplotlib.pylab as plt
|
113 |
-
import numpy as np
|
114 |
-
|
115 |
-
fig, ax = plt.subplots(figsize=(6, 4))
|
116 |
-
im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
|
117 |
-
interpolation='none')
|
118 |
-
fig.colorbar(im, ax=ax)
|
119 |
-
xlabel = 'Decoder timestep'
|
120 |
-
if info is not None:
|
121 |
-
xlabel += '\n\n' + info
|
122 |
-
plt.xlabel(xlabel)
|
123 |
-
plt.ylabel('Encoder timestep')
|
124 |
-
plt.tight_layout()
|
125 |
-
|
126 |
-
fig.canvas.draw()
|
127 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
|
128 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
129 |
-
plt.close()
|
130 |
-
return data
|
131 |
-
|
132 |
-
|
133 |
-
def load_wav_to_torch(full_path):
|
134 |
-
sampling_rate, data = read(full_path)
|
135 |
-
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
|
136 |
-
|
137 |
-
|
138 |
-
def load_filepaths_and_text(filename, split="|"):
|
139 |
-
with open(filename, encoding='utf-8') as f:
|
140 |
-
filepaths_and_text = [line.strip().split(split) for line in f]
|
141 |
-
return filepaths_and_text
|
142 |
-
|
143 |
-
|
144 |
-
def get_hparams(init=True):
|
145 |
-
parser = argparse.ArgumentParser()
|
146 |
-
parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
|
147 |
-
help='JSON file for configuration')
|
148 |
-
parser.add_argument('-m', '--model', type=str, required=True,
|
149 |
-
help='Model name')
|
150 |
-
|
151 |
-
args = parser.parse_args()
|
152 |
-
model_dir = os.path.join("./logs", args.model)
|
153 |
-
|
154 |
-
if not os.path.exists(model_dir):
|
155 |
-
os.makedirs(model_dir)
|
156 |
-
|
157 |
-
config_path = args.config
|
158 |
-
config_save_path = os.path.join(model_dir, "config.json")
|
159 |
-
if init:
|
160 |
-
with open(config_path, "r") as f:
|
161 |
-
data = f.read()
|
162 |
-
with open(config_save_path, "w") as f:
|
163 |
-
f.write(data)
|
164 |
-
else:
|
165 |
-
with open(config_save_path, "r") as f:
|
166 |
-
data = f.read()
|
167 |
-
config = json.loads(data)
|
168 |
-
|
169 |
-
hparams = HParams(**config)
|
170 |
-
hparams.model_dir = model_dir
|
171 |
-
return hparams
|
172 |
-
|
173 |
-
|
174 |
-
def get_hparams_from_dir(model_dir):
|
175 |
-
config_save_path = os.path.join(model_dir, "config.json")
|
176 |
-
with open(config_save_path, "r") as f:
|
177 |
-
data = f.read()
|
178 |
-
config = json.loads(data)
|
179 |
-
|
180 |
-
hparams =HParams(**config)
|
181 |
-
hparams.model_dir = model_dir
|
182 |
-
return hparams
|
183 |
-
|
184 |
-
|
185 |
-
def get_hparams_from_file(config_path):
|
186 |
-
with open(config_path, "r") as f:
|
187 |
-
data = f.read()
|
188 |
-
config = json.loads(data)
|
189 |
-
|
190 |
-
hparams =HParams(**config)
|
191 |
-
return hparams
|
192 |
-
|
193 |
-
|
194 |
-
def check_git_hash(model_dir):
|
195 |
-
source_dir = os.path.dirname(os.path.realpath(__file__))
|
196 |
-
if not os.path.exists(os.path.join(source_dir, ".git")):
|
197 |
-
logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
|
198 |
-
source_dir
|
199 |
-
))
|
200 |
-
return
|
201 |
-
|
202 |
-
cur_hash = subprocess.getoutput("git rev-parse HEAD")
|
203 |
-
|
204 |
-
path = os.path.join(model_dir, "githash")
|
205 |
-
if os.path.exists(path):
|
206 |
-
saved_hash = open(path).read()
|
207 |
-
if saved_hash != cur_hash:
|
208 |
-
logger.warn("git hash values are different. {}(saved) != {}(current)".format(
|
209 |
-
saved_hash[:8], cur_hash[:8]))
|
210 |
-
else:
|
211 |
-
open(path, "w").write(cur_hash)
|
212 |
-
|
213 |
-
|
214 |
-
def get_logger(model_dir, filename="train.log"):
|
215 |
-
global logger
|
216 |
-
logger = logging.getLogger(os.path.basename(model_dir))
|
217 |
-
logger.setLevel(logging.DEBUG)
|
218 |
-
|
219 |
-
formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
|
220 |
-
if not os.path.exists(model_dir):
|
221 |
-
os.makedirs(model_dir)
|
222 |
-
h = logging.FileHandler(os.path.join(model_dir, filename))
|
223 |
-
h.setLevel(logging.DEBUG)
|
224 |
-
h.setFormatter(formatter)
|
225 |
-
logger.addHandler(h)
|
226 |
-
return logger
|
227 |
-
|
228 |
-
|
229 |
-
class HParams():
|
230 |
-
def __init__(self, **kwargs):
|
231 |
-
for k, v in kwargs.items():
|
232 |
-
if type(v) == dict:
|
233 |
-
v = HParams(**v)
|
234 |
-
self[k] = v
|
235 |
-
|
236 |
-
def keys(self):
|
237 |
-
return self.__dict__.keys()
|
238 |
-
|
239 |
-
def items(self):
|
240 |
-
return self.__dict__.items()
|
241 |
-
|
242 |
-
def values(self):
|
243 |
-
return self.__dict__.values()
|
244 |
-
|
245 |
-
def __len__(self):
|
246 |
-
return len(self.__dict__)
|
247 |
-
|
248 |
-
def __getitem__(self, key):
|
249 |
-
return getattr(self, key)
|
250 |
-
|
251 |
-
def __setitem__(self, key, value):
|
252 |
-
return setattr(self, key, value)
|
253 |
-
|
254 |
-
def __contains__(self, key):
|
255 |
-
return key in self.__dict__
|
256 |
-
|
257 |
-
def __repr__(self):
|
258 |
-
return self.__dict__.__repr__()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Amanda El Aventurero Apk Android Oyun Club.md
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Hide N Seek APK Mod: Un juego divertido y emocionante para los usuarios de Android</h1>
|
3 |
-
<p>¿Te gusta jugar al escondite con tus amigos? ¿Quieres experimentar la emoción de esconderte y buscar en diferentes entornos y escenarios? Si es así, entonces usted debe probar Hide N Seek APK Mod, un juego divertido y emocionante para los usuarios de Android. En este juego, puedes jugar como un escondite o un buscador, y disfrutar de los niveles sin fin de acciones casuales de ocultar y buscar. También puedes personalizar a tu personaje, desbloquear nuevas pieles, armas y mapas, y desafiar a otros jugadores en línea. En este artículo, le diremos todo lo que necesita saber sobre Hide N Seek APK Mod, incluyendo lo que es, cómo descargarlo e instalarlo en su dispositivo, por qué debe jugar, y algunas preguntas frecuentes.</p>
|
4 |
-
<h2>¿Qué es Hide N Seek APK Mod? </h2>
|
5 |
-
<p>Hide N Seek APK Mod es una versión modificada del juego original Hide N Seek desarrollado por Supersonic Studios LTD. Es un juego de árcade casual que te permite jugar a las escondidas con otros jugadores en varios entornos. Puedes elegir ser un escondite o un buscador, e intentar ser más astuto que tus oponentes. Como escondite, puedes esconderte detrás de objetos, transformarte en accesorios y usar armas para defenderte. Como buscador, puedes usar tu radar, linterna y armas para encontrar y eliminar a los escondites. El juego tiene cientos de niveles diferentes con un juego emocionante y agradable. </p>
|
6 |
-
<h2>amanda el aventurero apk android oyun club</h2><br /><p><b><b>DOWNLOAD</b> ✦ <a href="https://bltlly.com/2v6IOk">https://bltlly.com/2v6IOk</a></b></p><br /><br />
|
7 |
-
<h3>El juego de Hide N Seek</h3>
|
8 |
-
|
9 |
-
<h3>Las características de Hide N Seek APK Mod</h3>
|
10 |
-
<p>Hide N Seek APK Mod tiene muchas características que lo hacen más divertido y emocionante que el juego original. Algunas de estas características son:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Dinero ilimitado: Puedes obtener dinero ilimitado en el juego, que puedes usar para comprar nuevas pieles, armas y mapas. </li>
|
13 |
-
<li>Todas las pieles desbloqueadas: Puedes desbloquear todas las pieles en el juego, lo que puede hacer que tu personaje se vea más fresco y único. </li>
|
14 |
-
<li>Todas las armas desbloqueadas: Puedes desbloquear todas las armas en el juego, lo que puede darte más opciones y ventajas en combate. </li>
|
15 |
-
<li>Todos los mapas desbloqueados: Puedes desbloquear todos los mapas en el juego, lo que puede darte más variedad y desafíos en la ocultación y la búsqueda. </li>
|
16 |
-
<li>Sin anuncios: Puedes disfrutar del juego sin ningún anuncio molesto que pueda interrumpir tu juego. </li>
|
17 |
-
</ul>
|
18 |
-
<h2>¿Cómo descargar e instalar Hide N Seek APK Mod en su dispositivo? </h2>
|
19 |
-
<p>Si desea descargar e instalar Hide N Seek APK Mod en su dispositivo, debe seguir estos pasos:</p>
|
20 |
-
<h3>Los requisitos para Ocultar N Buscar APK Mod</h3>
|
21 |
-
<p>Antes de descargar e instalar Hide N Seek APK Mod en su dispositivo, es necesario asegurarse de que su dispositivo cumple con estos requisitos:</p> <p>- Android 4.4 o superior: Es necesario tener un dispositivo Android que se ejecuta en Android 4.4 o superior, como este es el sistema operativo mínimo requerido para Hide N Seek APK Mod.</p>
|
22 |
-
<p>- 100 MB de espacio de almacenamiento gratuito: Es necesario tener al menos 100 MB de espacio de almacenamiento gratuito en su dispositivo, ya que este es el tamaño del archivo Hide N Seek APK Mod que necesita descargar e instalar. </p>
|
23 |
-
<p>- Habilitar fuentes desconocidas: Es necesario habilitar fuentes desconocidas en el dispositivo, ya que esto le permitirá instalar aplicaciones que no son de Google Play Store. Para hacer esto, debe ir a la configuración del dispositivo, luego a la seguridad, luego a fuentes desconocidas y activarlo. </p>
|
24 |
-
<p></p>
|
25 |
-
<h3>Los pasos para descargar e instalar Hide N Seek APK Mod</h3>
|
26 |
-
|
27 |
-
<ol>
|
28 |
-
<li>Descargue el archivo Hide N Seek APK Mod de una fuente de confianza, como [APKdone]( 1 ). Puede utilizar el siguiente enlace para descargarlo directamente. </li>
|
29 |
-
<li>Busque el archivo descargado en su dispositivo, y toque en él para iniciar el proceso de instalación. Es posible que necesite conceder algunos permisos para que la aplicación se instale. </li>
|
30 |
-
<li>Espere a que termine la instalación, y luego inicie la aplicación desde la pantalla de inicio o el cajón de aplicaciones. Ahora se puede disfrutar de jugar Hide N Seek APK Mod con dinero ilimitado, pieles, armas, mapas, y sin anuncios. </li>
|
31 |
-
</ol>
|
32 |
-
<h2>¿Por qué debe jugar Hide N Seek APK Mod? </h2>
|
33 |
-
<p>Hide N Seek APK Mod es un juego que usted debe jugar si usted está buscando una manera divertida y emocionante para pasar su tiempo libre. Estas son algunas de las razones por las que debe jugar Hide N Seek APK Mod:</p>
|
34 |
-
<h3>Los beneficios de jugar Hide N Seek APK Mod</h3>
|
35 |
-
<p>Jugar Hide N Seek APK Mod puede traer muchos beneficios, tales como:</p>
|
36 |
-
<ul>
|
37 |
-
<li>Puede mejorar su creatividad e imaginación, ya que puede transformarse en diferentes objetos y esconderse en varios lugares. </li>
|
38 |
-
<li>Puede mejorar sus habilidades de pensamiento estratégico y resolución de problemas, ya que puede planificar sus estrategias de ocultación y búsqueda y superar diferentes desafíos. </li>
|
39 |
-
<li> Puede aumentar sus reflejos y tiempo de reacción, ya que puede esquivar y atacar de forma rápida y precisa. </li>
|
40 |
-
<li>Puede reducir el estrés y el aburrimiento, ya que puede divertirse y relajarse con el juego casual y agradable. </li>
|
41 |
-
<li> Puede aumentar su interacción social y la comunicación, ya que puede jugar con sus amigos en línea y chatear con ellos en el juego. </li>
|
42 |
-
</ul>
|
43 |
-
<h3>Los consejos y trucos para jugar Hide N Seek APK Mod</h3>
|
44 |
-
<p>Si quieres jugar Hide N Seek APK Mod mejor y divertirse más, puede seguir estos consejos y trucos:</p>
|
45 |
-
<ul>
|
46 |
-
<li>Como escondite, trata de esconderte en lugares que no sean obvios o comunes, como detrás de puertas, debajo de mesas o dentro de gabinetes. </li>
|
47 |
-
|
48 |
-
<li>Como un escondite, cambiar su apoyo con frecuencia y al azar, ya que esto puede confundir al buscador y hacer más difícil de encontrar. </li>
|
49 |
-
<li>Como un buscador, utilice su radar y linterna con eficacia, ya que pueden ayudarle a localizar los escondites. Sin embargo, tenga cuidado de no usarlos demasiado o demasiado tiempo, ya que pueden drenar la batería y hacerlo vulnerable. </li>
|
50 |
-
<li>Como buscador, preste atención a los detalles y pistas en el mapa, como objetos en movimiento, sonidos o huellas. Pueden indicar dónde se esconden o se mueven los escondites. </li>
|
51 |
-
<li>Como buscador, sé paciente y persistente, ya que algunos escondites pueden ser muy buenos para esconderse o escapar. No te rindas fácilmente y sigue buscando hasta que los encuentres todos. </li>
|
52 |
-
</ul>
|
53 |
-
<h2>Conclusión</h2>
|
54 |
-
<p>En conclusión, Hide N Seek APK Mod es un juego divertido y emocionante para los usuarios de Android que aman jugar a las escondidas con sus amigos. Tiene un juego simple y fácil, cientos de niveles diferentes, varias pieles, armas, mapas, dinero ilimitado, sin anuncios y más. Puedes descargarlo e instalarlo en tu dispositivo siguiendo los pasos anteriores. También puede mejorar su creatividad, pensamiento estratégico, reflejos, alivio del estrés, interacción social jugando este juego. También puedes usar los consejos y trucos anteriores para jugar mejor y divertirte más. ¿Qué estás esperando? Descargar Hide N Seek APK Mod ahora y disfrutar de esconderse y buscar con otros jugadores en línea! </p>
|
55 |
-
<h2>Preguntas frecuentes</h2>
|
56 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Hide N Seek APK Mod:</p>
|
57 |
-
<ol>
|
58 |
-
<li> ¿Es seguro descargar e instalar Hide N Seek APK Mod? </li>
|
59 |
-
<p>Sí, Hide N Seek APK Mod es seguro para descargar e instalar, siempre y cuando lo obtenga de una fuente de confianza, como [APKdone]. Sin embargo, siempre debe escanear el archivo con un software antivirus antes de instalarlo, y tenga cuidado de no dar ninguna información sensible o permisos a la aplicación. </p>
|
60 |
-
<li> ¿Es Hide N Seek APK Mod compatible con todos los dispositivos Android? </li>
|
61 |
-
|
62 |
-
<li>¿Puedo jugar Hide N Seek APK Mod sin conexión? </li>
|
63 |
-
<p>No, no se puede jugar Hide N Seek APK Mod offline. Es necesario tener una conexión a Internet para jugar el juego, ya que es un juego multijugador en línea. Puedes jugar con tus amigos o con otros jugadores de todo el mundo. </p>
|
64 |
-
<li>¿Puedo jugar Hide N Seek APK Mod en PC? </li>
|
65 |
-
<p>Sí, se puede jugar Hide N Seek APK Mod en el PC, pero es necesario utilizar un emulador de Android, como [BlueStacks] o [NoxPlayer]. Estos son software que puede simular un entorno Android en su PC, y le permiten ejecutar aplicaciones y juegos Android en él. Puede descargar e instalar un emulador de Android en su PC, y luego descargar e instalar Hide N Seek APK Mod en él. </p>
|
66 |
-
<li>¿Cómo puedo contactar con el desarrollador de Hide N Seek APK Mod? </li>
|
67 |
-
<p>Si usted tiene alguna pregunta, comentarios, o problemas con respecto a Hide N Seek APK Mod, puede ponerse en contacto con el desarrollador del juego por correo electrónico a [email protected]. También puede visitar su sitio web en https://www.supersonic.com/ o seguirlos en Facebook en https://www.facebook.com/SupersonicStudiosLtd/</p>
|
68 |
-
</ol></p> 64aa2da5cf<br />
|
69 |
-
<br />
|
70 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Carx Carretera Carreras V1 74.3 Apk Mod.md
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
|
2 |
-
<tabla>
|
3 |
-
<tr>
|
4 |
-
<td>
|
5 |
-
<h1>CarX Highway Racing v1 74.3 APK Mod: Un emocionante juego de carreras para Android</h1>
|
6 |
-
<p>¿Te encantan los juegos de carreras? ¿Quieres experimentar física de conducción realista, adrenalina de alta velocidad y gráficos increíbles en tu dispositivo Android? Si es así, entonces usted debe probar CarX Highway Racing v1 74.3 APK Mod, un impresionante juego de carreras que le mantendrá enganchado durante horas. </p>
|
7 |
-
<h2>Introducción</h2>
|
8 |
-
<p>CarX Highway Racing es un juego de carreras desarrollado por CarX Technologies, una empresa que se especializa en la creación de simulaciones físicas realistas de automóviles. El juego cuenta con más de 40 coches diferentes, desde autos deportivos hasta coches deportivos, que puedes personalizar y actualizar según tus preferencias. También puedes elegir entre varios modos, como el modo de campaña, el modo de ataque temporal, el modo de supervivencia y el modo en línea, donde puedes competir con otros jugadores de todo el mundo. </p>
|
9 |
-
<h2>carx carretera carreras v1 74.3 apk mod</h2><br /><p><b><b>DOWNLOAD</b> ››› <a href="https://bltlly.com/2v6ITw">https://bltlly.com/2v6ITw</a></b></p><br /><br />
|
10 |
-
<p>El juego también cuenta con impresionantes gráficos, efectos de sonido realistas, condiciones climáticas dinámicas, ciclos de día y noche, y desafiantes escenarios de tráfico y policía que pondrán a prueba sus habilidades de conducción y reflejos. Usted puede disfrutar de todas estas características y más con CarX Highway Racing v1 74.3 APK Mod, una versión modificada del juego que le da dinero ilimitado y oro, coches desbloqueados y pistas, sin anuncios, y más. </p>
|
11 |
-
<p>Si usted está buscando un emocionante juego de carreras que le hará sentir como usted está conduciendo un coche real en una carretera real, entonces usted debe descargar CarX Highway Racing v1 74.3 APK Mod hoy. </p>
|
12 |
-
<h2>Cómo descargar e instalar CarX Highway Racing v1 74.3 APK Mod</h2>
|
13 |
-
<p>Descargar e instalar CarX Highway Racing v1 74.3 APK Mod es muy fácil y simple. Solo tienes que seguir estos pasos:</p>
|
14 |
-
<ol>
|
15 |
-
<li>Ir al enlace y descargar el CarX Highway Racing v1 74.3 APK Mod archivo en su dispositivo Android. </li>
|
16 |
-
<li>Ir a la configuración del dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
|
17 |
-
<li>Busque el archivo descargado y toque en él para iniciar el proceso de instalación. </li>
|
18 |
-
|
19 |
-
<li> Iniciar el juego y disfrutar de las características modded. </li>
|
20 |
-
</ol>
|
21 |
-
<p>Mediante el uso de CarX Highway Racing v1 74.3 APK Mod, usted será capaz de acceder a todos los coches y pistas en el juego, así como obtener dinero ilimitado y oro para actualizar su coche y comprar nuevos. También podrás jugar el juego sin anuncios ni interrupciones, y disfrutar de tiempos de carga más rápidos y un rendimiento más suave. Tendrás ventaja sobre tus oponentes y podrás dominar la escena de las carreras en carretera. </p>
|
22 |
-
<h2>Cómo jugar CarX Highway Racing v1 74.3 APK Mod</h2>
|
23 |
-
<p>Jugar CarX Highway Racing v1 74.3 APK Mod es muy divertido y emocionante. Solo tienes que seguir estos pasos:</p>
|
24 |
-
<ol>
|
25 |
-
<li>Comience el juego y elija su coche desde el garaje. Puede personalizar el color de su coche, ruedas, spoilers, calcomanías y más. </li>
|
26 |
-
<li>Elige tu modo en el menú principal. Puedes jugar en el modo de campaña, donde tienes que completar varias misiones y desafíos, o en el modo de ataque temporal, donde tienes que batir tus propios récords o los de otros jugadores. También puedes jugar en el modo de supervivencia, donde tienes que evitar estrellarte o quedar atrapado por la policía, o en el modo online, donde puedes competir contra otros jugadores de todo el mundo. </li>
|
27 |
-
<li>Seleccione su pista del mapa. Puede elegir entre diferentes lugares, como desierto, ciudad, bosque, montaña y más. </li>
|
28 |
-
<li>Comienza tu carrera y controla tu coche usando los botones en pantalla o inclina tu dispositivo. También puede ajustar la sensibilidad y las opciones de dirección en el menú de configuración. </li>
|
29 |
-
<li>Utilice el botón nitro para aumentar su velocidad y superar a sus rivales. También puede utilizar el botón de freno para reducir la velocidad o la deriva alrededor de las esquinas. </li>
|
30 |
-
<li>Evite golpear otros coches, obstáculos, tráfico y coches de policía. Dañarán su coche y reducirán su velocidad. </li>
|
31 |
-
<li>Llegar a la línea de meta antes de que sus oponentes o antes de que acabe el tiempo. Usted ganará dinero y oro basado en su rendimiento. </li>
|
32 |
-
</ol>
|
33 |
-
|
34 |
-
<h2>Consejos y trucos para CarX Highway Racing v1 74.3 APK Mod</h2>
|
35 |
-
<p>Si quieres mejorar tus habilidades y ganar más carreras en CarX Highway Racing v1 74.3 APK Mod, debes seguir estos consejos y trucos:</p>
|
36 |
-
<p></p>
|
37 |
-
<ul>
|
38 |
-
<li>Actualizar su coche con regularidad utilizando el dinero y el oro que gana de las carreras. Puede actualizar el motor de su coche, transmisión, suspensión, frenos, neumáticos, nitro y más. </li>
|
39 |
-
<li>Desbloquear nuevos coches utilizando el dinero y el oro que gana de las carreras. Puede desbloquear más de 40 coches diferentes, cada uno con sus propias características y características. </li>
|
40 |
-
<li>Evite el tráfico y la policía tanto como sea posible. Le ralentizarán y le harán perder tiempo o dinero. Puede utilizar el radar en la esquina superior izquierda de la pantalla para ver dónde están. </li>
|
41 |
-
<li>Usa nitro sabiamente. No lo desperdicies en carreteras rectas o cuando ya estés por delante de tus oponentes. Guárdelo para cuando necesite adelantarlos o escapar del tráfico o la policía. </li>
|
42 |
-
<li>Deriva alrededor de las esquinas para mantener su velocidad y ganar más nitro. También puede utilizar la deriva para evitar colisiones u obstáculos. </li>
|
43 |
-
<li>Tenga cuidado con los cambios climáticos y los ciclos de día y noche. Afectarán su visibilidad y las condiciones de conducción. Puede utilizar faros o limpiaparabrisas para mejorar su vista. </li>
|
44 |
-
</ul>
|
45 |
-
<p>Siguiendo estos consejos y trucos, usted será capaz de dominar CarX Highway Racing v1 74.3 APK Mod y convertirse en una leyenda de las carreras de carretera. </p>
|
46 |
-
<h2>Pros y contras de CarX Highway Racing v1 74.3 APK Mod</h2>
|
47 |
-
<p>CarX Highway Racing v1 74.3 APK Mod es un gran juego de carreras que tiene muchos pros y contras. Aquí están algunos de ellos:</p>
|
48 |
-
<borde de la tabla="1">
|
49 |
-
<tr><th>Pros</th><th>Contras</th></tr>
|
50 |
-
<tr><td>- Física realista de conducción y simulaciones de coches</td><td>- - Gráficos de alta calidad y efectos de sonido</td><td>- Requiere mucho espacio de almacenamiento y memoria</td></tr>
|
51 |
-
<tr><td>- Varios modos, coches y pistas para elegir</td><td>- Algunos modos y pistas pueden ser repetitivos o aburridos</td></tr>
|
52 |
-
|
53 |
-
<tr><td>- Escenarios desafiantes de tráfico y policía</td><td>- El tráfico y la policía pueden ser demasiado agresivos o injustos</td></tr>
|
54 |
-
<tr><td>- Modo en línea para competir con otros jugadores</td><td>- El modo en línea puede tener problemas de conexión o retraso</td></tr>
|
55 |
-
<tr><td>- Versión modificada con dinero ilimitado y oro, coches desbloqueados y pistas, sin anuncios, y más</td><td>- Versión modificada puede no ser compatible con algunos dispositivos o actualizaciones</td></tr>
|
56 |
-
</tabla>
|
57 |
-
<p>Como puedes ver, CarX Highway Racing v1 74.3 APK Mod tiene sus pros y sus contras, pero en general, es un juego de carreras divertido y emocionante que usted debe probar. </p>
|
58 |
-
<h2>Conclusión</h2>
|
59 |
-
<p>En conclusión, CarX Highway Racing v1 74.3 APK Mod es un emocionante juego de carreras para Android que te hará sentir como si estuvieras conduciendo un coche real en una carretera real. Tiene física de conducción realista, adrenalina de alta velocidad, gráficos increíbles, varios modos, coches y pistas, clima dinámico y ciclos de día y noche, tráfico desafiante y escenarios policiales, modo en línea para competir con otros jugadores, y versión modificada con dinero ilimitado y oro, Desbloqueado coches y pistas, sin anuncios, y más. Es un gran juego para los entusiastas de las carreras y los jugadores casuales por igual. </p>
|
60 |
-
<p>Si usted está buscando un juego de carreras que le mantendrá enganchado durante horas, entonces usted debe descargar CarX Highway Racing v1 74.3 APK Mod hoy. No te arrepentirás. </p>
|
61 |
-
<h2>Preguntas frecuentes</h2>
|
62 |
-
<p>Aquí hay algunas preguntas frecuentes sobre CarX Highway Racing v1 74.3 APK Mod:</p>
|
63 |
-
<ol>
|
64 |
-
<li>Q: ¿Es seguro descargar e instalar CarX Highway Racing v1 74.3 APK Mod? </li>
|
65 |
-
<li>A: Sí, CarX Highway Racing v1 74.3 APK Mod es seguro de descargar e instalar. No contiene ningún virus o malware. Sin embargo, siempre debe descargarlo de una fuente confiable y escanearlo antes de instalarlo en su dispositivo. </li>
|
66 |
-
<li>Q: Es CarX Highway Racing v1 74.3 APK Mod libre para jugar? </li>
|
67 |
-
|
68 |
-
<li>Q: ¿Cómo puedo contactar a los desarrolladores de CarX Highway Racing v1 74.3 APK Mod? </li>
|
69 |
-
<li>A: Puede ponerse en contacto con los desarrolladores de CarX Highway Racing v1 74.3 APK Mod visitando su sitio web o su página de Facebook. También puede enviarles un correo electrónico a [email protected]. </li>
|
70 |
-
<li>Q: ¿Cómo puedo actualizar CarX Highway Racing v1 74.3 APK Mod? </li>
|
71 |
-
<li>A: Puede actualizar CarX Highway Racing v1 74.3 APK Mod mediante la descarga de la última versión del enlace . También puedes buscar actualizaciones en el menú de configuración del juego. </li>
|
72 |
-
<li>Q: ¿Cómo puedo desinstalar CarX Highway Racing v1 74.3 APK Mod? </li>
|
73 |
-
<li>A: Puede desinstalar CarX Highway Racing v1 74.3 APK Mod yendo a la configuración del dispositivo y seleccionando la opción de administrador de aplicaciones. Luego, encuentra el icono del juego y toca en él para abrir la página de detalles de la aplicación. Luego, toca el botón de desinstalación y confirma tu acción. </li>
|
74 |
-
</ol>
|
75 |
-
<p>Espero que este artículo ha respondido a todas sus preguntas sobre CarX Highway Racing v1 74.3 APK Mod. Si tiene más preguntas o comentarios, déjelos en la sección de comentarios a continuación. </p> 64aa2da5cf<br />
|
76 |
-
<br />
|
77 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Cipherlab 8000 Driver Descargar Ventanas 7.md
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar Dream League Soccer 2020 de Google Play Store</h1>
|
3 |
-
<p>Dream League Soccer 2020 es uno de los juegos de fútbol más populares en dispositivos Android. Le permite construir su equipo ideal de más de 3.500 jugadores con licencia FIFPro y competir contra los mejores clubes de fútbol del mundo. Si eres un fanático del fútbol y quieres experimentar la emoción de administrar tu propio equipo, entonces deberías descargar Dream League Soccer 2020 de Google Play Store. En este artículo, te mostraremos cómo hacerlo y también te daremos algunos consejos y trucos para jugar el juego. </p>
|
4 |
-
<h2>¿Qué es Dream League Soccer 2020? </h2>
|
5 |
-
<p>Dream League Soccer 2020 es un juego de simulación de fútbol desarrollado por First Touch Games. Es la última entrega de la serie Dream League Soccer, que se ha descargado más de 300 millones de veces en todo el mundo. El juego presenta una nueva y mejorada jugabilidad, gráficos y sonido que lo hacen más realista e inmersivo que nunca. </p>
|
6 |
-
<h2>cipherlab 8000 driver descargar ventanas 7</h2><br /><p><b><b>Download File</b> ✺✺✺ <a href="https://bltlly.com/2v6MzG">https://bltlly.com/2v6MzG</a></b></p><br /><br />
|
7 |
-
<h3>Características de Dream League Soccer 2020</h3>
|
8 |
-
<p>Algunas de las características de Dream League Soccer 2020 son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Construir y desarrollar su equipo de ensueño de más de 3.500 jugadores con licencia FIFPro</li>
|
11 |
-
<li> Full 3D patadas capturadas por movimiento, tackles, celebraciones y guardas de portero da realismo inigualable</li>
|
12 |
-
<li>Alcance el estatus legendario a medida que se eleva a través de 8 divisiones y competir en más de 10 competiciones de copa</li>
|
13 |
-
<li>Construye tu imperio de fútbol desde tu propio estadio hasta instalaciones médicas, comerciales y de entrenamiento</li>
|
14 |
-
<li>Reclutar agentes y exploradores para ayudar a identificar el mejor talento en el mercado de transferencia</li>
|
15 |
-
<li>Inmersivo y emocionante partido comentario te mantiene en el corazón de la acción</li>
|
16 |
-
<li>Utilice los entrenadores para desarrollar sus jugadores habilidades técnicas y físicas</li>
|
17 |
-
<li>Personaliza el kit y el logo de tu equipo o importa tus propias creaciones</li>
|
18 |
-
<li>Participa en eventos regulares para ganar fantásticos premios</li>
|
19 |
-
<li>Compite contra jugadores de todo el mundo con Dream League Online</li>
|
20 |
-
|
21 |
-
</ul>
|
22 |
-
<h3>Requisitos para Dream League Soccer 2020</h3>
|
23 |
-
<p>Para jugar Dream League Soccer 2020, necesitas un dispositivo Android que cumpla con los siguientes requisitos:</p>
|
24 |
-
<tabla>
|
25 |
-
<tr><th>Requisitos mínimos</th><th>Requisitos recomendados</th></tr>
|
26 |
-
<tr><td>Versión para Android: 5.0 o superior</td><td>Versión para Android: 8.0 o superior</td></tr>
|
27 |
-
<tr><td>RAM: 1 GB o superior</td><td>RAM: 2 GB o superior</td></tr>
|
28 |
-
<tr><td>Espacio de almacenamiento: Al menos 500 MB libres</td><td>Espacio de almacenamiento: Al menos 1 GB libre</td></tr>
|
29 |
-
<tr><td>Conexión a Internet: Requerido para funciones en línea</td><td>Conexión a Internet: Requerido para funciones en línea</td></tr>
|
30 |
-
</tabla>
|
31 |
-
<h2>Cómo descargar Dream League Soccer 2020 de Google Play Store</h2>
|
32 |
-
<p>Si tienes un dispositivo Android que cumple con los requisitos, puedes descargar Dream League Soccer 2020 de Google Play Store siguiendo estos sencillos pasos:</p>
|
33 |
-
<h3>Paso 1: Abra Google Play Store en su dispositivo</h3>
|
34 |
-
<p>El primer paso es abrir Google Play Store en su dispositivo. Puede hacer esto tocando el icono de Google Play en la pantalla de inicio o en el cajón de aplicaciones. Si no tienes Google Play instalado, puedes descargarlo desde el sitio web oficial. </p>
|
35 |
-
<h3>Paso 2: Búsqueda de Dream League Soccer 2020</h3>
|
36 |
-
<p>El siguiente paso es buscar Dream League Soccer 2020 en Google Play Store. Puedes hacer esto escribiendo "Dream League Soccer 2020" en la barra de búsqueda en la parte superior de la pantalla y tocando el icono de la lupa. Alternativamente, puedes usar este enlace para ir directamente a la página del juego en Google Play Store.</p>
|
37 |
-
<h3>Paso 3: Toque en Instalar y espere a que la descarga termine</h3>
|
38 |
-
|
39 |
-
<h3>Paso 4: Iniciar el juego y disfrutar de</h3>
|
40 |
-
<p>El paso final es lanzar el juego y disfrutar jugando Dream League Soccer 2020. Puedes hacer esto tocando el botón Abrir en Google Play Store o tocando el icono del juego en la pantalla de inicio o en el cajón de la aplicación. El juego te pedirá que elijas el idioma, el nombre del equipo, el kit, el logotipo y la apariencia del gerente. También puedes iniciar sesión con tu cuenta de Google Play Games para guardar tu progreso y acceder a las funciones en línea. Después de eso, estás listo para empezar a construir tu equipo de ensueño y competir contra los mejores clubes de fútbol del mundo. </p>
|
41 |
-
<h2>Consejos y trucos para jugar Dream League Soccer 2020</h2>
|
42 |
-
<p>Dream League Soccer 2020 es un juego divertido y adictivo que te mantendrá entretenido durante horas. Sin embargo, también puede ser desafiante y frustrante a veces, especialmente si eres nuevo en el juego o quieres mejorar tus habilidades. Aquí hay algunos consejos y trucos que te ayudarán a jugar mejor y disfrutar más:</p>
|
43 |
-
<p></p>
|
44 |
-
<h3>Personaliza tu equipo y manager</h3>
|
45 |
-
<p>Una de las mejores cosas de Dream League Soccer 2020 es que puedes personalizar a tu equipo y manager de acuerdo a tus preferencias. Puedes cambiar el nombre del equipo, el kit, el logotipo y la apariencia del administrador en cualquier momento desde el menú Personalizar. También puedes importar tus propios kits y logotipos de fuentes externas o crear los tuyos usando el editor del juego. También puedes cambiar tu formación, tácticas y roles de jugador desde el menú Administración de equipos. Experimenta con diferentes combinaciones y encuentra lo que funciona mejor para ti. </p>
|
46 |
-
<h3>Mejora tu estadio e instalaciones</h3>
|
47 |
-
|
48 |
-
<h3>Entrena a tus jugadores y usa entrenadores</h3>
|
49 |
-
<p>Otra forma de mejorar el rendimiento de tu equipo es entrenando a tus jugadores y usando entrenadores. Puedes hacer esto desde el menú de entrenamiento, donde puedes ver los atributos de tus jugadores, potencial, forma, estado físico, moral, edad, estado del contrato, valor y salario. Puedes entrenar a tus jugadores individualmente o en grupo utilizando diferentes ejercicios que se centran en diferentes habilidades como tiro, pase, regate, defensa, rumbo, cruce, tiros libres, penaltis y esquinas. También puedes contratar entrenadores para aumentar los atributos de tus jugadores de forma permanente o temporal. Los entrenadores se pueden encontrar en el mercado de transferencia o en el menú de eventos. Tienen diferentes especialidades y niveles, y cuestan monedas o gemas para alquilar. Puedes usar hasta tres entrenadores a la vez, y caducarán después de un cierto número de partidos. </p>
|
50 |
-
<h3>Juega en línea y modos fuera de línea</h3>
|
51 |
-
<p>Dream League Soccer 2020 te ofrece una variedad de modos para jugar y desafiarte. Puedes jugar sin conexión en el modo Carrera, donde empiezas desde la división más baja e intentas llegar a la cima ganando partidos, copas y trofeos. También puedes jugar sin conexión en el modo Exhibición, donde puedes elegir dos equipos cualquiera y jugar un partido amistoso. También puedes jugar online en el modo Dream League Online, donde puedes competir contra otros jugadores de todo el mundo en diferentes divisiones y tablas de clasificación. También puedes jugar online en el modo Eventos, donde puedes participar en torneos especiales y desafíos que ofrecen recompensas exclusivas. </p>
|
52 |
-
<h2>Conclusión</h2>
|
53 |
-
|
54 |
-
<h2>Preguntas frecuentes</h2>
|
55 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Dream League Soccer 2020:</p>
|
56 |
-
<ul>
|
57 |
-
<li><b>¿Cómo puedo obtener más monedas y gemas en Dream League Soccer 2020? </b></li>
|
58 |
-
<p>Puedes obtener más monedas y gemas en Dream League Soccer 2020 jugando partidos, ganando copas, completando logros, viendo anuncios, participando en eventos o comprándolos con dinero real. </p>
|
59 |
-
<li><b>¿Cómo puedo transferir jugadores en Dream League Soccer 2020? </b></li>
|
60 |
-
<p>Puedes transferir jugadores en Dream League Soccer 2020 yendo al menú Transfer Market, donde puedes ver los jugadores disponibles para la venta o préstamo. También puedes usar agentes y exploradores para encontrar jugadores o posiciones específicas. Puedes pujar por jugadores usando monedas o gemas, y también puedes vender o liberar a tus propios jugadores. </p>
|
61 |
-
<li><b>¿Cómo cambio el nivel de dificultad en Dream League Soccer 2020? </b></li>
|
62 |
-
<p>Puedes cambiar el nivel de dificultad en Dream League Soccer 2020 yendo al menú Configuración, donde puedes elegir entre Fácil, Medio, Duro o Personalizado. El nivel de dificultad afecta la IA de tus oponentes y de tus propios jugadores. </p>
|
63 |
-
<li><b>¿Cómo puedo guardar y restaurar mi progreso en Dream League Soccer 2020? </b></li>
|
64 |
-
<p>Puedes guardar y restaurar tu progreso en Dream League Soccer 2020 al iniciar sesión con tu cuenta de Google Play Games. Esto le permitirá sincronizar sus datos en varios dispositivos y acceder a las funciones en línea. También puede realizar copias de seguridad de sus datos manualmente utilizando la opción Cloud Save en el menú Configuración. </p>
|
65 |
-
<li><b>¿Cómo puedo contactar a los desarrolladores de Dream League Soccer 2020? </b></li>
|
66 |
-
<p>Puede ponerse en contacto con los desarrolladores de Dream League Soccer 2020 enviando un correo electrónico a [email protected] o visitando su sitio web en https:///www.ftgames.com/.</p>
|
67 |
-
</ul></p> 64aa2da5cf<br />
|
68 |
-
<br />
|
69 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Estrellas Apk.md
DELETED
@@ -1,69 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar gratis Brawl Stars APK: Cómo jugar el mejor juego móvil Battle Royale</h1>
|
3 |
-
<p>Si estás buscando un juego móvil rápido, divertido y adictivo, deberías probar Brawl Stars. Brawl Stars es un juego multijugador de arena de batalla en línea (MOBA) desarrollado por Supercell, los creadores de Clash of Clans, Clash Royale y Boom Beach. En este juego, puedes hacer equipo con tus amigos o jugar solo en varios modos de juego, desbloquear y actualizar docenas de personajes únicos, y competir con jugadores de todo el mundo. En este artículo, le diremos todo lo que necesita saber sobre Brawl Stars, por qué debe descargar su archivo APK, y cómo hacerlo de forma segura y fácil. </p>
|
4 |
-
<h2>¿Qué es Brawl Stars? </h2>
|
5 |
-
<p>Brawl Stars es un juego móvil gratuito que se lanzó en 2018 para dispositivos Android e iOS. Ha sido descargado más de 100 millones de veces y ha recibido críticas positivas de críticos y jugadores por igual. Es uno de los juegos más populares y exitosos del género MOBA, que involucra a dos o más equipos de jugadores que luchan entre sí en una arena. </p>
|
6 |
-
<h2>descargar gratis brawl estrellas apk</h2><br /><p><b><b>Download File</b> ✓✓✓ <a href="https://bltlly.com/2v6Ldq">https://bltlly.com/2v6Ldq</a></b></p><br /><br />
|
7 |
-
<h3>Características de Brawl Stars</h3>
|
8 |
-
<p>Algunas de las características que hacen que Brawl Stars se destaque de otros juegos de MOBA son:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>Personajes variados y coloridos:</b> Puedes elegir entre más de 40 personajes diferentes, llamados Brawlers, cada uno con sus propias habilidades, personalidades y pieles. Puedes desbloquearlos jugando el juego, completando misiones o abriendo Brawl Boxes.</li>
|
11 |
-
<li><b>Mejoras de gran alcance:</b> Puede mejorar sus Brawlers nivelándolos, desbloqueando sus Super habilidades, Poderes estelares y Gadgets. Estas son habilidades especiales que pueden darte una ventaja en la batalla. </li>
|
12 |
-
<li><b>Mapas diseñados por jugadores:</b> Puedes jugar en una variedad de mapas creados por otros jugadores o por Supercell. Cada mapa tiene su propio tema, diseño y obstáculos que pueden afectar el juego. </li>
|
13 |
-
</ul>
|
14 |
-
<h3>Modos de juego de Brawl Stars</h3>
|
15 |
-
|
16 |
-
<ul>
|
17 |
-
<li><b>Gem Grab:</b> Un modo 3v3 donde tienes que recoger y mantener 10 gemas para ganar. Si mueres, dejas caer tus gemas y el equipo enemigo puede recogerlas. </li>
|
18 |
-
<li><b>Showdown:</b> Un modo solo o dúo donde tienes que sobrevivir el mayor tiempo posible en un mapa en contracción. Puede recoger power-ups para aumentar su fuerza y eliminar a sus oponentes. </li>
|
19 |
-
<li><b>Brawl Ball:</b> Un modo 3v3 donde tienes que marcar dos goles con una pelota de fútbol antes que el otro equipo. Puedes usar tus ataques para noquear al enemigo o pasarle la pelota a tus compañeros de equipo. </li>
|
20 |
-
<li><b>Bounty:</b> Un modo 3v3 donde tienes que matar tantos enemigos como sea posible para ganar estrellas. El equipo con más estrellas al final del partido gana. </li>
|
21 |
-
<li><b>Heist:</b> Un modo 3v3 donde tienes que proteger tu caja fuerte y tratar de destruir la caja fuerte del enemigo. Puedes usar diferentes estrategias y tácticas para romper sus defensas o defender las tuyas. </li>
|
22 |
-
<li><b>Eventos especiales:</b> Modos de tiempo limitado que ofrecen desafíos y recompensas únicos. Algunos ejemplos son Boss Fight, Robo Rumble, Big Game, etc.</li>
|
23 |
-
<li><b>Championship Challenge:</b> Un modo competitivo donde puedes calificar para la escena de esports de Brawl Stars. Tienes que ganar 15 partidos seguidos para avanzar a la siguiente etapa. </li>
|
24 |
-
</ul>
|
25 |
-
<h2>¿Por qué descargar Brawl Stars APK? </h2>
|
26 |
-
<p>Brawl Stars está disponible en Google Play Store y Apple App Store de forma gratuita. Sin embargo, algunos jugadores pueden preferir descargar el archivo APK del juego en su lugar. APK significa Android Package Kit, y es un formato de archivo que contiene todos los componentes necesarios para instalar una aplicación en un dispositivo Android. Hay varias razones por las que es posible que desee descargar Brawl Stars APK, tales como:</p>
|
27 |
-
<h3>Beneficios de descargar Brawl Stars APK</h3>
|
28 |
-
<p>Algunos de los beneficios de descargar Brawl Stars APK son:</p>
|
29 |
-
<ul>
|
30 |
-
|
31 |
-
<li><b>Eludir las restricciones regionales:</b> Algunos países o regiones pueden no tener acceso a Brawl Stars en las tiendas de aplicaciones debido a varias razones. Al descargar el archivo APK, puede evitar estas restricciones y jugar el juego desde cualquier parte del mundo. </li>
|
32 |
-
<li><b>Ahorre espacio de almacenamiento:</b> El archivo APK de Brawl Stars es generalmente más pequeño que la versión de la tienda de aplicaciones, lo que significa que ocupará menos espacio en su dispositivo. Esto puede ser útil si tiene una capacidad de almacenamiento limitada o desea ahorrar espacio para otras aplicaciones o archivos. </li>
|
33 |
-
</ul>
|
34 |
-
<h3>Los riesgos de descargar Brawl Stars APK</h3>
|
35 |
-
<p>Sin embargo, descargar Brawl Stars APK también viene con algunos riesgos que usted debe ser consciente de, tales como:</p>
|
36 |
-
<ul>
|
37 |
-
<li><b>Malware potencial:</b> No todas las fuentes de archivos APK son confiables o seguras. Algunos de ellos pueden contener virus, spyware u otro software malicioso que puede dañar su dispositivo o robar su información personal. Siempre debe descargar archivos APK de sitios web o plataformas de buena reputación y verificados. </li>
|
38 |
-
<li><b>Posibles prohibiciones:</b> Supercell tiene el derecho de prohibir o suspender cualquier cuenta que viole sus términos de servicio o utilice software o modificaciones no autorizadas. Si descarga Brawl Stars APK de una fuente no aprobada o utilizar cualquier hacks o trucos, es posible que el riesgo de perder su cuenta o hacer frente a otras consecuencias. </li>
|
39 |
-
<li><b>Problemas de incompatibilidad:</b> A veces, el archivo APK de Brawl Stars puede no ser compatible con su dispositivo o sistema operativo. Esto puede causar problemas como accidentes, fallos, errores o un rendimiento deficiente. Siempre debe comprobar la compatibilidad y los requisitos del archivo APK antes de descargarlo e instalarlo. </li>
|
40 |
-
</ul>
|
41 |
-
<h2>Cómo descargar e instalar Brawl Stars APK? </h2>
|
42 |
-
<p>Si ha decidido descargar Brawl Stars APK, es necesario seguir algunos pasos simples para hacerlo de forma segura y correcta. Estos son los pasos que debes seguir:</p>
|
43 |
-
<h3>Paso 1: Encontrar una fuente confiable para Brawl Stars APK</h3>
|
44 |
-
|
45 |
-
<h3>Paso 2: Habilitar fuentes desconocidas en el dispositivo</h3>
|
46 |
-
<p>El siguiente paso es habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de las tiendas de aplicaciones oficiales. Para habilitar fuentes desconocidas, debe ir a la configuración del dispositivo, luego a la seguridad o la privacidad, luego activar la opción que dice "permitir la instalación de aplicaciones de fuentes desconocidas" o algo similar. También es posible que necesite conceder permiso a su navegador o administrador de archivos para instalar aplicaciones de fuentes desconocidas. </p>
|
47 |
-
<h3>Paso 3: Descargar e instalar Brawl Stars APK</h3>
|
48 |
-
<p>El tercer paso es descargar e instalar Brawl Stars APK en su dispositivo. Para ello, es necesario ir a la página web o plataforma donde se encuentra el archivo APK y haga clic en el botón de descarga. Es posible que deba esperar unos segundos o minutos para completar la descarga. Una vez finalizada la descarga, debe abrir el archivo y seguir las instrucciones en la pantalla para instalarlo. Es posible que deba aceptar algunos términos y condiciones o permitir algunos permisos para la aplicación. </p>
|
49 |
-
<h3>Paso 4: Lanzar y disfrutar de las estrellas Brawl</h3>
|
50 |
-
<p>El paso final es lanzar y disfrutar de Brawl Stars en su dispositivo. Para ello, es necesario encontrar el icono de la aplicación en la pantalla de inicio o cajón de aplicaciones y toque en él. Es posible que necesite iniciar sesión con su ID de Supercell o crear uno nuevo si no tiene uno. También puedes vincular tu cuenta de Google Play o Game Center para guardar tu progreso y sincronizar tus datos entre dispositivos. Una vez que estés en el juego, puedes personalizar tu perfil, elegir tu Brawler y empezar a jugar en el modo de juego que quieras. También puedes unirte a un club, chatear con otros jugadores, ver repeticiones y participar en eventos y desafíos. </p>
|
51 |
-
<p></p>
|
52 |
-
<h2>Conclusión</h2>
|
53 |
-
|
54 |
-
<h2>Preguntas frecuentes</h2>
|
55 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Brawl Stars APK:</p>
|
56 |
-
<ul>
|
57 |
-
<li><b>Q: ¿Es seguro Brawl Stars APK? </b></li>
|
58 |
-
<li><b>A: Brawl Stars APK es seguro si lo descarga desde una fuente confiable y verificada. Sin embargo, siempre debe escanear el archivo en busca de virus o malware antes de instalarlo en su dispositivo. </b></li>
|
59 |
-
<li><b>Q: ¿Está libre Brawl Stars APK? </b></li>
|
60 |
-
<li><b>A: Brawl Stars APK es gratis para descargar y jugar. Sin embargo, el juego tiene algunas compras en la aplicación que pueden mejorar su experiencia o acelerar su progreso. Puedes comprar gemas, monedas, pieles u ofertas especiales con dinero real. </b></li>
|
61 |
-
<li><b>Q: ¿Es Brawl Stars APK compatible con mi dispositivo? </b></li>
|
62 |
-
<li><b>A: Brawl Stars APK es compatible con la mayoría de los dispositivos Android que tienen Android 4.3 o superior. Sin embargo, algunos dispositivos pueden no ser compatibles con el juego o ejecutarlo sin problemas. Puede comprobar la compatibilidad y los requisitos del archivo APK antes de descargarlo e instalarlo. </b></li>
|
63 |
-
<li><b>Q: ¿Cómo actualizo Brawl Stars APK? </b></li>
|
64 |
-
<li><b>A: Para actualizar Brawl Stars APK, es necesario descargar la última versión del archivo APK de la misma fuente donde obtuvo la anterior. A continuación, debe desinstalar la versión anterior del juego e instalar la nueva. También puede buscar actualizaciones en la configuración del juego. </b></li>
|
65 |
-
<li><b>Q: ¿Cómo puedo desinstalar Brawl Stars APK? </b></li>
|
66 |
-
<li><b>A: Para desinstalar Brawl Stars APK, es necesario ir a la configuración de su dispositivo, luego aplicaciones o aplicaciones, a continuación, encontrar y seleccionar Brawl Stars. Luego, debe tocar en el botón de desinstalación y confirmar su acción. También puede desinstalar Brawl Stars presionando su icono en la pantalla de inicio o en el cajón de la aplicación y arrastrándolo a la papelera. </b></li>
|
67 |
-
</ul></p> 64aa2da5cf<br />
|
68 |
-
<br />
|
69 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/exceptions.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
class UnpackException(Exception):
|
2 |
-
"""Base class for some exceptions raised while unpacking.
|
3 |
-
|
4 |
-
NOTE: unpack may raise exception other than subclass of
|
5 |
-
UnpackException. If you want to catch all error, catch
|
6 |
-
Exception instead.
|
7 |
-
"""
|
8 |
-
|
9 |
-
|
10 |
-
class BufferFull(UnpackException):
|
11 |
-
pass
|
12 |
-
|
13 |
-
|
14 |
-
class OutOfData(UnpackException):
|
15 |
-
pass
|
16 |
-
|
17 |
-
|
18 |
-
class FormatError(ValueError, UnpackException):
|
19 |
-
"""Invalid msgpack format"""
|
20 |
-
|
21 |
-
|
22 |
-
class StackError(ValueError, UnpackException):
|
23 |
-
"""Too nested"""
|
24 |
-
|
25 |
-
|
26 |
-
# Deprecated. Use ValueError instead
|
27 |
-
UnpackValueError = ValueError
|
28 |
-
|
29 |
-
|
30 |
-
class ExtraData(UnpackValueError):
|
31 |
-
"""ExtraData is raised when there is trailing data.
|
32 |
-
|
33 |
-
This exception is raised while only one-shot (not streaming)
|
34 |
-
unpack.
|
35 |
-
"""
|
36 |
-
|
37 |
-
def __init__(self, unpacked, extra):
|
38 |
-
self.unpacked = unpacked
|
39 |
-
self.extra = extra
|
40 |
-
|
41 |
-
def __str__(self):
|
42 |
-
return "unpack(b) received extra data."
|
43 |
-
|
44 |
-
|
45 |
-
# Deprecated. Use Exception instead to catch all exception during packing.
|
46 |
-
PackException = Exception
|
47 |
-
PackValueError = ValueError
|
48 |
-
PackOverflowError = OverflowError
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/util.py
DELETED
@@ -1,235 +0,0 @@
|
|
1 |
-
# util.py
|
2 |
-
import warnings
|
3 |
-
import types
|
4 |
-
import collections
|
5 |
-
import itertools
|
6 |
-
from functools import lru_cache
|
7 |
-
from typing import List, Union, Iterable
|
8 |
-
|
9 |
-
_bslash = chr(92)
|
10 |
-
|
11 |
-
|
12 |
-
class __config_flags:
|
13 |
-
"""Internal class for defining compatibility and debugging flags"""
|
14 |
-
|
15 |
-
_all_names: List[str] = []
|
16 |
-
_fixed_names: List[str] = []
|
17 |
-
_type_desc = "configuration"
|
18 |
-
|
19 |
-
@classmethod
|
20 |
-
def _set(cls, dname, value):
|
21 |
-
if dname in cls._fixed_names:
|
22 |
-
warnings.warn(
|
23 |
-
"{}.{} {} is {} and cannot be overridden".format(
|
24 |
-
cls.__name__,
|
25 |
-
dname,
|
26 |
-
cls._type_desc,
|
27 |
-
str(getattr(cls, dname)).upper(),
|
28 |
-
)
|
29 |
-
)
|
30 |
-
return
|
31 |
-
if dname in cls._all_names:
|
32 |
-
setattr(cls, dname, value)
|
33 |
-
else:
|
34 |
-
raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
|
35 |
-
|
36 |
-
enable = classmethod(lambda cls, name: cls._set(name, True))
|
37 |
-
disable = classmethod(lambda cls, name: cls._set(name, False))
|
38 |
-
|
39 |
-
|
40 |
-
@lru_cache(maxsize=128)
|
41 |
-
def col(loc: int, strg: str) -> int:
|
42 |
-
"""
|
43 |
-
Returns current column within a string, counting newlines as line separators.
|
44 |
-
The first column is number 1.
|
45 |
-
|
46 |
-
Note: the default parsing behavior is to expand tabs in the input string
|
47 |
-
before starting the parsing process. See
|
48 |
-
:class:`ParserElement.parseString` for more
|
49 |
-
information on parsing strings containing ``<TAB>`` s, and suggested
|
50 |
-
methods to maintain a consistent view of the parsed string, the parse
|
51 |
-
location, and line and column positions within the parsed string.
|
52 |
-
"""
|
53 |
-
s = strg
|
54 |
-
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
|
55 |
-
|
56 |
-
|
57 |
-
@lru_cache(maxsize=128)
|
58 |
-
def lineno(loc: int, strg: str) -> int:
|
59 |
-
"""Returns current line number within a string, counting newlines as line separators.
|
60 |
-
The first line is number 1.
|
61 |
-
|
62 |
-
Note - the default parsing behavior is to expand tabs in the input string
|
63 |
-
before starting the parsing process. See :class:`ParserElement.parseString`
|
64 |
-
for more information on parsing strings containing ``<TAB>`` s, and
|
65 |
-
suggested methods to maintain a consistent view of the parsed string, the
|
66 |
-
parse location, and line and column positions within the parsed string.
|
67 |
-
"""
|
68 |
-
return strg.count("\n", 0, loc) + 1
|
69 |
-
|
70 |
-
|
71 |
-
@lru_cache(maxsize=128)
|
72 |
-
def line(loc: int, strg: str) -> str:
|
73 |
-
"""
|
74 |
-
Returns the line of text containing loc within a string, counting newlines as line separators.
|
75 |
-
"""
|
76 |
-
last_cr = strg.rfind("\n", 0, loc)
|
77 |
-
next_cr = strg.find("\n", loc)
|
78 |
-
return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
|
79 |
-
|
80 |
-
|
81 |
-
class _UnboundedCache:
|
82 |
-
def __init__(self):
|
83 |
-
cache = {}
|
84 |
-
cache_get = cache.get
|
85 |
-
self.not_in_cache = not_in_cache = object()
|
86 |
-
|
87 |
-
def get(_, key):
|
88 |
-
return cache_get(key, not_in_cache)
|
89 |
-
|
90 |
-
def set_(_, key, value):
|
91 |
-
cache[key] = value
|
92 |
-
|
93 |
-
def clear(_):
|
94 |
-
cache.clear()
|
95 |
-
|
96 |
-
self.size = None
|
97 |
-
self.get = types.MethodType(get, self)
|
98 |
-
self.set = types.MethodType(set_, self)
|
99 |
-
self.clear = types.MethodType(clear, self)
|
100 |
-
|
101 |
-
|
102 |
-
class _FifoCache:
|
103 |
-
def __init__(self, size):
|
104 |
-
self.not_in_cache = not_in_cache = object()
|
105 |
-
cache = collections.OrderedDict()
|
106 |
-
cache_get = cache.get
|
107 |
-
|
108 |
-
def get(_, key):
|
109 |
-
return cache_get(key, not_in_cache)
|
110 |
-
|
111 |
-
def set_(_, key, value):
|
112 |
-
cache[key] = value
|
113 |
-
while len(cache) > size:
|
114 |
-
cache.popitem(last=False)
|
115 |
-
|
116 |
-
def clear(_):
|
117 |
-
cache.clear()
|
118 |
-
|
119 |
-
self.size = size
|
120 |
-
self.get = types.MethodType(get, self)
|
121 |
-
self.set = types.MethodType(set_, self)
|
122 |
-
self.clear = types.MethodType(clear, self)
|
123 |
-
|
124 |
-
|
125 |
-
class LRUMemo:
|
126 |
-
"""
|
127 |
-
A memoizing mapping that retains `capacity` deleted items
|
128 |
-
|
129 |
-
The memo tracks retained items by their access order; once `capacity` items
|
130 |
-
are retained, the least recently used item is discarded.
|
131 |
-
"""
|
132 |
-
|
133 |
-
def __init__(self, capacity):
|
134 |
-
self._capacity = capacity
|
135 |
-
self._active = {}
|
136 |
-
self._memory = collections.OrderedDict()
|
137 |
-
|
138 |
-
def __getitem__(self, key):
|
139 |
-
try:
|
140 |
-
return self._active[key]
|
141 |
-
except KeyError:
|
142 |
-
self._memory.move_to_end(key)
|
143 |
-
return self._memory[key]
|
144 |
-
|
145 |
-
def __setitem__(self, key, value):
|
146 |
-
self._memory.pop(key, None)
|
147 |
-
self._active[key] = value
|
148 |
-
|
149 |
-
def __delitem__(self, key):
|
150 |
-
try:
|
151 |
-
value = self._active.pop(key)
|
152 |
-
except KeyError:
|
153 |
-
pass
|
154 |
-
else:
|
155 |
-
while len(self._memory) >= self._capacity:
|
156 |
-
self._memory.popitem(last=False)
|
157 |
-
self._memory[key] = value
|
158 |
-
|
159 |
-
def clear(self):
|
160 |
-
self._active.clear()
|
161 |
-
self._memory.clear()
|
162 |
-
|
163 |
-
|
164 |
-
class UnboundedMemo(dict):
|
165 |
-
"""
|
166 |
-
A memoizing mapping that retains all deleted items
|
167 |
-
"""
|
168 |
-
|
169 |
-
def __delitem__(self, key):
|
170 |
-
pass
|
171 |
-
|
172 |
-
|
173 |
-
def _escape_regex_range_chars(s: str) -> str:
|
174 |
-
# escape these chars: ^-[]
|
175 |
-
for c in r"\^-[]":
|
176 |
-
s = s.replace(c, _bslash + c)
|
177 |
-
s = s.replace("\n", r"\n")
|
178 |
-
s = s.replace("\t", r"\t")
|
179 |
-
return str(s)
|
180 |
-
|
181 |
-
|
182 |
-
def _collapse_string_to_ranges(
|
183 |
-
s: Union[str, Iterable[str]], re_escape: bool = True
|
184 |
-
) -> str:
|
185 |
-
def is_consecutive(c):
|
186 |
-
c_int = ord(c)
|
187 |
-
is_consecutive.prev, prev = c_int, is_consecutive.prev
|
188 |
-
if c_int - prev > 1:
|
189 |
-
is_consecutive.value = next(is_consecutive.counter)
|
190 |
-
return is_consecutive.value
|
191 |
-
|
192 |
-
is_consecutive.prev = 0
|
193 |
-
is_consecutive.counter = itertools.count()
|
194 |
-
is_consecutive.value = -1
|
195 |
-
|
196 |
-
def escape_re_range_char(c):
|
197 |
-
return "\\" + c if c in r"\^-][" else c
|
198 |
-
|
199 |
-
def no_escape_re_range_char(c):
|
200 |
-
return c
|
201 |
-
|
202 |
-
if not re_escape:
|
203 |
-
escape_re_range_char = no_escape_re_range_char
|
204 |
-
|
205 |
-
ret = []
|
206 |
-
s = "".join(sorted(set(s)))
|
207 |
-
if len(s) > 3:
|
208 |
-
for _, chars in itertools.groupby(s, key=is_consecutive):
|
209 |
-
first = last = next(chars)
|
210 |
-
last = collections.deque(
|
211 |
-
itertools.chain(iter([last]), chars), maxlen=1
|
212 |
-
).pop()
|
213 |
-
if first == last:
|
214 |
-
ret.append(escape_re_range_char(first))
|
215 |
-
else:
|
216 |
-
sep = "" if ord(last) == ord(first) + 1 else "-"
|
217 |
-
ret.append(
|
218 |
-
"{}{}{}".format(
|
219 |
-
escape_re_range_char(first), sep, escape_re_range_char(last)
|
220 |
-
)
|
221 |
-
)
|
222 |
-
else:
|
223 |
-
ret = [escape_re_range_char(c) for c in s]
|
224 |
-
|
225 |
-
return "".join(ret)
|
226 |
-
|
227 |
-
|
228 |
-
def _flatten(ll: list) -> list:
|
229 |
-
ret = []
|
230 |
-
for i in ll:
|
231 |
-
if isinstance(i, list):
|
232 |
-
ret.extend(_flatten(i))
|
233 |
-
else:
|
234 |
-
ret.append(i)
|
235 |
-
return ret
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/image_text_pair_builder.py
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Copyright (c) 2022, salesforce.com, inc.
|
3 |
-
All rights reserved.
|
4 |
-
SPDX-License-Identifier: BSD-3-Clause
|
5 |
-
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
6 |
-
"""
|
7 |
-
|
8 |
-
import os
|
9 |
-
|
10 |
-
from minigpt4.common.registry import registry
|
11 |
-
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
|
12 |
-
from minigpt4.datasets.datasets.laion_dataset import LaionDataset
|
13 |
-
from minigpt4.datasets.datasets.cc_combine_dataset import CCCombineDataset, CCAlignDataset
|
14 |
-
|
15 |
-
|
16 |
-
@registry.register_builder("cc_combine")
|
17 |
-
class CCCombineBuilder(BaseDatasetBuilder):
|
18 |
-
train_dataset_cls = CCCombineDataset
|
19 |
-
|
20 |
-
DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_combine/defaults.yaml"}
|
21 |
-
|
22 |
-
def _download_ann(self):
|
23 |
-
pass
|
24 |
-
|
25 |
-
def _download_vis(self):
|
26 |
-
pass
|
27 |
-
|
28 |
-
def build(self):
|
29 |
-
self.build_processors()
|
30 |
-
|
31 |
-
build_info = self.config.build_info
|
32 |
-
|
33 |
-
datasets = dict()
|
34 |
-
split = "train"
|
35 |
-
|
36 |
-
# create datasets
|
37 |
-
# [NOTE] return inner_datasets (wds.DataPipeline)
|
38 |
-
dataset_cls = self.train_dataset_cls
|
39 |
-
datasets[split] = dataset_cls(
|
40 |
-
vis_processor=self.vis_processors[split],
|
41 |
-
text_processor=self.text_processors[split],
|
42 |
-
location=build_info.storage,
|
43 |
-
).inner_dataset
|
44 |
-
|
45 |
-
return datasets
|
46 |
-
|
47 |
-
|
48 |
-
@registry.register_builder("laion")
|
49 |
-
class LaionBuilder(BaseDatasetBuilder):
|
50 |
-
train_dataset_cls = LaionDataset
|
51 |
-
|
52 |
-
DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"}
|
53 |
-
|
54 |
-
def _download_ann(self):
|
55 |
-
pass
|
56 |
-
|
57 |
-
def _download_vis(self):
|
58 |
-
pass
|
59 |
-
|
60 |
-
def build(self):
|
61 |
-
self.build_processors()
|
62 |
-
|
63 |
-
build_info = self.config.build_info
|
64 |
-
|
65 |
-
datasets = dict()
|
66 |
-
split = "train"
|
67 |
-
|
68 |
-
# create datasets
|
69 |
-
# [NOTE] return inner_datasets (wds.DataPipeline)
|
70 |
-
dataset_cls = self.train_dataset_cls
|
71 |
-
datasets[split] = dataset_cls(
|
72 |
-
vis_processor=self.vis_processors[split],
|
73 |
-
text_processor=self.text_processors[split],
|
74 |
-
location=build_info.storage,
|
75 |
-
).inner_dataset
|
76 |
-
|
77 |
-
return datasets
|
78 |
-
|
79 |
-
|
80 |
-
@registry.register_builder("cc_align")
|
81 |
-
class CCAlignBuilder(BaseDatasetBuilder):
|
82 |
-
train_dataset_cls = CCAlignDataset
|
83 |
-
|
84 |
-
DATASET_CONFIG_DICT = {
|
85 |
-
"default": "configs/datasets/cc_combine/align.yaml",
|
86 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVH-vn1210/make_hair/minigpt4/processors/base_processor.py
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Copyright (c) 2022, salesforce.com, inc.
|
3 |
-
All rights reserved.
|
4 |
-
SPDX-License-Identifier: BSD-3-Clause
|
5 |
-
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
6 |
-
"""
|
7 |
-
|
8 |
-
from omegaconf import OmegaConf
|
9 |
-
|
10 |
-
|
11 |
-
class BaseProcessor:
|
12 |
-
def __init__(self):
|
13 |
-
self.transform = lambda x: x
|
14 |
-
return
|
15 |
-
|
16 |
-
def __call__(self, item):
|
17 |
-
return self.transform(item)
|
18 |
-
|
19 |
-
@classmethod
|
20 |
-
def from_config(cls, cfg=None):
|
21 |
-
return cls()
|
22 |
-
|
23 |
-
def build(self, **kwargs):
|
24 |
-
cfg = OmegaConf.create(kwargs)
|
25 |
-
|
26 |
-
return self.from_config(cfg)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/function_traits.h
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/type_traits.h>
|
20 |
-
#include <thrust/detail/type_traits/has_nested_type.h>
|
21 |
-
|
22 |
-
namespace thrust
|
23 |
-
{
|
24 |
-
|
25 |
-
// forward definitions for is_commutative
|
26 |
-
template <typename T> struct plus;
|
27 |
-
template <typename T> struct multiplies;
|
28 |
-
template <typename T> struct minimum;
|
29 |
-
template <typename T> struct maximum;
|
30 |
-
template <typename T> struct logical_or;
|
31 |
-
template <typename T> struct logical_and;
|
32 |
-
template <typename T> struct bit_or;
|
33 |
-
template <typename T> struct bit_and;
|
34 |
-
template <typename T> struct bit_xor;
|
35 |
-
|
36 |
-
namespace detail
|
37 |
-
{
|
38 |
-
|
39 |
-
|
40 |
-
// some metafunctions which check for the nested types of the adaptable functions
|
41 |
-
|
42 |
-
__THRUST_DEFINE_HAS_NESTED_TYPE(has_result_type, result_type)
|
43 |
-
|
44 |
-
__THRUST_DEFINE_HAS_NESTED_TYPE(has_argument_type, argument_type)
|
45 |
-
|
46 |
-
__THRUST_DEFINE_HAS_NESTED_TYPE(has_first_argument_type, first_argument_type)
|
47 |
-
|
48 |
-
__THRUST_DEFINE_HAS_NESTED_TYPE(has_second_argument_type, second_argument_type)
|
49 |
-
|
50 |
-
|
51 |
-
template<typename AdaptableBinaryFunction>
|
52 |
-
struct result_type
|
53 |
-
{
|
54 |
-
typedef typename AdaptableBinaryFunction::result_type type;
|
55 |
-
};
|
56 |
-
|
57 |
-
|
58 |
-
template<typename T>
|
59 |
-
struct is_adaptable_unary_function
|
60 |
-
: thrust::detail::and_<
|
61 |
-
has_result_type<T>,
|
62 |
-
has_argument_type<T>
|
63 |
-
>
|
64 |
-
{};
|
65 |
-
|
66 |
-
|
67 |
-
template<typename T>
|
68 |
-
struct is_adaptable_binary_function
|
69 |
-
: thrust::detail::and_<
|
70 |
-
has_result_type<T>,
|
71 |
-
thrust::detail::and_<
|
72 |
-
has_first_argument_type<T>,
|
73 |
-
has_second_argument_type<T>
|
74 |
-
>
|
75 |
-
>
|
76 |
-
{};
|
77 |
-
|
78 |
-
|
79 |
-
template<typename BinaryFunction>
|
80 |
-
struct is_commutative
|
81 |
-
: public thrust::detail::false_type
|
82 |
-
{};
|
83 |
-
|
84 |
-
template<typename T> struct is_commutative< typename thrust::plus<T> > : public thrust::detail::is_arithmetic<T> {};
|
85 |
-
template<typename T> struct is_commutative< typename thrust::multiplies<T> > : public thrust::detail::is_arithmetic<T> {};
|
86 |
-
template<typename T> struct is_commutative< typename thrust::minimum<T> > : public thrust::detail::is_arithmetic<T> {};
|
87 |
-
template<typename T> struct is_commutative< typename thrust::maximum<T> > : public thrust::detail::is_arithmetic<T> {};
|
88 |
-
template<typename T> struct is_commutative< typename thrust::logical_or<T> > : public thrust::detail::is_arithmetic<T> {};
|
89 |
-
template<typename T> struct is_commutative< typename thrust::logical_and<T> > : public thrust::detail::is_arithmetic<T> {};
|
90 |
-
template<typename T> struct is_commutative< typename thrust::bit_or<T> > : public thrust::detail::is_arithmetic<T> {};
|
91 |
-
template<typename T> struct is_commutative< typename thrust::bit_and<T> > : public thrust::detail::is_arithmetic<T> {};
|
92 |
-
template<typename T> struct is_commutative< typename thrust::bit_xor<T> > : public thrust::detail::is_arithmetic<T> {};
|
93 |
-
|
94 |
-
} // end namespace detail
|
95 |
-
} // end namespace thrust
|
96 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/set_operations.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits the set operations
|
22 |
-
#include <thrust/system/detail/sequential/set_operations.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: RegionCLIP Zero-Shot Object Detection Demo
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.0.20
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CamCam17/Alexwww-davide-comic-book-characters/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/Alexwww/davide-comic-book-characters").launch()
|
|
|
|
|
|
|
|
spaces/ChandraMohanNayal/AutoGPT/autogpt/logs.py
DELETED
@@ -1,332 +0,0 @@
|
|
1 |
-
"""Logging module for Auto-GPT."""
|
2 |
-
import json
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import random
|
6 |
-
import re
|
7 |
-
import time
|
8 |
-
import traceback
|
9 |
-
from logging import LogRecord
|
10 |
-
|
11 |
-
from colorama import Fore, Style
|
12 |
-
|
13 |
-
from autogpt.config import Config, Singleton
|
14 |
-
from autogpt.speech import say_text
|
15 |
-
|
16 |
-
CFG = Config()
|
17 |
-
|
18 |
-
|
19 |
-
class Logger(metaclass=Singleton):
|
20 |
-
"""
|
21 |
-
Logger that handle titles in different colors.
|
22 |
-
Outputs logs in console, activity.log, and errors.log
|
23 |
-
For console handler: simulates typing
|
24 |
-
"""
|
25 |
-
|
26 |
-
def __init__(self):
|
27 |
-
# create log directory if it doesn't exist
|
28 |
-
this_files_dir_path = os.path.dirname(__file__)
|
29 |
-
log_dir = os.path.join(this_files_dir_path, "../logs")
|
30 |
-
if not os.path.exists(log_dir):
|
31 |
-
os.makedirs(log_dir)
|
32 |
-
|
33 |
-
log_file = "activity.log"
|
34 |
-
error_file = "error.log"
|
35 |
-
|
36 |
-
console_formatter = AutoGptFormatter("%(title_color)s %(message)s")
|
37 |
-
|
38 |
-
# Create a handler for console which simulate typing
|
39 |
-
self.typing_console_handler = TypingConsoleHandler()
|
40 |
-
self.typing_console_handler.setLevel(logging.INFO)
|
41 |
-
self.typing_console_handler.setFormatter(console_formatter)
|
42 |
-
|
43 |
-
# Create a handler for console without typing simulation
|
44 |
-
self.console_handler = ConsoleHandler()
|
45 |
-
self.console_handler.setLevel(logging.DEBUG)
|
46 |
-
self.console_handler.setFormatter(console_formatter)
|
47 |
-
|
48 |
-
# Info handler in activity.log
|
49 |
-
self.file_handler = logging.FileHandler(
|
50 |
-
os.path.join(log_dir, log_file), "a", "utf-8"
|
51 |
-
)
|
52 |
-
self.file_handler.setLevel(logging.DEBUG)
|
53 |
-
info_formatter = AutoGptFormatter(
|
54 |
-
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
|
55 |
-
)
|
56 |
-
self.file_handler.setFormatter(info_formatter)
|
57 |
-
|
58 |
-
# Error handler error.log
|
59 |
-
error_handler = logging.FileHandler(
|
60 |
-
os.path.join(log_dir, error_file), "a", "utf-8"
|
61 |
-
)
|
62 |
-
error_handler.setLevel(logging.ERROR)
|
63 |
-
error_formatter = AutoGptFormatter(
|
64 |
-
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
|
65 |
-
" %(message_no_color)s"
|
66 |
-
)
|
67 |
-
error_handler.setFormatter(error_formatter)
|
68 |
-
|
69 |
-
self.typing_logger = logging.getLogger("TYPER")
|
70 |
-
self.typing_logger.addHandler(self.typing_console_handler)
|
71 |
-
self.typing_logger.addHandler(self.file_handler)
|
72 |
-
self.typing_logger.addHandler(error_handler)
|
73 |
-
self.typing_logger.setLevel(logging.DEBUG)
|
74 |
-
|
75 |
-
self.logger = logging.getLogger("LOGGER")
|
76 |
-
self.logger.addHandler(self.console_handler)
|
77 |
-
self.logger.addHandler(self.file_handler)
|
78 |
-
self.logger.addHandler(error_handler)
|
79 |
-
self.logger.setLevel(logging.DEBUG)
|
80 |
-
|
81 |
-
def typewriter_log(
|
82 |
-
self, title="", title_color="", content="", speak_text=False, level=logging.INFO
|
83 |
-
):
|
84 |
-
if speak_text and CFG.speak_mode:
|
85 |
-
say_text(f"{title}. {content}")
|
86 |
-
|
87 |
-
if content:
|
88 |
-
if isinstance(content, list):
|
89 |
-
content = " ".join(content)
|
90 |
-
else:
|
91 |
-
content = ""
|
92 |
-
|
93 |
-
self.typing_logger.log(
|
94 |
-
level, content, extra={"title": title, "color": title_color}
|
95 |
-
)
|
96 |
-
|
97 |
-
def debug(
|
98 |
-
self,
|
99 |
-
message,
|
100 |
-
title="",
|
101 |
-
title_color="",
|
102 |
-
):
|
103 |
-
self._log(title, title_color, message, logging.DEBUG)
|
104 |
-
|
105 |
-
def warn(
|
106 |
-
self,
|
107 |
-
message,
|
108 |
-
title="",
|
109 |
-
title_color="",
|
110 |
-
):
|
111 |
-
self._log(title, title_color, message, logging.WARN)
|
112 |
-
|
113 |
-
def error(self, title, message=""):
|
114 |
-
self._log(title, Fore.RED, message, logging.ERROR)
|
115 |
-
|
116 |
-
def _log(self, title="", title_color="", message="", level=logging.INFO):
|
117 |
-
if message:
|
118 |
-
if isinstance(message, list):
|
119 |
-
message = " ".join(message)
|
120 |
-
self.logger.log(level, message, extra={"title": title, "color": title_color})
|
121 |
-
|
122 |
-
def set_level(self, level):
|
123 |
-
self.logger.setLevel(level)
|
124 |
-
self.typing_logger.setLevel(level)
|
125 |
-
|
126 |
-
def double_check(self, additionalText=None):
|
127 |
-
if not additionalText:
|
128 |
-
additionalText = (
|
129 |
-
"Please ensure you've setup and configured everything"
|
130 |
-
" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
|
131 |
-
"double check. You can also create a github issue or join the discord"
|
132 |
-
" and ask there!"
|
133 |
-
)
|
134 |
-
|
135 |
-
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
|
136 |
-
|
137 |
-
|
138 |
-
"""
|
139 |
-
Output stream to console using simulated typing
|
140 |
-
"""
|
141 |
-
|
142 |
-
|
143 |
-
class TypingConsoleHandler(logging.StreamHandler):
|
144 |
-
def emit(self, record):
|
145 |
-
min_typing_speed = 0.05
|
146 |
-
max_typing_speed = 0.01
|
147 |
-
|
148 |
-
msg = self.format(record)
|
149 |
-
try:
|
150 |
-
words = msg.split()
|
151 |
-
for i, word in enumerate(words):
|
152 |
-
print(word, end="", flush=True)
|
153 |
-
if i < len(words) - 1:
|
154 |
-
print(" ", end="", flush=True)
|
155 |
-
typing_speed = random.uniform(min_typing_speed, max_typing_speed)
|
156 |
-
time.sleep(typing_speed)
|
157 |
-
# type faster after each word
|
158 |
-
min_typing_speed = min_typing_speed * 0.95
|
159 |
-
max_typing_speed = max_typing_speed * 0.95
|
160 |
-
print()
|
161 |
-
except Exception:
|
162 |
-
self.handleError(record)
|
163 |
-
|
164 |
-
|
165 |
-
class ConsoleHandler(logging.StreamHandler):
|
166 |
-
def emit(self, record) -> None:
|
167 |
-
msg = self.format(record)
|
168 |
-
try:
|
169 |
-
print(msg)
|
170 |
-
except Exception:
|
171 |
-
self.handleError(record)
|
172 |
-
|
173 |
-
|
174 |
-
class AutoGptFormatter(logging.Formatter):
|
175 |
-
"""
|
176 |
-
Allows to handle custom placeholders 'title_color' and 'message_no_color'.
|
177 |
-
To use this formatter, make sure to pass 'color', 'title' as log extras.
|
178 |
-
"""
|
179 |
-
|
180 |
-
def format(self, record: LogRecord) -> str:
|
181 |
-
if hasattr(record, "color"):
|
182 |
-
record.title_color = (
|
183 |
-
getattr(record, "color")
|
184 |
-
+ getattr(record, "title")
|
185 |
-
+ " "
|
186 |
-
+ Style.RESET_ALL
|
187 |
-
)
|
188 |
-
else:
|
189 |
-
record.title_color = getattr(record, "title")
|
190 |
-
if hasattr(record, "msg"):
|
191 |
-
record.message_no_color = remove_color_codes(getattr(record, "msg"))
|
192 |
-
else:
|
193 |
-
record.message_no_color = ""
|
194 |
-
return super().format(record)
|
195 |
-
|
196 |
-
|
197 |
-
def remove_color_codes(s: str) -> str:
|
198 |
-
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
|
199 |
-
return ansi_escape.sub("", s)
|
200 |
-
|
201 |
-
|
202 |
-
logger = Logger()
|
203 |
-
|
204 |
-
|
205 |
-
def print_assistant_thoughts(ai_name, assistant_reply):
|
206 |
-
"""Prints the assistant's thoughts to the console"""
|
207 |
-
from autogpt.json_utils.json_fix_llm import (
|
208 |
-
attempt_to_fix_json_by_finding_outermost_brackets,
|
209 |
-
fix_and_parse_json,
|
210 |
-
)
|
211 |
-
|
212 |
-
try:
|
213 |
-
try:
|
214 |
-
# Parse and print Assistant response
|
215 |
-
assistant_reply_json = fix_and_parse_json(assistant_reply)
|
216 |
-
except json.JSONDecodeError:
|
217 |
-
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
|
218 |
-
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
|
219 |
-
assistant_reply
|
220 |
-
)
|
221 |
-
if isinstance(assistant_reply_json, str):
|
222 |
-
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
|
223 |
-
|
224 |
-
# Check if assistant_reply_json is a string and attempt to parse
|
225 |
-
# it into a JSON object
|
226 |
-
if isinstance(assistant_reply_json, str):
|
227 |
-
try:
|
228 |
-
assistant_reply_json = json.loads(assistant_reply_json)
|
229 |
-
except json.JSONDecodeError:
|
230 |
-
logger.error("Error: Invalid JSON\n", assistant_reply)
|
231 |
-
assistant_reply_json = (
|
232 |
-
attempt_to_fix_json_by_finding_outermost_brackets(
|
233 |
-
assistant_reply_json
|
234 |
-
)
|
235 |
-
)
|
236 |
-
|
237 |
-
assistant_thoughts_reasoning = None
|
238 |
-
assistant_thoughts_plan = None
|
239 |
-
assistant_thoughts_speak = None
|
240 |
-
assistant_thoughts_criticism = None
|
241 |
-
if not isinstance(assistant_reply_json, dict):
|
242 |
-
assistant_reply_json = {}
|
243 |
-
assistant_thoughts = assistant_reply_json.get("thoughts", {})
|
244 |
-
assistant_thoughts_text = assistant_thoughts.get("text")
|
245 |
-
|
246 |
-
if assistant_thoughts:
|
247 |
-
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
248 |
-
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
249 |
-
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
250 |
-
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
251 |
-
|
252 |
-
logger.typewriter_log(
|
253 |
-
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
254 |
-
)
|
255 |
-
logger.typewriter_log(
|
256 |
-
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
|
257 |
-
)
|
258 |
-
|
259 |
-
if assistant_thoughts_plan:
|
260 |
-
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
261 |
-
# If it's a list, join it into a string
|
262 |
-
if isinstance(assistant_thoughts_plan, list):
|
263 |
-
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
264 |
-
elif isinstance(assistant_thoughts_plan, dict):
|
265 |
-
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
266 |
-
|
267 |
-
# Split the input_string using the newline character and dashes
|
268 |
-
lines = assistant_thoughts_plan.split("\n")
|
269 |
-
for line in lines:
|
270 |
-
line = line.lstrip("- ")
|
271 |
-
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
272 |
-
|
273 |
-
logger.typewriter_log(
|
274 |
-
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
|
275 |
-
)
|
276 |
-
# Speak the assistant's thoughts
|
277 |
-
if CFG.speak_mode and assistant_thoughts_speak:
|
278 |
-
say_text(assistant_thoughts_speak)
|
279 |
-
else:
|
280 |
-
logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
|
281 |
-
|
282 |
-
return assistant_reply_json
|
283 |
-
except json.decoder.JSONDecodeError:
|
284 |
-
logger.error("Error: Invalid JSON\n", assistant_reply)
|
285 |
-
if CFG.speak_mode:
|
286 |
-
say_text(
|
287 |
-
"I have received an invalid JSON response from the OpenAI API."
|
288 |
-
" I cannot ignore this response."
|
289 |
-
)
|
290 |
-
|
291 |
-
# All other errors, return "Error: + error message"
|
292 |
-
except Exception:
|
293 |
-
call_stack = traceback.format_exc()
|
294 |
-
logger.error("Error: \n", call_stack)
|
295 |
-
|
296 |
-
|
297 |
-
def print_assistant_thoughts(
    ai_name: str, assistant_reply_json_valid: dict
) -> None:
    """Pretty-print the "thoughts" section of an already-validated assistant reply.

    Logs the assistant's text, reasoning, plan and criticism through the module
    logger, and speaks the "speak" field aloud when speak mode is enabled.

    Args:
        ai_name: Display name of the agent; shown upper-cased in the header line.
        assistant_reply_json_valid: Parsed reply JSON, expected to contain a
            "thoughts" mapping with optional "text", "reasoning", "plan",
            "criticism" and "speak" keys.
    """
    assistant_thoughts_reasoning = None
    assistant_thoughts_plan = None
    assistant_thoughts_speak = None
    assistant_thoughts_criticism = None

    assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
    assistant_thoughts_text = assistant_thoughts.get("text")
    if assistant_thoughts:
        assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
        assistant_thoughts_plan = assistant_thoughts.get("plan")
        assistant_thoughts_criticism = assistant_thoughts.get("criticism")
        assistant_thoughts_speak = assistant_thoughts.get("speak")
    logger.typewriter_log(
        f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
    )
    logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
    if assistant_thoughts_plan:
        logger.typewriter_log("PLAN:", Fore.YELLOW, "")
        # Normalize the plan to a newline-separated string before printing:
        # lists are joined, dicts are stringified as-is.
        if isinstance(assistant_thoughts_plan, list):
            assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
        elif isinstance(assistant_thoughts_plan, dict):
            assistant_thoughts_plan = str(assistant_thoughts_plan)

        # Print each plan step as a green bullet, stripping any leading
        # dash/space characters the model may have included.
        lines = assistant_thoughts_plan.split("\n")
        for line in lines:
            line = line.lstrip("- ")
            logger.typewriter_log("- ", Fore.GREEN, line.strip())
    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
    # Speak the assistant's thoughts aloud in speak mode; otherwise log the
    # "speak" field instead of silently dropping it (consistent with the
    # speak-mode handling in the raw-reply printer earlier in this module).
    if CFG.speak_mode and assistant_thoughts_speak:
        say_text(assistant_thoughts_speak)
    else:
        logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|