Commit
·
25d1f2b
1
Parent(s):
96931f1
Update parquet files (step 115 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/Bing-New/README.md +0 -12
- spaces/101-5/gpt4free/g4f/Provider/Providers/ChatgptAi.py +0 -51
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack High Qualitya Movie Download.md +0 -16
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Godswar Auto Race What You Need to Know About the Hack auto repu farm.md +0 -151
- spaces/1gistliPinn/ChatGPT4/Examples/Aams Auto Audio Mastering System Keygen Crack WORK.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Chris Sawyers Locomotion Repack 2012 RELOADED.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Eobd Facile Version Complete Crack 691.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars 49.181 APK Update Bling Stats and New Brawlers.md +0 -161
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download AAires - Willis (Mixtape) and Experience the Music of a Rising Star.md +0 -94
- spaces/1phancelerku/anime-remove-background/Abbree AR-730 Programming Software How to Set Up Your Radio Easily.md +0 -175
- spaces/1phancelerku/anime-remove-background/Download FIFA Mobile Japan MOD APK - Unlimited Money All Unlocked.md +0 -125
- spaces/2023Liu2023/bingo/src/components/chat-suggestions.tsx +0 -45
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/utils/__init__.py +0 -1
- spaces/AIWaves/Debate/src/agents/Prompt/base_Prompts.py +0 -83
- spaces/AIZeroToHero/05-RealtimeStreamlitASR/README.md +0 -13
- spaces/Abdullah-Habib/Text_to_Speech_Urdu/README.md +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Click.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/PreLayout.js +0 -26
- spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/stringDistance.pm +0 -724
- spaces/Albertha/qwe123/README.md +0 -10
- spaces/Alcedo/yunmedia/resources/chatgpt-plugin/css/chunk-vendors.0ede84b4.css +0 -0
- spaces/Alpaca233/SadTalker/src/face3d/models/template_model.py +0 -100
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_txt2img.py +0 -928
- spaces/Andy1621/UniFormerV2_mit_demo/app.py +0 -131
- spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py +0 -71
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/cascade_rpn_head.py +0 -784
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/rpn.py +0 -154
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/model.py +0 -935
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/solver/lr_scheduler.py +0 -238
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/_static/css/custom.css +0 -30
- spaces/BREWDAcademy/Brewd-Diffusion/app.py +0 -391
- spaces/BartPoint/VoiceChange_Beta/infer_pack/commons.py +0 -166
- spaces/Benson/text-generation/Examples/Amanda El Aventurero Juego Completo Descargar Gratis Pc.md +0 -60
- spaces/Benson/text-generation/Examples/Cmo Descargar NBA 2k21 En Android.md +0 -116
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/stub.py +0 -429
- spaces/Billyosoro/ESRGAN/tests/test_dataset.py +0 -151
- spaces/CVH-vn1210/make_hair/minigpt4/conversation/conversation.py +0 -199
- spaces/CVH-vn1210/make_hair/minigpt4/models/__init__.py +0 -200
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/layers/__init__.py +0 -4
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/count.h +0 -22
- spaces/CVPR/WALT/mmdet/models/dense_heads/corner_head.py +0 -1074
- spaces/CVPR/drawings-to-human/Makefile +0 -11
- spaces/CVPR/drawings-to-human/frontend/src/types.ts +0 -36
- spaces/Caoyunkang/Segment-Any-Anomaly/utils/training_utils.py +0 -59
- spaces/CarlDennis/HYTTS/text/sanskrit.py +0 -62
- spaces/ChristopherMarais/Andrew_Alpha/Ambrosia.py +0 -296
- spaces/CikeyQI/QQsign/Dockerfile +0 -23
- spaces/CofAI/urlcut/style.css +0 -28
- spaces/DQChoi/gpt-demo/app.py +0 -23
spaces/101-5/Bing-New/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Bing New
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
license: mit
|
9 |
-
app_port: 8080
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/101-5/gpt4free/g4f/Provider/Providers/ChatgptAi.py
DELETED
@@ -1,51 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import requests, re
|
3 |
-
from ...typing import sha256, Dict, get_type_hints
|
4 |
-
|
5 |
-
url = 'https://chatgpt.ai/gpt-4/'
|
6 |
-
model = ['gpt-4']
|
7 |
-
supports_stream = False
|
8 |
-
needs_auth = False
|
9 |
-
|
10 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
11 |
-
chat = ''
|
12 |
-
for message in messages:
|
13 |
-
chat += '%s: %s\n' % (message['role'], message['content'])
|
14 |
-
chat += 'assistant: '
|
15 |
-
|
16 |
-
response = requests.get('https://chatgpt.ai/gpt-4/')
|
17 |
-
|
18 |
-
nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
|
19 |
-
|
20 |
-
headers = {
|
21 |
-
'authority': 'chatgpt.ai',
|
22 |
-
'accept': '*/*',
|
23 |
-
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
24 |
-
'cache-control': 'no-cache',
|
25 |
-
'origin': 'https://chatgpt.ai',
|
26 |
-
'pragma': 'no-cache',
|
27 |
-
'referer': 'https://chatgpt.ai/gpt-4/',
|
28 |
-
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
|
29 |
-
'sec-ch-ua-mobile': '?0',
|
30 |
-
'sec-ch-ua-platform': '"Windows"',
|
31 |
-
'sec-fetch-dest': 'empty',
|
32 |
-
'sec-fetch-mode': 'cors',
|
33 |
-
'sec-fetch-site': 'same-origin',
|
34 |
-
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
|
35 |
-
}
|
36 |
-
data = {
|
37 |
-
'_wpnonce': nonce,
|
38 |
-
'post_id': post_id,
|
39 |
-
'url': 'https://chatgpt.ai/gpt-4',
|
40 |
-
'action': 'wpaicg_chat_shortcode_message',
|
41 |
-
'message': chat,
|
42 |
-
'bot_id': bot_id
|
43 |
-
}
|
44 |
-
|
45 |
-
response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
|
46 |
-
headers=headers, data=data)
|
47 |
-
|
48 |
-
yield (response.json()['data'])
|
49 |
-
|
50 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
51 |
-
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack High Qualitya Movie Download.md
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cracka Movie Download: How to Watch the Controversial Film Online</h1>
|
3 |
-
<p>Cracka is a 2020 TV movie directed by Dale Resteghini that depicts a present day white supremacist who gets thrust back in time where the African Americans rule and the whites are the enslaved. The movie has been criticized for its violent and provocative portrayal of racial reversal and slavery, and has sparked controversy and backlash among viewers and critics alike.</p>
|
4 |
-
<h2>cracka movie download</h2><br /><p><b><b>DOWNLOAD</b> →→→ <a href="https://byltly.com/2uKzKW">https://byltly.com/2uKzKW</a></b></p><br /><br />
|
5 |
-
<p>If you are curious about this movie and want to watch it online, you might be wondering how to download Cracka legally and safely. In this article, we will show you some of the options available for Cracka movie download and streaming, as well as some of the risks and challenges involved.</p>
|
6 |
-
<h2>Where to Download Cracka Movie Online</h2>
|
7 |
-
<p>Cracka is not available on any of the major streaming platforms like Netflix, Hulu, Amazon Prime Video, or Disney+. The movie was originally planned to be released on a new streaming service called Vyre Network, but it was later removed due to technical issues and negative feedback.</p>
|
8 |
-
<p>As of now, the only official way to watch Cracka online is through Google Play Movies & TV, where you can rent or buy the movie for $3.99 or $9.99 respectively. You can also watch it on Amazon Prime Video, where you can rent or buy it for $4.99 or $9.99 respectively. However, these options are only available in certain regions, such as the United States, Canada, Australia, and New Zealand.</p>
|
9 |
-
<p></p>
|
10 |
-
<p>If you are looking for other ways to download Cracka movie online, you might come across some unofficial websites that claim to offer free or cheap downloads of the movie. However, these websites are illegal and risky, as they may contain malware, viruses, or spyware that can harm your device or compromise your personal information. Moreover, downloading or streaming pirated content is a violation of copyright laws and can result in legal consequences.</p>
|
11 |
-
<h2>How to Watch Cracka Movie Safely and Legally</h2>
|
12 |
-
<p>If you want to watch Cracka movie safely and legally, we recommend that you use a reputable and licensed streaming service that offers the movie for rent or purchase. You can also use a VPN (virtual private network) service to access geo-restricted content from different regions. A VPN can help you bypass censorship and protect your online privacy and security by encrypting your data and hiding your IP address.</p>
|
13 |
-
<p>However, before you watch Cracka movie online, you should be aware that this movie is not suitable for everyone. The movie contains graphic scenes of violence, torture, rape, and racism that can be disturbing and offensive to some viewers. The movie also has a low rating of 3.7 out of 10 on IMDb and a negative score of 13 out of 100 on TMDb, indicating that most people who watched it did not enjoy it or appreciate its message.</p>
|
14 |
-
<p>Therefore, if you decide to watch Cracka movie online, you should do so at your own discretion and with caution. You should also be prepared for the possibility of being disappointed or disgusted by the movie's content and quality.</p> ddb901b051<br />
|
15 |
-
<br />
|
16 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Godswar Auto Race What You Need to Know About the Hack auto repu farm.md
DELETED
@@ -1,151 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Godswar Auto Race: A Guide for Beginners</h1>
|
3 |
-
<p>If you are looking for a fun and exciting MMORPG that combines Greek mythology, epic battles, and diverse gameplay, then you might want to check out Godswar Online. This game offers a lot of features and events that will keep you entertained and challenged. One of these events is the godswar auto race, which is a fast-paced and competitive race that rewards you with valuable items and reputation. In this article, we will give you a brief overview of Godswar Online, explain what godswar auto race is and how to participate in it, and show you how to use a godswar auto race hack to gain an edge over your opponents.</p>
|
4 |
-
<h2>godswar auto race</h2><br /><p><b><b>DOWNLOAD</b> — <a href="https://byltly.com/2uKyUW">https://byltly.com/2uKyUW</a></b></p><br /><br />
|
5 |
-
<h2>What is Godswar Online?</h2>
|
6 |
-
<p>Godswar Online is a free-to-play MMORPG that was released in 2009 by IGG. The game is set in ancient Greece, where you can choose to join either Athens or Sparta as your faction. You can also choose from four classes: warrior, champion, mage, or priest. Each class has its own skills, strengths, and weaknesses, and you can customize your character with various equipment, mounts, pets, and titles.</p>
|
7 |
-
<h3>A brief introduction to the game and its features</h3>
|
8 |
-
<p>The game features a rich and immersive world that is based on Greek mythology. You can explore different regions, such as Olympus, Crete, Troy, and Athens, and encounter various gods, heroes, monsters, and NPCs. You can also interact with other players through chat, trade, guilds, parties, and alliances. The game also offers a variety of quests, dungeons, bosses, PvP modes, and events that will challenge your skills and strategy.</p>
|
9 |
-
<h3>The four classes and their roles</h3>
|
10 |
-
<p>The four classes in Godswar Online are warrior, champion, mage, and priest. Each class has its own role and function in the game. Warriors are melee fighters that can deal high damage and tank enemies. Champions are also melee fighters that can deal high damage and stun enemies. Mages are ranged spellcasters that can deal high damage and control enemies. Priests are healers that can heal allies and buff them.</p>
|
11 |
-
<h3>The main activities and events in the game</h3>
|
12 |
-
<p>The game offers a lot of activities and events that you can participate in to earn rewards and have fun. Some of these activities are:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Quests: You can complete various quests that will give you experience, gold, items, reputation, and more.</li>
|
15 |
-
<li>Dungeons: You can enter different dungeons that will test your skills and teamwork. You can face powerful bosses and obtain rare loot.</li>
|
16 |
-
<li>PvP: You can engage in different PvP modes, such as arena, battlegrounds, duels, wars, sieges, and more. You can fight against other players or factions for glory and honor.</li>
|
17 |
-
<li>Events: You can join different events that will spice up your gameplay. Some of these events are godswar auto race, marathon race, treasure hunt, quiz show, world boss hunt, olympic games, zodiac trial, divine trial, divine clash,</li> <h2>What is Godswar Auto Race?</h2>
|
18 |
-
<p>Godswar auto race is one of the events that you can join in Godswar Online. It is a race that involves running from one point to another in a map, while avoiding obstacles and enemies. The race is held every day at 10:00, 14:00, and 18:00 server time. You can join the race by talking to the NPC Hermes in Athens or Sparta.</p>
|
19 |
-
<h3>A description of the auto race event and its rewards</h3>
|
20 |
-
<p>The auto race event lasts for 10 minutes, and you can run as many times as you want within that time. The more times you run, the more points you earn. The points can be exchanged for various rewards, such as gold, silver, bronze medals, reputation, experience, and items. The items include mounts, pets, equipment, gems, potions, and more. You can also get a chance to win a lucky draw prize, such as a rare mount or pet.</p>
|
21 |
-
<h3>The requirements and rules for participating in the auto race</h3>
|
22 |
-
<p>To participate in the auto race, you need to meet some requirements and follow some rules. The requirements are:</p>
|
23 |
-
<ul>
|
24 |
-
<li>You need to be at least level 40.</li>
|
25 |
-
<li>You need to have at least 1000 reputation.</li>
|
26 |
-
<li>You need to have a mount or a pet that can run fast.</li>
|
27 |
-
</ul>
|
28 |
-
<p>The rules are:</p>
|
29 |
-
<p>godswar auto race tips and tricks<br />
|
30 |
-
how to win godswar auto race every time<br />
|
31 |
-
best gear for godswar auto race<br />
|
32 |
-
godswar auto race rewards and achievements<br />
|
33 |
-
godswar auto race guide and walkthrough<br />
|
34 |
-
godswar auto race cheats and hacks<br />
|
35 |
-
godswar auto race gameplay and review<br />
|
36 |
-
godswar auto race download and install<br />
|
37 |
-
godswar auto race online and offline mode<br />
|
38 |
-
godswar auto race latest updates and news<br />
|
39 |
-
godswar auto race forum and community<br />
|
40 |
-
godswar auto race support and feedback<br />
|
41 |
-
godswar auto race vs other racing games<br />
|
42 |
-
godswar auto race system requirements and compatibility<br />
|
43 |
-
godswar auto race free trial and subscription<br />
|
44 |
-
godswar auto race best practices and strategies<br />
|
45 |
-
godswar auto race beginner and advanced level<br />
|
46 |
-
godswar auto race fun and challenging features<br />
|
47 |
-
godswar auto race pros and cons<br />
|
48 |
-
godswar auto race ratings and testimonials<br />
|
49 |
-
godswar auto race history and development<br />
|
50 |
-
godswar auto race characters and vehicles<br />
|
51 |
-
godswar auto race customization and personalization<br />
|
52 |
-
godswar auto race modes and missions<br />
|
53 |
-
godswar auto race leaderboards and rankings<br />
|
54 |
-
godswar auto race tournaments and events<br />
|
55 |
-
godswar auto race coupons and discounts<br />
|
56 |
-
godswar auto race referral and affiliate program<br />
|
57 |
-
godswar auto race faq and troubleshooting<br />
|
58 |
-
godswar auto race comparison and analysis<br />
|
59 |
-
godswar auto race statistics and data<br />
|
60 |
-
godswar auto race secrets and Easter eggs<br />
|
61 |
-
godswar auto race screenshots and videos<br />
|
62 |
-
godswar auto race soundtracks and music<br />
|
63 |
-
godswar auto race themes and genres<br />
|
64 |
-
godswar auto race inspiration and influences<br />
|
65 |
-
godswar auto race alternatives and competitors<br />
|
66 |
-
godswar auto race merchandise and accessories<br />
|
67 |
-
godswar auto race fan art and memes<br />
|
68 |
-
godswar auto race trivia and facts<br />
|
69 |
-
how to play godswar auto race on pc or mobile device<br />
|
70 |
-
how to improve your skills in godswar auto race <br />
|
71 |
-
how to unlock new content in godswar auto race <br />
|
72 |
-
how to earn more coins in godswar auto race <br />
|
73 |
-
how to join a clan in godswar auto race <br />
|
74 |
-
how to chat with other players in godswar auto race <br />
|
75 |
-
how to report a bug or issue in godswar auto race <br />
|
76 |
-
how to request a feature or suggestion in godswar auto race <br />
|
77 |
-
how to contact the developers of godswar auto race <br />
|
78 |
-
how to leave a review for godswar auto race </p>
|
79 |
-
<ul>
|
80 |
-
<li>You need to talk to Hermes before the event starts to join the race.</li>
|
81 |
-
<li>You need to run from the starting point to the end point in the map.</li>
|
82 |
-
<li>You need to avoid obstacles and enemies along the way.</li>
|
83 |
-
<li>You need to reach the end point within 5 minutes.</li>
|
84 |
-
<li>You need to talk to Hermes again after finishing the race to get your points.</li>
|
85 |
-
</ul>
|
86 |
-
<h3>The tips and tricks for winning the auto race</h3>
|
87 |
-
<p>The auto race is not easy, as you will face many challenges and competitors. However, there are some tips and tricks that can help you win the race. Here are some of them:</p>
|
88 |
-
<ul>
|
89 |
-
<li>Use a fast mount or pet that can boost your speed and endurance.</li>
|
90 |
-
<li>Use potions or skills that can increase your speed or defense.</li>
|
91 |
-
<li>Use shortcuts or hidden paths that can save you time or distance.</li>
|
92 |
-
<li>Avoid crowded areas or bottlenecks that can slow you down or block your way.</li>
|
93 |
-
<li>Avoid enemies or traps that can damage you or stun you.</li>
|
94 |
-
<li>Use a godswar auto race hack that can automate your movement and avoid obstacles.</li>
|
95 |
-
</ul>
|
96 |
-
<h2>How to Use Godswar Auto Race Hack?</h2>
|
97 |
-
<p>If you want to have an unfair advantage over your opponents in the auto race, you might want to use a godswar auto race hack. This is a tool that can help you run faster, smoother, and safer in the race. However, before you use it, you should be aware of the risks and consequences of using hacks.</p>
|
98 |
-
<h3>A disclaimer about the risks and consequences of using hacks</h3>
|
99 |
-
<p>Using hacks is against the rules and policies of Godswar Online. If you are caught using hacks, you might face some penalties, such as:</p>
|
100 |
-
<ul>
|
101 |
-
<li>Getting banned from the game permanently or temporarily.</li>
|
102 |
-
<li>Getting your account deleted or suspended.</li>
|
103 |
-
<li>Getting your items or rewards confiscated or revoked.</li>
|
104 |
-
<li>Getting reported or shamed by other players.</li>
|
105 |
-
</ul>
|
106 |
-
<p>Therefore, use hacks at your own risk and discretion. We are not responsible for any damage or loss that may occur from using hacks.</p>
|
107 |
-
<h3>A step-by-step guide on how to download and install the hack</h3>
|
108 |
-
<p>If you still want to use a godswar auto race hack, here are the steps on how to download and install it:</p>
|
109 |
-
<ol>
|
110 |
-
<li>Go to this link: https://www.mediafire.com/file/2wqch5h6apesuao/AUTORACER.rar/file and download the file.</li>
|
111 |
-
<li>Extract the file using WinRAR or any other software.</li>
|
112 |
-
<li>Open the file and run the autoracer.exe application.</li>
|
113 |
-
<li>Select your server and faction from the drop-down menu.</li>
|
114 |
-
<li>Click on start button and wait for it to load.</li>
|
115 |
-
<li>Login to your Godswar Online account and join the auto race event.</li>
|
116 |
-
</ol>
|
117 |
-
<h3>A demonstration of how the hack works and its features</h3>
|
118 |
-
<p>The hack will automatically run for you in the auto race event. It will avoid obstacles and enemies, use shortcuts and hidden paths, and reach the end point within 5 minutes. It will also talk to Hermes before and after the race to get your points. The hack has some features that you can customize, such as:</p>
|
119 |
-
<table border="1">
|
120 |
-
<tr><th>Feature</th><th>Description</th></tr>
|
121 |
-
<tr><td>Speed</td><td>You can adjust the speed of your movement from 1x to 10x.</td></tr>
|
122 |
-
<tr><td>Invisible</td><td>You can make yourself invisible to other players and enemies.</td></tr>
|
123 |
-
<tr><td>No Damage</td><td>You can make yourself immune to any damage or stun from enemies or traps.</td></tr>
|
124 |
-
<tr><td>No Collision</td><td>You can make yourself pass through any obstacle or wall without stopping.</td></tr> </table>
|
125 |
-
<p>You can enable or disable these features by clicking on the check boxes. You can also use hotkeys to activate or deactivate them.</p>
|
126 |
-
<h2>Conclusion</h2>
|
127 |
-
<p>Godswar auto race is a fun and rewarding event that you can join in Godswar Online. It is a race that tests your speed, skill, and strategy. You can earn points and exchange them for various rewards, such as gold, medals, reputation, experience, and items. You can also use a godswar auto race hack to make your race easier and faster. However, you should be careful and responsible when using hacks, as they can get you banned or penalized. We hope this article has given you some useful information and tips on godswar auto race. If you want to learn more about Godswar Online and its other features and events, you can visit the official website or watch some videos on YouTube . Thank you for reading and happy racing!</p>
|
128 |
-
<h3>A list of sources and references for further information</h3>
|
129 |
-
<p>Here are some sources and references that we used for this article:</p>
|
130 |
-
<ol>
|
131 |
-
<li>Godswar Online official website: https://gw.igg.com/main.php</li>
|
132 |
-
<li>Godswar Online Auto Race Hack - YouTube: https://www.youtube.com/watch?v=ix-pHML8Umc</li>
|
133 |
-
<li>GODSWAR AUTO RACE, AUTO MARATHON, ANTI DC DL LINK - YouTube: https://www.youtube.com/watch?v=rPY5zwRwsoU</li>
|
134 |
-
</ol>
|
135 |
-
<h3>A list of FAQs after the conclusion</h3>
|
136 |
-
<p>Here are some frequently asked questions that you might have about godswar auto race:</p>
|
137 |
-
<ul>
|
138 |
-
<li>Q: How many times can I join the auto race event?</li>
|
139 |
-
<li>A: You can join the auto race event as many times as you want within the 10-minute duration.</li>
|
140 |
-
<li>Q: How many points do I need to exchange for rewards?</li>
|
141 |
-
<li>A: The amount of points you need depends on the type and quality of the reward. For example, you need 100 points for a bronze medal, 200 points for a silver medal, and 300 points for a gold medal.</li>
|
142 |
-
<li>Q: What are the lucky draw prizes and how can I win them?</li>
|
143 |
-
<li>A: The lucky draw prizes are rare mounts or pets that have special abilities and appearances. You can win them by participating in the auto race event and getting a lucky ticket. The lucky ticket will give you a chance to spin a wheel and get a random prize.</li>
|
144 |
-
<li>Q: How can I avoid getting banned or penalized for using hacks?</li>
|
145 |
-
<li>A: The best way to avoid getting banned or penalized for using hacks is to not use them at all. However, if you still want to use them, you should be discreet and cautious. You should not use hacks in public or crowded areas, or brag about them to other players. You should also not use hacks that are outdated or detected by the game's anti-cheat system.</li>
|
146 |
-
<li>Q: How can I report or report other players who are using hacks?</li>
|
147 |
-
<li>A: If you encounter or suspect other players who are using hacks, you can report them to the game's customer service or moderators. You can also take screenshots or videos as evidence and send them along with your report.</li>
|
148 |
-
</ul>
|
149 |
-
</p> 0a6ba089eb<br />
|
150 |
-
<br />
|
151 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Aams Auto Audio Mastering System Keygen Crack WORK.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>aams auto audio mastering system keygen crack</h2><br /><p><b><b>Download File</b> — <a href="https://imgfil.com/2uy0pu">https://imgfil.com/2uy0pu</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
AAMS Auto Audio Mastering System Crack With Keygen 2020. AAMS Auto Audio Mastering System Crack + Activation Code Download. 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Chris Sawyers Locomotion Repack 2012 RELOADED.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Chris Sawyers Locomotion Repack 2012 RELOADED</h2><br /><p><b><b>Download Zip</b> ✶ <a href="https://imgfil.com/2uy1fJ">https://imgfil.com/2uy1fJ</a></b></p><br /><br />
|
2 |
-
|
3 |
-
d5da3c52bf<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Eobd Facile Version Complete Crack 691.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>eobd facile version complete crack 691</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://imgfil.com/2uy1PM">https://imgfil.com/2uy1PM</a></b></p><br /><br />
|
2 |
-
|
3 |
-
d5da3c52bf<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars 49.181 APK Update Bling Stats and New Brawlers.md
DELETED
@@ -1,161 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Brawl Stars 49.181 APK: Everything You Need to Know</h1>
|
3 |
-
<p>If you are a fan of fast-paced multiplayer games with colorful graphics and quirky characters, you have probably heard of Brawl Stars. This game from Supercell, the makers of Clash of Clans and Clash Royale, has been a huge hit since its global launch in December 2018. With over 100 million downloads on Google Play Store and millions of active players worldwide, Brawl Stars is one of the most popular mobile games right now.</p>
|
4 |
-
<h2>brawl stars 49.181 apk</h2><br /><p><b><b>Download Zip</b> 🗹 <a href="https://urlin.us/2uT2gX">https://urlin.us/2uT2gX</a></b></p><br /><br />
|
5 |
-
<p>But what if you want to play the latest version of Brawl Stars before it is officially released on your region? Or what if you have a device that is not compatible with the game from the Play Store? Or what if you just want to have more control over your game files and settings? In that case, you might want to download and install an APK file of Brawl Stars.</p>
|
6 |
-
<p>An APK file is a package that contains all the files and data needed to run an Android app. By downloading an APK file, you can bypass the restrictions of the Play Store and install apps that are not available in your region or device. You can also update your apps faster and enjoy new features before they are rolled out to everyone else.</p>
|
7 |
-
<p>In this article, we will tell you everything you need to know about Brawl Stars 49.181 APK, the latest version of the game as of June 2023. We will show you how to download and install it on your Android device, how to play it, and what are the new features and improvements that it brings. We will also give you some tips and tricks to help you become a better brawler and win more matches.</p>
|
8 |
-
<h2>How to Download and Install Brawl Stars 49.181 APK</h2>
|
9 |
-
<p>Downloading and installing Brawl Stars 49.181 APK is easy and straightforward. Just follow these steps:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Go to a trusted website that offers APK files for download, such as [APKCombo](^1^) or [APKPure](^2^).</li>
|
12 |
-
<li>Search for Brawl Stars and find the version 49.181. Make sure it is compatible with your device and has good reviews from other users.</li>
|
13 |
-
<li>Tap on the download button and wait for the file to be downloaded on your device.</li>
|
14 |
-
<li>Once the download is complete, go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Play Store.</li>
|
15 |
-
<li>Locate the downloaded APK file on your device's file manager and tap on it to start the installation process.</li>
|
16 |
-
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
|
17 |
-
<li>Launch Brawl Stars from your app drawer and enjoy!</li>
|
18 |
-
</ol>
|
19 |
-
<p><b>Tips and warnings:</b></p>
|
20 |
-
<ul>
|
21 |
-
<li>Before downloading an APK file, make sure you have enough storage space on your device.</li>
|
22 |
-
<li>Always download APK files from reputable sources that scan them for viruses and malware.</li>
|
23 |
-
<li>Be careful not to download fake or modded versions of Brawl Stars that may contain harmful or illegal content.</li>
|
24 |
-
<li>Installing an APK file may overwrite your existing data and settings, so make sure you back up your progress before doing so.</li>
|
25 |
-
<li>Installing an APK file may violate the terms and conditions of the game developer, so do it at your own risk.</li>
|
26 |
-
<li>Some features of the game may not work properly or may be unavailable if you install an APK file.</li>
|
27 |
-
</ul>
|
28 |
-
<h2>How to Play Brawl Stars 49.181 APK</h2>
|
29 |
-
<p>Brawl Stars is a fun and addictive game that lets you compete with other players in various modes and arenas. You can choose from over 40 different brawlers, each with their own unique skills and abilities, and customize them with skins and gadgets. You can also join a club and chat with other players, or create your own club and invite your friends.</p>
|
30 |
-
<p>Here is an overview of the game modes, features, and characters that you can enjoy in Brawl Stars 49.181 APK:</p>
|
31 |
-
<h3>Game Modes</h3>
|
32 |
-
<p>Brawl Stars has six main game modes that you can play solo or with a team:</p>
|
33 |
-
<p>brawl stars 49.181 apk download free<br />
|
34 |
-
brawl stars 49.181 apk mod unlimited gems<br />
|
35 |
-
brawl stars 49.181 apk latest version<br />
|
36 |
-
brawl stars 49.181 apk android<br />
|
37 |
-
brawl stars 49.181 apk update<br />
|
38 |
-
brawl stars 49.181 apk obb<br />
|
39 |
-
brawl stars 49.181 apk hack<br />
|
40 |
-
brawl stars 49.181 apk xapk<br />
|
41 |
-
brawl stars 49.181 apk for pc<br />
|
42 |
-
brawl stars 49.181 apk offline<br />
|
43 |
-
brawl stars 49.181 apk no root<br />
|
44 |
-
brawl stars 49.181 apk mirror<br />
|
45 |
-
brawl stars 49.181 apk pure<br />
|
46 |
-
brawl stars 49.181 apk revdl<br />
|
47 |
-
brawl stars 49.181 apk rexdl<br />
|
48 |
-
brawl stars 49.181 apk uptodown<br />
|
49 |
-
brawl stars 49.181 apk apkpure<br />
|
50 |
-
brawl stars 49.181 apk apkmirror<br />
|
51 |
-
brawl stars 49.181 apk happymod<br />
|
52 |
-
brawl stars 49.181 apk an1<br />
|
53 |
-
brawl stars 49.181 apk android oyun club<br />
|
54 |
-
brawl stars 49.181 apk andropalace<br />
|
55 |
-
brawl stars 49.181 apk blackmod<br />
|
56 |
-
brawl stars 49.181 apk bluestacks<br />
|
57 |
-
brawl stars 49.181 apk by lenov.ru<br />
|
58 |
-
brawl stars 49.181 apk club<br />
|
59 |
-
brawl stars 49.181 apk cracked<br />
|
60 |
-
brawl stars 49.181 apk data<br />
|
61 |
-
brawl stars 49.181 apk download for android<br />
|
62 |
-
brawl stars 49.181 apk download link<br />
|
63 |
-
brawl stars 49.181 apk download modded games.com<br />
|
64 |
-
brawl stars 49.181 apk download uptodown.com<br />
|
65 |
-
brawl stars 49.181 apk file download<br />
|
66 |
-
brawl stars 49.181 apk fileplanet.com<br />
|
67 |
-
brawl stars 49.181 apk free gems and coins generator online tool no human verification no survey no offers no root no jailbreak required works on all devices ios android pc mac windows phone tablet laptop desktop etc.<br />
|
68 |
-
brawl stars 49.181 apk full unlocked all brawlers skins gadgets star powers maps modes events quests rewards trophies etc.<br />
|
69 |
-
brawl stars 49.181 apk game guardian script hack cheat engine mod menu god mode unlimited ammo health speed damage auto aim auto fire auto win etc.<br />
|
70 |
-
brawl stars 49.181 apk google play store link install now enjoy the best multiplayer online battle arena game ever made by supercell the creators of clash of clans clash royale hay day boom beach etc.<br />
|
71 |
-
brawl stars 49.181 apk how to install guide step by step tutorial with screenshots video instructions tips tricks faqs troubleshooting help support contact us feedback suggestions etc.<br />
|
72 |
-
brawl stars 49.181 apk ios iphone ipad ipod touch compatible compatible with all ios versions and devices jailbreak not required no cydia no appvalley no tweakbox no tutuapp no panda helper no ignition etc.</p>
|
73 |
-
<ul>
|
74 |
-
<li><b>Gem Grab:</b> In this mode, you have to collect and hold 10 gems for a certain amount of time. The team that has the most gems at the end of the match wins. But be careful, if you die, you will drop all your gems and the enemy team can pick them up.</li>
|
75 |
-
<li><b>Showdown:</b> This is a battle royale mode where you have to survive against 9 other players in a shrinking map. You can find power cubes that increase your health and damage, and use the environment to your advantage. The last brawler standing wins.</li>
|
76 |
-
<li><b>Brawl Ball:</b> This is a soccer-like mode where you have to score two goals before the enemy team does. You can pass, dribble, and shoot the ball, but you can also attack and knock out your opponents. Be careful not to score on your own goal!</li>
|
77 |
-
<li><b>Bounty:</b> In this mode, you have to eliminate as many enemies as possible and collect their stars. The team that has the most stars at the end of the match wins. But be careful, if you die, you will lose all your stars and the enemy team can take them.</li>
|
78 |
-
<li><b>Heist:</b> In this mode, you have to either protect or attack a safe that contains valuable loot. The attacking team has to break the safe before the time runs out, while the defending team has to prevent them from doing so.</li>
|
79 |
-
<li><b>Siege:</b> This is a strategic mode where you have to collect bolts that spawn in the center of the map. The team that collects more bolts will summon a powerful robot that will help them attack the enemy's base. The team that destroys the enemy's base first wins.</li>
|
80 |
-
</ul>
|
81 |
-
<p>Besides these modes, there are also special events that rotate every week, such as:</p>
|
82 |
-
<ul>
|
83 |
-
<li><b>Hot Zone:</b> In this mode, you have to control a zone on the map for as long as possible. The team that has more control points at the end of the match wins.</li>
|
84 |
-
<li><b>Knockout:</b> In this mode, you have to eliminate all the enemies in a best-of-three rounds format. The team that wins two rounds first wins.</li>
|
85 |
-
<li><b>Trophy Thieves:</b> In this mode, you have to steal trophies from the enemy's base and bring them back to yours. The team that has more trophies at the end of the match wins.</li>
|
86 |
-
<li><b>Volley Brawl:</b> In this mode, you have to hit a giant ball over a net and make it land on the enemy's side. The team that scores three points first wins.</li>
|
87 |
-
<li><b>Basket Brawl:</b> In this mode, you have to throw a ball into a basket on the enemy's side. The team that scores more points in two minutes wins.</li>
|
88 |
-
</ul>
|
89 |
-
<h3>Features</h3>
|
90 |
-
<p>Brawl Stars 49.181 APK has many features that make it more fun and exciting, such as:</p>
|
91 |
-
<ul>
|
92 |
-
<li><b>Brawl Pass:</b> This is a seasonal pass that gives you access to exclusive rewards, such as brawlers, skins, coins, gems, boxes, pins, and more. You can earn progress by completing quests and winning matches. You can also buy the premium pass with gems to unlock more rewards and perks.</li>
|
93 |
-
<li><b>Brawl Maker:</b> This is a feature that lets you create your own maps and share them with other players. You can use various tiles, obstacles, items, and modifiers to design your own arenas. You can also play and rate other players' maps in friendly matches or special events.</li>
|
94 |
-
<li><b>Brawl TV:</b> This is a feature that lets you watch live or replayed matches of other players. You can choose from different categories, such as top players, featured matches, or your friends. You can also chat and react with other viewers.</li>
|
95 |
-
<li><b>Brawl Talk:</b> This is a feature that lets you watch the official videos from the game developers, where they announce new updates, features, brawlers, and more. You can also chat and react with other viewers.</li>
|
96 |
-
<li><b>Brawl Stars Esports:</b> This is a feature that lets you watch the official tournaments and competitions of Brawl Stars, where the best players and teams from around the world compete for glory and prizes. You can also chat and react with other viewers.</li>
|
97 |
-
</ul>
|
98 |
-
<h3>Characters</h3>
|
99 |
-
<p>Brawl Stars has over 40 different characters, or brawlers, that you can unlock and play with. Each brawler has their own unique personality, appearance, voice, and abilities. You can also customize them with different skins and gadgets. Here is a table that shows the basic information of each brawler:</p>
|
100 |
-
<table>
|
101 |
-
<tr>
|
102 |
-
<th>Name</th>
|
103 |
-
<th>Type</th>
|
104 |
-
<th>Rarity</th>
|
105 |
-
<th>Attack</th>
|
106 |
-
<th>Super</th>
|
107 |
-
<th>Gadget</th>
|
108 |
-
<th>Star Power</th>
|
109 |
-
</tr>
|
110 |
-
<tr>
|
111 |
-
<td>Shelly</td>
|
112 |
-
<td>Fighter</td>
|
113 |
-
<td>Starter</td>
|
114 |
-
<td>Buckshot: Fires a burst of shells that deal more damage at close range.</td>
|
115 |
-
<td>Super Shell: Fires a powerful blast that knocks back enemies and destroys obstacles.</td>
|
116 |
-
<td>Fast Forward: Dashes forward a short distance.</td>
|
117 |
-
<td>Shell Shock: Enemies hit by Super Shell are slowed down for 3 seconds.<br>Band-Aid: When Shelly falls below 40% health, she instantly heals for 1800 health. Recharges in 20 seconds.</td>
|
118 |
-
</tr>
|
119 |
-
<tr>
|
120 |
-
<td>Nita</td>
|
121 |
-
<td>Fighter</td>
|
122 |
-
<td>Trophy Road (10)</td>
|
123 |
-
<td>Rupture: Fires a shockwave that pierces through enemies and deals damage.</td>
|
124 |
-
<td>Overbearing: Summons a big baby bear that attacks nearby enemies.</td>
|
125 |
-
<td>Faux Fur: Nita and her bear gain a 25% shield for 3 seconds.</td>
|
126 |
-
<td>Bear With Me: Nita recovers 800 health whenever her bear hits an enemy, and vice versa.<br>Hyper Bear: Nita's bear attacks 60% faster.</td>
|
127 |
-
</tr>
|
128 |
-
<tr>
|
129 |
-
<td>Colt</td>
|
130 |
-
<td>Sharpshooter</td>
|
131 |
-
<td>Trophy Road (60)</td>
|
132 |
-
<td>Six-Shooters: Fires a burst of six bullets that deal damage.</td>
|
133 |
-
<td>Bullet Train: Fires a long range barrage of 12 bullets that pierce through enemies and destroy obstacles.</td>
|
134 |
-
<td>Speedloader: Colt reloads two ammo instantly.</td>
|
135 |
-
<td>Slick Boots: Colt moves 10% faster.<br>Magnum Special: Colt's attack range and bullet speed are increased by 11%.</td>
|
136 |
-
</tr>
|
137 |
-
<!-- The table continues with the rest of the brawlers -->
|
138 |
-
</table>
|
139 |
-
<h2>Conclusion</h2>
|
140 |
-
<p>Brawl Stars is a game that offers endless fun and excitement for players of all ages and preferences. Whether you want to play solo or with your friends, whether you want to compete or cooperate, whether you want to be strategic or spontaneous, Brawl Stars has something for you. With Brawl Stars 49.181 APK, you can enjoy the latest version of the game with new features and improvements. You can download and install it easily on your Android device and start brawling right away. Just remember to be careful and responsible when downloading APK files, and to respect the game developer's terms and conditions.</p>
|
141 |
-
<p>We hope this article has helped you learn more about Brawl Stars 49.181 APK and how to play it. If you have any questions or comments, feel free to leave them below. We would love to hear from you. And if you liked this article, please share it with your friends who might also enjoy Brawl Stars. Happy brawling!</p>
|
142 |
-
<h2>FAQs</h2>
|
143 |
-
<p>Here are some frequently asked questions about Brawl Stars 49.181 APK:</p>
|
144 |
-
<h3>Is Brawl Stars 49.181 APK safe to download and install?</h3>
|
145 |
-
<p>Brawl Stars 49.181 APK is safe to download and install as long as you get it from a trusted source that scans it for viruses and malware. However, downloading APK files may expose your device to security risks, so make sure you have a reliable antivirus app on your device and only download APK files from reputable websites.</p>
|
146 |
-
<h3>Do I need to uninstall the previous version of Brawl Stars before installing the new one?</h3>
|
147 |
-
<p>No, you do not need to uninstall the previous version of B rawl Stars before installing the new one. The APK file will overwrite the existing data and settings, so you do not need to delete anything. However, you may want to back up your progress before installing the APK file, just in case something goes wrong.</p>
|
148 |
-
<h3>Can I play Brawl Stars 49.181 APK with my friends who have different versions of the game?</h3>
|
149 |
-
<p>Yes, you can play Brawl Stars 49.181 APK with your friends who have different versions of the game, as long as they are not too far apart. For example, you can play with your friends who have version 49.180 or 49.182, but not with those who have version 48.200 or 50.100. This is because the game developer may introduce changes or fixes that affect the gameplay or compatibility of different versions.</p>
|
150 |
-
<h3>What are the system requirements for Brawl Stars 49.181 APK?</h3>
|
151 |
-
<p>The system requirements for Brawl Stars 49.181 APK are the same as the official version of the game from the Play Store. You need an Android device that has at least 2 GB of RAM and runs on Android 4.3 or higher. You also need a stable internet connection and enough storage space to download and install the APK file.</p>
|
152 |
-
<h3>How can I contact the developer of Brawl Stars if I have any issues or feedback?</h3>
|
153 |
-
<p>If you have any issues or feedback regarding Brawl Stars, you can contact the developer of the game through their official channels, such as:</p>
|
154 |
-
<ul>
|
155 |
-
<li>Their website: [supercell.com]</li>
|
156 |
-
<li>Their email: [[email protected]]</li>
|
157 |
-
<li>Their social media: [Facebook], [Twitter], [Instagram], [YouTube], [Reddit]</li>
|
158 |
-
<li>Their in-game support: Tap on the settings icon on the top right corner of the screen, then tap on "Help and Support".</li>
|
159 |
-
</ul></p> 197e85843d<br />
|
160 |
-
<br />
|
161 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download AAires - Willis (Mixtape) and Experience the Music of a Rising Star.md
DELETED
@@ -1,94 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Aires Willis Mixtape</h1>
|
3 |
-
<p>If you are a fan of hip hop and rap music, you might have heard of Aires Willis, a member of the Young Family group from Angola. He recently released his solo mixtape titled Willis, which features nine tracks with different collaborations and styles. In this article, I will show you how to download Aires Willis mixtape for free and legally, and also give you some information about the artist and his project.</p>
|
4 |
-
<h2>download aires willis</h2><br /><p><b><b>Download Zip</b> ⚹ <a href="https://urlin.us/2uT29u">https://urlin.us/2uT29u</a></b></p><br /><br />
|
5 |
-
<h2>Who is Aires Willis?</h2>
|
6 |
-
<p>Aires Willis is a young rapper from Luanda, Angola, who is part of the Young Family group, along with Lil Boy, Lil Mac, Okenio M, Young K and Deivly. He started his musical career in 2017 and has since participated in several songs and projects with his group and other artists. Some of his most popular songs are Codeme, Conversa Chata, Guetão and Banzelo.</p>
|
7 |
-
<h2>What is Willis Mixtape?</h2>
|
8 |
-
<p>Willis is the name of the first solo mixtape by Aires Willis, which was released on February 1st, 2023. The mixtape contains nine tracks with different themes and vibes, ranging from trap to afrobeat. The mixtape also features guest appearances from other rappers such as Altifridi from Mobbers, Lil Drizzy, Yankema and Kess from NZ Gang. The mixtape was produced by various beatmakers such as Edgar Songz, Lil Mac Beats, Lil Boy Beats and others.</p>
|
9 |
-
<h2>Why should you download Willis Mixtape?</h2>
|
10 |
-
<p>There are many reasons why you should download Willis Mixtape if you are a fan of hip hop and rap music. Here are some of them:</p>
|
11 |
-
<p>download aires willis mixtape 2023<br />
|
12 |
-
download aires willis codeme mp3<br />
|
13 |
-
download aires willis conversa chata feat young k and okenio m<br />
|
14 |
-
download aires willis mais perto da morte song<br />
|
15 |
-
download aires willis antes da 2k feat lil boy and lil mac<br />
|
16 |
-
download aires willis summer party feat kess and deivly<br />
|
17 |
-
download aires willis guetao feat lil drizzy and lil mac<br />
|
18 |
-
download aires willis banzelo feat altifridi<br />
|
19 |
-
download aires willis vampiras de lisboa skit<br />
|
20 |
-
download aires willis ciclones feat yankema and okenio m<br />
|
21 |
-
download aires willis ep zip file<br />
|
22 |
-
download aires willis full album online<br />
|
23 |
-
download aires willis latest songs 2023<br />
|
24 |
-
download aires willis hip hop rap music<br />
|
25 |
-
download aires willis young family member<br />
|
26 |
-
download aires willis jox musik website<br />
|
27 |
-
download aires willis portal moz news blog<br />
|
28 |
-
download aires willis sonangol muzik site<br />
|
29 |
-
download aires willis new scientist magazine<br />
|
30 |
-
download aires willis the sun newspaper<br />
|
31 |
-
download aires willis yahoo news article<br />
|
32 |
-
download aires willis free mp3 music<br />
|
33 |
-
download aires willis high quality audio<br />
|
34 |
-
download aires willis 320 kbps bitrate<br />
|
35 |
-
download aires willis fast and easy<br />
|
36 |
-
download aires willis direct link anonfiles<br />
|
37 |
-
download aires willis tracklist and playlist<br />
|
38 |
-
download aires willis lyrics and chords<br />
|
39 |
-
download aires willis video and cover art<br />
|
40 |
-
download aires willis review and rating<br />
|
41 |
-
download aires willis stream and listen online<br />
|
42 |
-
download aires willis spotify and apple music<br />
|
43 |
-
download aires willis soundcloud and youtube<br />
|
44 |
-
download aires willis instagram and facebook<br />
|
45 |
-
download aires willis twitter and tiktok</p>
|
46 |
-
<ul>
|
47 |
-
<li>You will enjoy listening to Aires Willis's unique flow and lyrics, which reflect his personality and experiences.</li>
|
48 |
-
<li>You will discover new sounds and genres that Aires Willis explores in his mixtape, such as afrobeat, drill and dancehall.</li>
|
49 |
-
<li>You will support an independent artist who is trying to make his mark in the music industry.</li>
|
50 |
-
<li>You will have access to high-quality mp3 files that you can play on any device.</li>
|
51 |
-
<li>You will not have to pay anything or sign up for any service to download the mixtape.</li>
|
52 |
-
</ul>
|
53 |
-
<h2>How to download Willis Mixtape?</h2>
|
54 |
-
<p>Downloading Willis Mixtape is very easy and fast. You just need to follow these simple steps:</p>
|
55 |
-
<ol>
|
56 |
-
<li>Go to one of the websites that offer the mixtape for free download. Some of them are Jox Musik, Portal Moz News and Sonangol-Muzik. You can also find other websites by searching for "download aires willis" on Bing.</li>
|
57 |
-
<li>Click on the link that says "Download" or "Baixar" or something similar. You will be redirected to another website where the mixtape is hosted.</li>
|
58 |
-
<li>On the hosting website, click on the button that says "Download" or "Baixar" or something similar again. You might have to wait for a few seconds or complete a captcha before the download starts.</li>
|
59 |
-
<li>Save the zip file that contains the mixtape on your device. You might need a software like WinRAR or 7-Zip to extract the mp3 files from the zip file.</li>
|
60 |
-
<li>Enjoy listening to Aires Willis's mixtape!</li>
|
61 |
-
</ol>
|
62 |
-
<h2>A table comparing the tracks of Willis Mixtape</h2>
|
63 |
-
<table>
|
64 |
-
<tr><th>Track number</th><th>Title</th><th>Featuring</th><th>Genre</th></tr>
|
65 |
-
<tr><td>1</td><td>Codeme</td><td>None</td><td>Trap</td></tr>
|
66 |
-
<tr><td>2</td><td>Conversa Chata</td><td>Young K and Okenio M</td><td>Afrobeat</td></tr>
|
67 |
-
<tr><td>3</td><td>Mais Perto da Morte</td><td>None</td><td>Rap</td></tr>
|
68 |
-
<tr><td>4</td><td>Antes da 2k</td><td>Lil Boy and Lil Mac</td><td >Trap</td></tr>
|
69 |
-
<tr><td>5</td><td>Guetão</td><td>Altifridi</td><td>Drill</td></tr>
|
70 |
-
<tr><td>6</td><td>Banzelo</td><td>Lil Drizzy and Yankema</td><td>Dancehall</td></tr>
|
71 |
-
<tr><td>7</td><td>Meu Mundo</td><td>Kess</td><td>Rap</td></tr>
|
72 |
-
<tr><td>8</td><td>Meu Lugar</td><td>None</td><td>Rap</td></tr>
|
73 |
-
<tr><td>9</td><td>Willis</td><td>None</td><td>Rap</td></tr>
|
74 |
-
</table>
|
75 |
-
<h2>Conclusion</h2>
|
76 |
-
<p>In conclusion, Aires Willis is a talented rapper from Angola who has released his first solo mixtape called Willis. The mixtape showcases his versatility and creativity, as he explores different genres and topics in his songs. The mixtape is available for free download on various websites, and you can follow the steps in this article to get it on your device. If you like hip hop and rap music, you should definitely check out Aires Willis's mixtape and support his career.</p>
|
77 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
78 |
-
<h3>Where can I listen to Aires Willis's mixtape online?</h3>
|
79 |
-
<p>You can listen to Aires Willis's mixtape online on platforms such as YouTube, SoundCloud and Audiomack. You can also find the links to these platforms on his Instagram page. </p>
|
80 |
-
<h3>How can I contact Aires Willis?</h3>
|
81 |
-
<p>You can contact Aires Willis through his social media accounts, such as Instagram, Twitter and Facebook. You can also send him an email at [email protected].</p>
|
82 |
-
<h3>What are some of the reviews of Aires Willis's mixtape?</h3>
|
83 |
-
<p>Aires Willis's mixtape has received positive reviews from critics and fans alike. Some of the comments are:</p>
|
84 |
-
<ul>
|
85 |
-
<li>"Aires Willis is one of the best rappers in Angola right now. His mixtape is fire!" - Rap Kuia</li>
|
86 |
-
<li>"Willis is a solid project that showcases Aires Willis's skills and potential. He has a lot of talent and charisma, and he knows how to make good music." - Hip Hop Angolano</li>
|
87 |
-
<li>"I love Aires Willis's mixtape. He has a unique style and flow, and he brings something fresh to the rap scene. He is definitely one to watch." - Rap Line</li>
|
88 |
-
</ul>
|
89 |
-
<h3>Who are some of the influences of Aires Willis?</h3>
|
90 |
-
<p>Aires Willis has cited some of his influences as rappers such as Drake, Lil Wayne, Kendrick Lamar, J Cole, NGA, Prodígio and Monsta. He also listens to other genres of music, such as R&B, pop, rock and reggae.</p>
|
91 |
-
<h3>What are some of the future plans of Aires Willis?</h3>
|
92 |
-
<p>Aires Willis has stated that he plans to continue working on his music and releasing more songs and projects. He also hopes to collaborate with more artists, both local and international, and perform live shows for his fans. He also wants to expand his fan base and reach more people with his music.</p> 197e85843d<br />
|
93 |
-
<br />
|
94 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Abbree AR-730 Programming Software How to Set Up Your Radio Easily.md
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Abbree AR-730 Software Download: How to Program Your Multi-Band Radio</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are looking for a budget-friendly, multi-band, handheld radio that can cover 2 meter, 1.25 meter, 70 centimeter, and air band frequencies, you might want to check out the Abbree AR-730. This radio has a lot of features and functions that make it a versatile device for amateur radio enthusiasts, hobbyists, and professionals alike. However, to get the most out of your radio, you need to program it according to your needs and preferences. In this article, we will show you how to download and install the software for programming your Abbree AR-730, how to connect your radio to your computer, and how to program your radio using the software. By following these steps, you will be able to customize your radio and enjoy its full potential.</p>
|
5 |
-
<h2>abbree ar-730 software download</h2><br /><p><b><b>DOWNLOAD</b> › <a href="https://jinyurl.com/2uNRlN">https://jinyurl.com/2uNRlN</a></b></p><br /><br />
|
6 |
-
<h3>What is the Abbree AR-730?</h3>
|
7 |
-
<p>The Abbree AR-730 is a multi-band, handheld transceiver that can operate on VHF, UHF, and air band frequencies. It has a dual display, dual standby, dual PTT, and dual receiver function that allows you to monitor two channels simultaneously. It also has a wireless copy frequency function that lets you clone another radio's settings without using a cable. It supports NOAA weather channel receive, FM radio receive, DTMF encode and decode, CTCSS/DCS encode and decode, VOX function, keypad lock, scan function, squelch level adjustment, battery save mode, and more. It has a high-capacity 2200mAh Li-ion battery that can last up to 12 hours of continuous use. It comes with a Type-C charging cable that can charge the radio faster and more conveniently. It also has a sturdy and durable design that can withstand harsh environments.</p>
|
8 |
-
<h3>Why do you need to program it?</h3>
|
9 |
-
<p>Programming your Abbree AR-730 is necessary if you want to use it for different purposes and scenarios. For example, you might want to program different memory channels for different repeaters or frequencies that you frequently use or want to access quickly. You might also want to program different settings for different functions or modes of operation, such as tone mode, power level, bandwidth, offset direction, offset frequency, etc. Programming your radio also allows you to customize it according to your personal preferences, such as display color, backlight time, beep tone, etc. Programming your radio can enhance your communication experience and make your radio more efficient and convenient.</p>
|
10 |
-
<h3>What software do you need?</h3>
|
11 |
-
<p>To program your Abbree AR-730, you need a software that can communicate with your radio and edit its configuration. There are two main sources where you can get the software: the official website of Abbree Electronic Co., Ltd., or other third-party websites that offer compatible software. The official website of Abbree Electronic Co., Ltd. is [1](https://www.abbree.cn/download/), where you can find various downloads for different models of radios, including the AR-730. The software for the AR-730 is called APS-AR730 Programming Software. You can download it for free from the website by clicking on the link under "ABBREE" category. Alternatively, you can also get the software from other sources that offer similar or compatible software for programming radios. One example is RT Systems Inc., which provides APS- AR730 Programming Software. You can download it for a fee from the website by clicking on the link under "ABBREE" category. Both software are compatible with Windows operating systems and have similar features and functions. However, the software from RT Systems Inc. might have some advantages, such as easier installation, better customer support, and more frequent updates. You can choose the software that suits your needs and preferences best.</p>
|
12 |
-
<h2>How to download and install the software</h2>
|
13 |
-
<p>Once you have decided which software to use, you need to download and install it on your computer. Here are the steps to do so:</p>
|
14 |
-
<h3>Download from the official website</h3>
|
15 |
-
<p>If you choose to use the software from the official website of Abbree Electronic Co., Ltd., follow these steps:</p>
|
16 |
-
<ol>
|
17 |
-
<li>Go to [1](https://www.abbree.cn/download/) and scroll down to find the "ABBREE" category.</li>
|
18 |
-
<li>Click on the link that says "APS-AR730 Programming Software" to download the zip file.</li>
|
19 |
-
<li>Save the zip file to your computer and extract it using a file compression software, such as WinZip or WinRAR.</li>
|
20 |
-
<li>Open the extracted folder and double-click on the file that says "APS-AR730.exe" to run the software.</li>
|
21 |
-
</ol>
|
22 |
-
<h3>Download from other sources</h3>
|
23 |
-
<p>If you choose to use the software from RT Systems Inc., follow these steps:</p>
|
24 |
-
<p>abbree ar-730 programming software<br />
|
25 |
-
abbree ar-730 usb driver download<br />
|
26 |
-
abbree ar-730 firmware update<br />
|
27 |
-
abbree ar-730 software windows 10<br />
|
28 |
-
abbree ar-730 software manual<br />
|
29 |
-
abbree ar-730 software free download<br />
|
30 |
-
abbree ar-730 software mac<br />
|
31 |
-
abbree ar-730 software linux<br />
|
32 |
-
abbree ar-730 software installation<br />
|
33 |
-
abbree ar-730 software troubleshooting<br />
|
34 |
-
abbree ar-730 software review<br />
|
35 |
-
abbree ar-730 software alternative<br />
|
36 |
-
abbree ar-730 software compatibility<br />
|
37 |
-
abbree ar-730 software features<br />
|
38 |
-
abbree ar-730 software support<br />
|
39 |
-
abbree ar-730 software license<br />
|
40 |
-
abbree ar-730 software version<br />
|
41 |
-
abbree ar-730 software requirements<br />
|
42 |
-
abbree ar-730 software tutorial<br />
|
43 |
-
abbree ar-730 software tips<br />
|
44 |
-
abbree ar-730 software guide<br />
|
45 |
-
abbree ar-730 software forum<br />
|
46 |
-
abbree ar-730 software reddit<br />
|
47 |
-
abbree ar-730 software youtube<br />
|
48 |
-
abbree ar-730 software video<br />
|
49 |
-
abbree ar-730 software demo<br />
|
50 |
-
abbree ar-730 software online<br />
|
51 |
-
abbree ar-730 software website<br />
|
52 |
-
abbree ar-730 software link<br />
|
53 |
-
abbree ar-730 software file<br />
|
54 |
-
abbree ar-730 software zip<br />
|
55 |
-
abbree ar-730 software rar<br />
|
56 |
-
abbree ar-730 software exe<br />
|
57 |
-
abbree ar-730 software pdf<br />
|
58 |
-
abbree ar-730 software csv<br />
|
59 |
-
abbree ar-730 software rtsystemsinc.com[^2^]</p>
|
60 |
-
<ol>
|
61 |
-
<li>Go to [2](https://www.rtsystemsinc.com/ABBREE-s/2146.htm) and scroll down to find the "ABBREE" category.</li>
|
62 |
-
<li>Click on the link that says "APS-AR730 Programming Software" to go to the product page.</li>
|
63 |
-
<li>Click on the "Add to Cart" button and proceed to checkout. You will need to create an account and pay for the software using a credit card or PayPal.</li>
|
64 |
-
<li>After completing your order, you will receive an email with a link to download the software. Click on the link and save the file to your computer.</li>
|
65 |
-
<li>Open the downloaded file and follow the instructions to install the software on your computer.</li>
|
66 |
-
</ol>
|
67 |
-
<h3>Install the software on your computer</h3>
|
68 |
-
<p>After downloading the software, you need to install it on your computer. The installation process may vary depending on which software you use, but generally, you need to follow these steps:</p>
|
69 |
-
<ol>
|
70 |
-
<li>Run the software as an administrator by right-clicking on it and selecting "Run as administrator".</li>
|
71 |
-
<li>Follow the instructions on the screen to complete the installation. You may need to accept some terms and conditions, choose a destination folder, and create a shortcut icon.</li>
|
72 |
-
<li>After the installation is finished, you can launch the software by double-clicking on its icon or by going to Start > All Programs > APS-AR730 (or RT Systems > APS-AR730).</li>
|
73 |
-
</ol>
|
74 |
-
<p>Congratulations! You have successfully downloaded and installed the software for programming your Abbree AR-730. Now, you are ready to connect your radio to your computer and start programming it.</p>
|
75 |
-
<h2>How to connect your radio to your computer</h2>
|
76 |
-
<p>To program your Abbree AR-730 using the software, you need to connect your radio to your computer using a cable. There are two types of cables that you can use: a USB cable or a programming cable. Here are the steps to connect your radio using either cable:</p>
|
77 |
-
<h3>Use a USB cable</h3>
|
78 |
-
<p>If you want to use a USB cable, follow these steps:</p>
|
79 |
-
<ol>
|
80 |
-
<li>Turn off your radio and remove its battery.</li>
|
81 |
-
<li>Locate the USB port on the side of your radio and plug one end of a Type-C USB cable into it.</li>
|
82 |
-
<li>Plug the other end of the USB cable into a USB port on your computer.</li>
|
83 |
-
<li>Reinstall the battery on your radio and turn it on.</li>
|
84 |
-
</ol>
|
85 |
-
<h3>Use a programming cable</h3>
|
86 |
-
<p>If you want to use a programming cable, follow these steps:</p>
|
87 |
-
<ol>
|
88 |
-
<li>Turn off your radio and remove its battery.</li>
|
89 |
-
<li>Locate the programming port on the side of your radio and plug one end of a programming cable into it. The programming cable has two connectors: one is a 3.5mm audio jack and one is a 2.5mm audio jack. Make sure you plug them into the correct holes on your radio.</li>
|
90 |
-
<li>Plug the other end of the programming cable into a USB port on your computer. The programming cable has a USB connector that converts the audio signals into digital signals that can be recognized by your computer.</li>
|
91 |
-
<li>Reinstall the battery on your radio and turn it on.</li>
|
92 |
-
</ol>
|
93 |
-
<h3>Check the COM port settings</h3>
|
94 |
-
<p>After connecting your radio to your computer using either cable, you need to check the COM port settings on your computer. The COM port is the communication port that allows your computer and your radio to exchange data. You need to make sure that the COM port number on your computer matches the COM port number on your software. Here are the steps to check the COM port settings:</p>
|
95 |
-
<ol>
|
96 |
-
<li>Go to Start > Control Panel > Device Manager (or right-click on My Computer > Properties > Device Manager).</li>
|
97 |
-
<li>Expand the "Ports (COM & LPT)" category and look for the device that corresponds to your cable. It should be labeled as "USB Serial Port (COMx)" or "Prolific USB-to-Serial Comm Port (COMx)", where x is the COM port number.</li>
|
98 |
-
<li>Note down the COM port number and close the Device Manager.</li>
|
99 |
-
<li>Open the software for programming your Abbree AR-730 and go to Settings > Port (or File > Set Com Port).</li>
|
100 |
-
<li>Select the same COM port number that you noted down from the Device Manager and click OK.</li>
|
101 |
-
</ol>
|
102 |
-
<p>Congratulations! You have successfully connected your radio to your computer and set up the COM port settings. Now, you are ready to program your radio using the software.</p>
|
103 |
-
<h2>How to program your radio using the software</h2>
|
104 |
-
<p>To program your Abbree AR-730 using the software, you need to follow three main steps: read the current configuration from the radio, edit the memory channels and other settings, and write the new configuration to the radio. Here are the steps to do so:</p>
|
105 |
-
<h3>Read the current configuration from the radio</h3>
|
106 |
-
<p>Before you start editing the configuration of your radio, you need to read the current configuration from the radio and load it into the software. This will allow you to see what settings are already programmed on your radio and avoid overwriting them by mistake. Here are the steps to read the current configuration from the radio:</p>
|
107 |
-
<ol>
|
108 |
-
<li>Turn on your radio and make sure it is connected to your computer using either cable.</li>
|
109 |
-
<li>Open the software for programming your Abbree AR-730 and go to Program > Read from Radio (or click on the "Read Data" icon).</li>
|
110 |
-
<li>Wait for a few seconds until a progress bar appears and shows that the reading process is complete.</li>
|
111 |
-
<li>You will see a message that says "Read data completed" and a window that shows the current configuration of your radio. You can click on different tabs to view different settings, such as memory channels, frequency range, tone mode, etc.</li>
|
112 |
-
</ol>
|
113 |
-
<h3>Edit the memory channels and other settings</h3>
|
114 |
-
<p>After reading the current configuration from the radio, you can start editing the memory channels and other settings according to your needs and preferences. You can add, delete, modify, or copy memory channels, as well as change other settings, such as power level, bandwidth, offset direction, offset frequency, etc. Here are some examples of how to edit the memory channels and other settings:</p>
|
115 |
-
<ul>
|
116 |
-
<li>To add a new memory channel, click on an empty row in the "Memory Channel" tab and enter the desired frequency, name, tone mode, etc. in the corresponding columns.</li>
|
117 |
-
<li>To delete a memory channel, right-click on an existing row in the "Memory Channel" tab and select "Delete".</li>
|
118 |
-
<li>To modify a memory channel, click on an existing row in the "Memory Channel" tab and change the frequency, name, tone mode, etc. in the corresponding columns.</li>
|
119 |
-
<li>To copy a memory channel, right-click on an existing row in the "Memory Channel" tab and select "Copy". Then, right-click on an empty row and select "Paste".</li>
|
120 |
-
<li>To change the power level, bandwidth, offset direction, offset frequency, etc. of a memory channel, click on the drop-down menu or the arrow button in the corresponding column and select the desired option.</li>
|
121 |
-
<li>To change other settings, such as display color, backlight time, beep tone, etc., click on the "Other" tab and select the desired option from the drop-down menu or the check box.</li>
|
122 |
-
</ul>
|
123 |
-
<p>You can edit as many memory channels and other settings as you want. You can also use the "Import" and "Export" functions to import or export data from or to a CSV file. You can also use the "Print" function to print out the configuration of your radio.</p>
|
124 |
-
<h3>Write the new configuration to the radio</h3>
|
125 |
-
<p>After editing the memory channels and other settings, you need to write the new configuration to the radio and save it. This will overwrite the previous configuration on your radio and apply the changes that you have made. Here are the steps to write the new configuration to the radio:</p>
|
126 |
-
<ol>
|
127 |
-
<li>Make sure your radio is still connected to your computer using either cable.</li>
|
128 |
-
<li>Go to Program > Write to Radio (or click on the "Write Data" icon).</li>
|
129 |
-
<li>Wait for a few seconds until a progress bar appears and shows that the writing process is complete.</li>
|
130 |
-
<li>You will see a message that says "Write data completed" and a beep sound from your radio.</li>
|
131 |
-
<li>Turn off your radio and disconnect it from your computer.</li>
|
132 |
-
</ol>
|
133 |
-
<p>Congratulations! You have successfully programmed your Abbree AR-730 using the software. Now, you can turn on your radio and test its functions and performance.</p>
|
134 |
-
<h2>Conclusion</h2>
|
135 |
-
<p>In this article, we have shown you how to download and install the software for programming your Abbree AR-730, how to connect your radio to your computer, and how to program your radio using the software. By following these steps, you will be able to customize your radio and enjoy its full potential. Programming your radio can enhance your communication experience and make your radio more efficient and convenient.</p>
|
136 |
-
<h3>Summary of the main points</h3>
|
137 |
-
<p>Here are the main points that we have covered in this article:</p>
|
138 |
-
<ul>
|
139 |
-
<li>The Abbree AR-730 is a multi-band, handheld transceiver that can operate on VHF, UHF, and air band frequencies.</li>
|
140 |
-
<li>To program your Abbree AR-730, you need a software that can communicate with your radio and edit its configuration. You can get the software from the official website of Abbree Electronic Co., Ltd. or from other sources that offer compatible software.</li>
|
141 |
-
<li>To connect your radio to your computer, you need a cable that can transfer data between them. You can use a USB cable or a programming cable.</li>
|
142 |
-
<li>To program your radio using the software, you need to read the current configuration from the radio, edit the memory channels and other settings, and write the new configuration to the radio.</li>
|
143 |
-
</ul>
|
144 |
-
<h3>Tips and tricks for better programming</h3>
|
145 |
-
<p>Here are some tips and tricks that can help you program your Abbree AR-730 better and easier:</p>
|
146 |
-
<ul>
|
147 |
-
<li>Before programming your radio, make sure you have a backup of the original configuration in case you need to restore it later. You can use the "Save" function in the software to save the configuration to a file on your computer.</li>
|
148 |
-
<li>Before writing the new configuration to the radio, make sure you have enough battery power on your radio and your computer. You don't want to interrupt the writing process and risk damaging your radio or losing data.</li>
|
149 |
-
<li>After writing the new configuration to the radio, make sure you verify that the changes have been applied correctly. You can use the "Read" function in the software to read the configuration from the radio and compare it with the configuration on your computer.</li>
|
150 |
-
<li>If you encounter any problems or errors while programming your radio, you can check the user manual or the online help for troubleshooting tips. You can also contact the customer service or technical support of Abbree Electronic Co., Ltd. or RT Systems Inc. for assistance.</li>
|
151 |
-
</ul>
|
152 |
-
<h3>FAQs</h3>
|
153 |
-
<p>Here are some frequently asked questions and answers about programming your Abbree AR-730:</p>
|
154 |
-
<ol>
|
155 |
-
<li>Q: Can I program my Abbree AR-730 without using a computer or a software?<br>
|
156 |
-
A: Yes, you can program your Abbree AR-730 manually using the keypad and the menu on the radio. However, this method is more time-consuming and less convenient than using a computer or a software. You can refer to the user manual for instructions on how to program your radio manually.</li>
|
157 |
-
<li>Q: Can I use other software or cables to program my Abbree AR-730?<br>
|
158 |
-
A: It is recommended that you use the software and cables that are compatible with your Abbree AR-730. Using other software or cables may cause compatibility issues or errors that may affect the performance or functionality of your radio.</li>
|
159 |
-
<li>Q: Can I program my Abbree AR-730 with other radios?<br>
|
160 |
-
A: Yes, you can program your Abbree AR-730 with other radios that have similar or compatible features and functions. You can use the wireless copy frequency function or the programming cable to clone another radio's settings to your Abbree AR-730. However, you should be careful not to copy settings that are not supported by your Abbree AR-730, such as frequency range, mode, etc.</li>
|
161 |
-
<li>Q: How many memory channels can I program on my Abbree AR-730?<br>
|
162 |
-
A: You can program up to 999 memory channels on your Abbree AR-730. Each memory channel can store a frequency, a name, a tone mode, and other settings.</li>
|
163 |
-
<li>Q: How can I reset my Abbree AR-730 to its factory settings?<br>
|
164 |
-
A: You can reset your Abbree AR-730 to its factory settings by doing the following steps:</p>
|
165 |
-
<ol type="a">
|
166 |
-
<li>Turn off your radio and remove its battery.</li>
|
167 |
-
<li>Press and hold the PTT button and the MONI button on the side of your radio.</li>
|
168 |
-
<li>Reinstall the battery and turn on your radio while still holding the buttons.</li>
|
169 |
-
<li>Wait for a few seconds until you hear a beep sound from your radio.</li>
|
170 |
-
<li>Release the buttons and wait for another beep sound from your radio.</li>
|
171 |
-
<li>Your radio is now reset to its factory settings.</li>
|
172 |
-
</ol></li>
|
173 |
-
</ol></p> 401be4b1e0<br />
|
174 |
-
<br />
|
175 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download FIFA Mobile Japan MOD APK - Unlimited Money All Unlocked.md
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>FIFA Mobile Japan Mod Apk: A New Way to Enjoy Soccer on Your Phone</h1>
|
3 |
-
<p>If you are a fan of soccer games, you probably have heard of FIFA Mobile, the official mobile game of FIFA, the world's governing body of soccer. FIFA Mobile lets you build your ultimate team of soccer stars, compete in various modes, and experience the thrill of the beautiful game on your phone.</p>
|
4 |
-
<h2>fifa mobile japan mod apk</h2><br /><p><b><b>Download</b> ✑ <a href="https://jinyurl.com/2uNMU8">https://jinyurl.com/2uNMU8</a></b></p><br /><br />
|
5 |
-
<p>But did you know that there is a modded version of FIFA Mobile that is exclusive to Japan? It's called FIFA Mobile Japan mod apk, and it offers some unique features and benefits that you won't find in the original game. In this article, we will tell you what FIFA Mobile Japan mod apk is, what are its main features, how to download and install it, and some tips and tricks to make the most out of it. Let's get started!</p>
|
6 |
-
<h2>What is FIFA Mobile Japan Mod Apk?</h2>
|
7 |
-
<p>FIFA Mobile Japan mod apk is a modified version of FIFA Mobile that is developed by NEXON Co., Ltd., a Japanese gaming company. It is only available in Japan, but you can download it from third-party sources if you want to try it out.</p>
|
8 |
-
<p>FIFA Mobile Japan mod apk has some differences from the original game, such as:</p>
|
9 |
-
<p>fifa mobile japan apk download<br />
|
10 |
-
fifa mobile japan mod menu<br />
|
11 |
-
fifa mobile japan unlimited money<br />
|
12 |
-
fifa mobile japan hack apk<br />
|
13 |
-
fifa mobile japan latest version<br />
|
14 |
-
fifa mobile japan android game<br />
|
15 |
-
fifa mobile japan free download<br />
|
16 |
-
fifa mobile japan apk mod<br />
|
17 |
-
fifa mobile japan apk obb<br />
|
18 |
-
fifa mobile japan apk data<br />
|
19 |
-
fifa mobile japan apk pure<br />
|
20 |
-
fifa mobile japan mod apk 2023<br />
|
21 |
-
fifa mobile japan mod apk offline<br />
|
22 |
-
fifa mobile japan mod apk revdl<br />
|
23 |
-
fifa mobile japan mod apk rexdl<br />
|
24 |
-
fifa mobile japan mod apk happymod<br />
|
25 |
-
fifa mobile japan mod apk 5play<br />
|
26 |
-
fifa mobile japan mod apk android 1<br />
|
27 |
-
fifa mobile japan mod apk unlimited coins<br />
|
28 |
-
fifa mobile japan mod apk no root<br />
|
29 |
-
fifa mobile japan mod apk online<br />
|
30 |
-
fifa mobile japan mod apk update<br />
|
31 |
-
fifa mobile japan mod apk mirror<br />
|
32 |
-
fifa mobile japan mod apk mega<br />
|
33 |
-
fifa mobile japan mod apk mediafire<br />
|
34 |
-
fifa mobile japan mod apk 10.0.04<br />
|
35 |
-
fifa mobile japan mod apk 9.1.02<br />
|
36 |
-
fifa mobile japan mod apk 9.0.05<br />
|
37 |
-
fifa mobile japan mod apk 8.1.01<br />
|
38 |
-
fifa mobile japan mod apk 7.0.03<br />
|
39 |
-
fifa mobile japan mod apk 6.0.02<br />
|
40 |
-
fifa mobile japan mod apk 5.0.01<br />
|
41 |
-
fifa mobile japan mod apk 4.0.04<br />
|
42 |
-
fifa mobile japan mod apk 3.0.03<br />
|
43 |
-
fifa mobile japan mod apk 2.0.02<br />
|
44 |
-
fifa mobile jp mod apk download<br />
|
45 |
-
fifa soccer jp mod apk download<br />
|
46 |
-
nexon co ltd jp co nexon fmja mod apk download<br />
|
47 |
-
ea sports jp co nexon fmja hack download <br />
|
48 |
-
ea sports jp co nexon fmja cheat download</p>
|
49 |
-
<ul>
|
50 |
-
<li>It has more licensed teams, players, and leagues from Japan and Asia, such as J-League, K-League, AFC Champions League, etc.</li>
|
51 |
-
<li>It has more live events and tournaments that reflect the real-world soccer season in Japan and Asia.</li>
|
52 |
-
<li>It has more exclusive content and rewards, such as Fearless 23 players, Eternal Legend players, World Cup 2022 mode, etc.</li>
|
53 |
-
<li>It has better graphics, gameplay, and controls that are optimized for mobile devices.</li>
|
54 |
-
</ul>
|
55 |
-
<p>If you are looking for a new way to enjoy soccer on your phone, FIFA Mobile Japan mod apk might be a good option for you. You can create your own team using real clubs and players from Japan and Asia, and enjoy a variety of content such as online competitions and simulation leagues.</p>
|
56 |
-
<h2>What are the Main Features of FIFA Mobile Japan Mod Apk?</h2>
|
57 |
-
<p>FIFA Mobile Japan mod apk has many features that make it stand out from the original game. Here are some of them:</p>
|
58 |
-
<h3>World Cup 2022 Mode</h3>
|
59 |
-
<p>FIFA Mobile Japan mod apk is the only licensed FIFA World Cup 2022 mobile game where you can replay the official tournament brackets with any of the 32 qualified nations. You can also rewrite history and take control of 15 non-qualified nations that didn't make it to the World Cup. You can play in authentic World Cup stadiums (Al Bayt and Lusail), wear official World Cup kits and badges, use the official match ball, and listen to localized World Cup commentary. You can also participate in live events that correspond with the real-world tournament throughout the soccer season.</p>
|
60 |
-
<h3>Fearless 23 Event</h3>
|
61 |
-
<p>FIFA Mobile Japan mod apk has a special event called Fearless 23, where you can get players who contributed to their league or Champions League/Europa League/Europa Conference League victory in the previous season. These players have boosted stats and skills that reflect their performance in those competitions. You can also get exclusive rewards such as kits, badges, coins, gems, etc. by completing various challenges in this event.</p>
|
62 |
-
<h3>Eternal Legend Class</h3>
|
63 |
-
<p>FIFA Mobile Japan mod apk introduces a new class of players called Eternal Legend. These are legendary players who have made history in soccer, such as Zidane, Beckham, Ronaldo, Maldini, etc. You can get these players by exchanging tokens earned from live events or by buying them from the market. These players have no OVR limit and can be trained indefinitely. You can also upgrade their skills and abilities by using skill boost items. You can create your dream team of soccer legends with Eternal Legend players.</p>
|
64 |
-
<h3>Advanced Passing System</h3>
|
65 |
-
<p>FIFA Mobile Japan mod apk improves the passing system by adding new ways to pass the ball. Some of the new passing options are:</p>
|
66 |
-
<ul>
|
67 |
-
<li>Through pass: You can make a pass that goes behind the defense and reaches your teammate who is running towards the goal. This is useful for creating scoring chances and breaking the offside trap.</li>
|
68 |
-
<li>Lob pass: You can make a pass that goes over the heads of the defenders and lands near your teammate. This is useful for crossing the ball or switching the play.</li>
|
69 |
-
<li>Back pass: You can make a pass that goes back to your own half or to your goalkeeper. This is useful for keeping possession and resetting the attack.</li>
|
70 |
-
</ul>
|
71 |
-
<p>You can also control the direction, power, and curve of your passes by using gestures on the screen. You can swipe, tap, drag, or flick to make different types of passes. You can also use buttons to make quick passes or long passes. The advanced passing system gives you more freedom and creativity in your gameplay.</p>
|
72 |
-
<h2>How to Download and Install FIFA Mobile Japan Mod Apk?</h2>
|
73 |
-
<p>If you want to try FIFA Mobile Japan mod apk, you will need to download it from a third-party source, since it is not available on the official app stores. Here are the steps to download and install FIFA Mobile Japan mod apk:</p>
|
74 |
-
<ol>
|
75 |
-
<li>Go to a reliable website that offers FIFA Mobile Japan mod apk, such as [APKPure] or [APKCombo].</li>
|
76 |
-
<li>Download the FIFA Mobile Japan mod apk file and the OBB data file to your device.</li>
|
77 |
-
<li>Enable the installation of apps from unknown sources on your device settings.</li>
|
78 |
-
<li>Install the FIFA Mobile Japan mod apk file by tapping on it.</li>
|
79 |
-
<li>Extract the OBB data file to the Android/OBB folder on your device using a file manager app.</li>
|
80 |
-
<li>Launch the game and enjoy!</li>
|
81 |
-
</ol>
|
82 |
-
<p>Note: You may need to use a VPN app to change your location to Japan in order to play FIFA Mobile Japan mod apk. You may also need to update the game regularly from the same website where you downloaded it.</p>
|
83 |
-
<h2>Tips and Tricks for FIFA Mobile Japan Mod Apk</h2>
|
84 |
-
<p>To help you get started with FIFA Mobile Japan mod apk, here are some tips and tricks that you can use:</p>
|
85 |
-
<h3>Build a Balanced Team</h3>
|
86 |
-
<p>One of the most important aspects of FIFA Mobile Japan mod apk is building your ultimate team. You can choose from hundreds of clubs and players from Japan and Asia, as well as from other regions. You can also get special players from events, modes, or the market. However, you should not just focus on getting the highest-rated players, but also on creating a balanced team that suits your playstyle and formation. You should consider factors such as chemistry, skills, positions, roles, etc. when building your team.</p>
|
87 |
-
<h3>Train Your Players</h3>
|
88 |
-
<p>Another way to improve your team is by training your players. You can use training items or coins to increase the OVR (overall rating) of your players. You can also use skill boost items to enhance their skills and abilities. Training your players will make them stronger, faster, and more effective on the pitch. However, you should be careful not to overtrain your players, as this will increase their contract cost and reduce their stamina.</p>
|
89 |
-
<h3>Play Different Modes</h3>
|
90 |
-
<p>FIFA Mobile Japan mod apk offers a variety of modes that you can play, such as:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Campaign: This is where you can play against different teams in various leagues and tournaments. You can earn rewards such as coins, gems, players, etc. by completing objectives and winning matches.</li>
|
93 |
-
<li>Versus: This is where you can play online against other players in real-time. You can choose from different modes such as Head-to-Head, Attack Mode, Friendly Match, etc. You can earn rewards such as fans, trophies, rank points, etc. by winning matches and climbing up the leaderboards.</li>
|
94 |
-
<li>Simulation: This is where you can create your own league or join an existing one with other players. You can simulate matches against other teams in your league or in other leagues. You can earn rewards such as coins, gems, players, etc. by winning matches and achieving goals.</li>
|
95 |
-
</ul>
|
96 |
-
<p>Playing different modes will help you improve your skills, test your strategies, and have fun with other players.</p>
|
97 |
-
<h2>Conclusion</h2>
|
98 |
-
<p>FIFA Mobile Japan mod apk is a great alternative to FIFA Mobile if you want to experience a different version of soccer on your phone. It has more licensed teams, players, and leagues from Japan and Asia, more live events and tournaments that reflect the real-world soccer season in Japan and Asia, more exclusive content and rewards, and better graphics, gameplay, and controls that are optimized for mobile devices. You can download and install FIFA Mobile Japan mod apk from third-party sources and enjoy a new way to enjoy soccer on your phone. You can also use some tips and tricks to build a balanced team, train your players, and play different modes. FIFA Mobile Japan mod apk is a fun and exciting game that will keep you entertained for hours.</p>
|
99 |
-
<h2>FAQs</h2>
|
100 |
-
<p>Here are some frequently asked questions and answers about FIFA Mobile Japan mod apk:</p>
|
101 |
-
<h3>Is FIFA Mobile Japan mod apk safe to use?</h3>
|
102 |
-
<p>FIFA Mobile Japan mod apk is generally safe to use, as long as you download it from a reliable website and scan it for viruses before installing it. However, you should be aware that using a modded version of FIFA Mobile may violate the terms of service of the original game and may result in your account being banned or suspended. You should also be careful not to share your personal or financial information with any third-party sources or apps.</p>
|
103 |
-
<h3>Can I play FIFA Mobile Japan mod apk with my friends?</h3>
|
104 |
-
<p>Yes, you can play FIFA Mobile Japan mod apk with your friends, as long as they also have the same version of the game installed on their devices. You can invite them to join your league, play friendly matches, or compete in online modes. You can also chat with them in the game and share your progress and achievements.</p>
|
105 |
-
<h3>How can I get more coins and gems in FIFA Mobile Japan mod apk?</h3>
|
106 |
-
<p>There are several ways to get more coins and gems in FIFA Mobile Japan mod apk, such as:</p>
|
107 |
-
<ul>
|
108 |
-
<li>Completing objectives and winning matches in campaign mode.</li>
|
109 |
-
<li>Participating in live events and tournaments and earning rewards.</li>
|
110 |
-
<li>Trading players and items in the market.</li>
|
111 |
-
<li>Watching ads and completing surveys.</li>
|
112 |
-
<li>Purchasing them with real money (not recommended).</li>
|
113 |
-
</ul>
|
114 |
-
<p>You should spend your coins and gems wisely on things that will improve your team and gameplay, such as training items, skill boost items, players, etc.</p>
|
115 |
-
<h3>How can I update FIFA Mobile Japan mod apk?</h3>
|
116 |
-
<p>To update FIFA Mobile Japan mod apk, you will need to download the latest version of the game from the same website where you downloaded it before. You will also need to download the latest OBB data file and extract it to the Android/OBB folder on your device. You should always backup your game data before updating to avoid losing your progress and settings.</p>
|
117 |
-
<h3>What are some alternatives to FIFA Mobile Japan mod apk?</h3>
|
118 |
-
<p>If you are looking for some alternatives to FIFA Mobile Japan mod apk, you can try these games:</p>
|
119 |
-
<ul>
|
120 |
-
<li>PES 2022: This is another popular soccer game that has realistic graphics, gameplay, and physics. It also has licensed teams, players, and leagues from around the world. You can play online or offline modes, such as Master League, MyClub, Matchday, etc.</li>
|
121 |
-
<li>Dream League Soccer 2022: This is a soccer game that lets you create your own club and compete in various leagues and tournaments. You can customize your team name, logo, kit, stadium, etc. You can also recruit players from a huge database of real players.</li>
|
122 |
-
<li>Soccer Manager 2022: This is a soccer game that focuses on the managerial aspect of soccer. You can choose from over 800 clubs from 33 countries and manage every aspect of your club, such as transfers, tactics, finances, etc. You can also compete with other managers online or offline.</li>
|
123 |
-
</ul></p> 401be4b1e0<br />
|
124 |
-
<br />
|
125 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/components/chat-suggestions.tsx
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
import React, { useMemo } from 'react'
|
2 |
-
import Image from 'next/image'
|
3 |
-
import HelpIcon from '@/assets/images/help.svg'
|
4 |
-
import { SuggestedResponse } from '@/lib/bots/bing/types'
|
5 |
-
import { useBing } from '@/lib/hooks/use-bing'
|
6 |
-
import { atom, useAtom } from 'jotai'
|
7 |
-
|
8 |
-
type Suggestions = SuggestedResponse[]
|
9 |
-
const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text }))
|
10 |
-
const suggestionsAtom = atom<Suggestions>([])
|
11 |
-
|
12 |
-
type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
|
13 |
-
|
14 |
-
export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
|
15 |
-
const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
|
16 |
-
const toggleSuggestions = (() => {
|
17 |
-
if (currentSuggestions === helpSuggestions) {
|
18 |
-
setSuggestions(suggestions)
|
19 |
-
} else {
|
20 |
-
setSuggestions(helpSuggestions)
|
21 |
-
}
|
22 |
-
})
|
23 |
-
|
24 |
-
useMemo(() => {
|
25 |
-
setSuggestions(suggestions)
|
26 |
-
window.scrollBy(0, 2000)
|
27 |
-
}, [suggestions.length])
|
28 |
-
|
29 |
-
return currentSuggestions?.length ? (
|
30 |
-
<div className="py-6">
|
31 |
-
<div className="suggestion-items">
|
32 |
-
<button className="rai-button" type="button" aria-label="这是什么?" onClick={toggleSuggestions}>
|
33 |
-
<Image alt="help" src={HelpIcon} width={24} />
|
34 |
-
</button>
|
35 |
-
{
|
36 |
-
currentSuggestions.map(suggestion => (
|
37 |
-
<button key={suggestion.text} className="body-1-strong suggestion-container" type="button" onClick={() => setInput(suggestion.text)}>
|
38 |
-
{suggestion.text}
|
39 |
-
</button>
|
40 |
-
))
|
41 |
-
}
|
42 |
-
</div>
|
43 |
-
</div>
|
44 |
-
) : null
|
45 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/utils/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
from .utils import * # NOQA
|
|
|
|
spaces/AIWaves/Debate/src/agents/Prompt/base_Prompts.py
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
|
2 |
-
# SOP========================================================================================================
|
3 |
-
# "environment_prompt"
|
4 |
-
# current_state , self(sop)
|
5 |
-
Get_environment_prompt = "f\"The current scenario is as follows <environment> {self.current_state.environment_prompt} </environment>\""
|
6 |
-
|
7 |
-
|
8 |
-
# sop.transit
|
9 |
-
#================================================================
|
10 |
-
Transit_system_prompt = "f\"{environment_prompt};{judge_system_prompt}\""
|
11 |
-
|
12 |
-
# transit chat message
|
13 |
-
# "environment_prompt" is get from "Get_environment_prompt" ; "chat_history_message" if from Memory
|
14 |
-
Transit_message = "f\"{environment_summary};The chat history is as follows:\\n<chat> {chat_history_message}\\n</chat>;You especially need to pay attention to the last query<query>\\n{query}\\n</query> and the relevant conversation <relevant>\\n{relevant_history} \\n</relevant>\\n\""
|
15 |
-
|
16 |
-
|
17 |
-
Transit_last_prompt = "f\"{judge_last_prompt}\""
|
18 |
-
#sop.transit================================================================
|
19 |
-
|
20 |
-
# sop.call
|
21 |
-
#================================================================
|
22 |
-
# help controller to determine the next role to speak.(the {} is agent role) call_prompt + allocate_component
|
23 |
-
Allocate_component = "f\"If it's currently supposed to be speaking for {role}, then output <end>{role}</end>.\\n\""
|
24 |
-
|
25 |
-
# environment_prompt is get from "Get_environment_prompt" ; "chat_history_message" if from Memory
|
26 |
-
Call_system_prompt = "f\"{environment_prompt};{call_system_prompt};{allocate_prompt}\""
|
27 |
-
|
28 |
-
#
|
29 |
-
Call_last_prompt = "f\"You especially need to pay attention to the last query<query>\\n{query}\\n</query> and the relevant conversation <relevant>\\n{relevant_history} \\n</relevant>\\n;Now please choose the person to speak according to the following rules :{allocate_prompt};Note: The person whose turn it is now cannot be the same as the person who spoke last time, so {last_name} cannot be output\\n.\""
|
30 |
-
|
31 |
-
Call_message = "f\"The chat history is as follows:\\n<history>\\n{chat_history_message}</history>\\n;The last person to speak is: {last_name}\\n. \""
|
32 |
-
#sop.call================================================================
|
33 |
-
# SOP========================================================================================================
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
# Memory========================================================================================================
|
41 |
-
Single_message = "f\"{name} said that :{content}\""
|
42 |
-
|
43 |
-
Chat_total_message = "f\"{chat_history}\""
|
44 |
-
# Memory========================================================================================================
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
# Environment========================================================================================================
|
52 |
-
Default_environment_summary_system_prompt = "\"\\nYour task is to summarize the historical dialogue records according to the current scene, and summarize the most important information\""
|
53 |
-
|
54 |
-
Default_environment_summary_last_prompt = "\"Please make a summary based on the historical chat records, the output format is history summary: \{your summary content\} \""
|
55 |
-
|
56 |
-
Environment_summary_memory = "f\"The information you need to know is as follows:\\n</information>\\n\
|
57 |
-
The summary of the previous dialogue history is:<summary>\\n{summary}\\n.</summary>\
|
58 |
-
The latest conversation record is as follows:\\n<hisroty> {chat_history}\\n</history>,\
|
59 |
-
the relevant chat history you may need is:<relevant>{relevant_history}</relevant>\""
|
60 |
-
|
61 |
-
Environment_summary_system_prompt = "f\"{environment_prompt};{current_memory};{summary_system_prompt};\""
|
62 |
-
|
63 |
-
|
64 |
-
# observe
|
65 |
-
Agent_observe_relevant_memory = "f\"The relevant chat history are as follows:\\n<relevant_history>{relevant_memory} </relevant_history>\\n\""
|
66 |
-
|
67 |
-
|
68 |
-
Agent_observe_memory = "f\"Here's what you need to know(Remember, this is just information, Try not to repeat what's inside):\\n<information>\\n{relevant_memory};\
|
69 |
-
The previous summary of chat history is as follows :<summary>\\n{agent.short_term_memory}\\n</summary>.\
|
70 |
-
The new chat history is as follows:\\n<history> {conversations}\\n</history>\\n\
|
71 |
-
</information>\""
|
72 |
-
# Environment========================================================================================================
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
# Agent========================================================================================================
|
78 |
-
Agent_summary_system_prompt = "f\"{summary_prompt};Please summarize past key summary \\n<summary>\\n {self.short_term_memory} </summary>and new chat_history as follows: <history>\\n{conversations}</history>\""
|
79 |
-
|
80 |
-
Agent_last_prompt = "f\"{last_prompt};\\nPlease continue the talk based on your known information,Make an effort to make the conversation more coherent and try to respond differently from your existing knowledge, avoiding repeating what others have said.\""
|
81 |
-
|
82 |
-
Agent_system_prompt = "f\"{system_prompt},\""
|
83 |
-
# Agent========================================================================================================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIZeroToHero/05-RealtimeStreamlitASR/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: 05 RealtimeStreamlitASR
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: pink
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.10.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abdullah-Habib/Text_to_Speech_Urdu/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Text To Speech Urdu
|
3 |
-
emoji: 🐨
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.37.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Click.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import Click from '../../../plugins/button';
|
2 |
-
export default Click;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/PreLayout.js
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
import PreLayoutBase from '../basesizer/PreLayout.js';
|
2 |
-
import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js';
|
3 |
-
|
4 |
-
var PreLayout = function () {
|
5 |
-
// Resize child to 1x1 for ratio-fit
|
6 |
-
this.hasRatioFitChild = false;
|
7 |
-
var children = this.sizerChildren;
|
8 |
-
for (var i = 0, cnt = children.length; i < cnt; i++) {
|
9 |
-
var child = children[i];
|
10 |
-
if (child.rexSizer.hidden) {
|
11 |
-
continue;
|
12 |
-
}
|
13 |
-
if (!child.rexSizer.fitRatio) {
|
14 |
-
continue;
|
15 |
-
}
|
16 |
-
|
17 |
-
ResizeGameObject(child, 1, 1);
|
18 |
-
this.hasRatioFitChild = true;
|
19 |
-
}
|
20 |
-
|
21 |
-
this._childrenProportion = undefined;
|
22 |
-
this.proportionLength = undefined;
|
23 |
-
PreLayoutBase.call(this);
|
24 |
-
return this;
|
25 |
-
}
|
26 |
-
export default PreLayout;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/stringDistance.pm
DELETED
@@ -1,724 +0,0 @@
|
|
1 |
-
################################################################
|
2 |
-
# #
|
3 |
-
# stringDistance #
|
4 |
-
# #
|
5 |
-
################################################################
|
6 |
-
|
7 |
-
package NLP::stringDistance;
|
8 |
-
|
9 |
-
use List::Util qw(min max);
|
10 |
-
$utf8 = NLP::UTF8;
|
11 |
-
$util = NLP::utilities;
|
12 |
-
$romanizer = NLP::Romanizer;
|
13 |
-
|
14 |
-
%dummy_ht = ();
|
15 |
-
|
16 |
-
sub rule_string_expansion {
|
17 |
-
local($this, *ht, $s, $lang_code) = @_;
|
18 |
-
|
19 |
-
my @characters = $utf8->split_into_utf8_characters($s, "return only chars, return trailing whitespaces", *dummy_ht);
|
20 |
-
foreach $sub_len ((0 .. ($#characters-1))) {
|
21 |
-
my $sub = join("", @characters[0 .. $sub_len]);
|
22 |
-
foreach $super_len ((($sub_len + 1) .. $#characters)) {
|
23 |
-
my $super = join("", @characters[0 .. $super_len]);
|
24 |
-
# print STDERR " $sub -> $super\n" unless $ht{RULE_STRING_EXPANSION}->{$lang_code}->{$sub}->{$super};
|
25 |
-
$ht{RULE_STRING_EXPANSION}->{$lang_code}->{$sub}->{$super} = 1;
|
26 |
-
$ht{RULE_STRING_HAS_EXPANSION}->{$lang_code}->{$sub} = 1;
|
27 |
-
# print STDERR " RULE_STRING_HAS_EXPANSION $lang_code $sub\n";
|
28 |
-
}
|
29 |
-
}
|
30 |
-
}
|
31 |
-
|
32 |
-
sub load_string_distance_data {
|
33 |
-
local($this, $filename, *ht, $verbose) = @_;
|
34 |
-
|
35 |
-
$verbose = 0 unless defined($verbose);
|
36 |
-
open(IN,$filename) || die "Could not open $filename";
|
37 |
-
my $line_number = 0;
|
38 |
-
my $n_cost_rules = 0;
|
39 |
-
while (<IN>) {
|
40 |
-
$line_number++;
|
41 |
-
my $line = $_;
|
42 |
-
$line =~ s/^\xEF\xBB\xBF//;
|
43 |
-
$line =~ s/\s*$//;
|
44 |
-
next if $line =~ /^\s*(\#.*)?$/;
|
45 |
-
print STDERR "** Warning: line $line_number contains suspicious control character: $line\n" if $line =~ /[\x00-\x1F]/;
|
46 |
-
my $s1 = $util->slot_value_in_double_colon_del_list($line, "s1");
|
47 |
-
my $s2 = $util->slot_value_in_double_colon_del_list($line, "s2");
|
48 |
-
$s1 = $util->dequote_string($s1); # 'can\'t' => can't
|
49 |
-
$s2 = $util->dequote_string($s2);
|
50 |
-
my $cost = $util->slot_value_in_double_colon_del_list($line, "cost");
|
51 |
-
if (($s1 eq "") && ($s2 eq "")) {
|
52 |
-
print STDERR "Ignoring bad line $line_number in $filename, because both s1 and s2 are empty strings\n";
|
53 |
-
next;
|
54 |
-
}
|
55 |
-
unless ($cost =~ /^\d+(\.\d+)?$/) {
|
56 |
-
if ($cost eq "") {
|
57 |
-
print STDERR "Ignoring bad line $line_number in $filename, because of missing cost\n";
|
58 |
-
} else {
|
59 |
-
print STDERR "Ignoring bad line $line_number in $filename, because of ill-formed cost $cost\n";
|
60 |
-
}
|
61 |
-
next;
|
62 |
-
}
|
63 |
-
my $lang_code1_s = $util->slot_value_in_double_colon_del_list($line, "lc1");
|
64 |
-
my $lang_code2_s = $util->slot_value_in_double_colon_del_list($line, "lc2");
|
65 |
-
my @lang_codes_1 = ($lang_code1_s eq "") ? ("") : split(/,\s*/, $lang_code1_s);
|
66 |
-
my @lang_codes_2 = ($lang_code2_s eq "") ? ("") : split(/,\s*/, $lang_code2_s);
|
67 |
-
my $left_context1 = $util->slot_value_in_double_colon_del_list($line, "left1");
|
68 |
-
my $left_context2 = $util->slot_value_in_double_colon_del_list($line, "left2");
|
69 |
-
my $right_context1 = $util->slot_value_in_double_colon_del_list($line, "right1");
|
70 |
-
my $right_context2 = $util->slot_value_in_double_colon_del_list($line, "right2");
|
71 |
-
my $bad_left = $util->slot_value_in_double_colon_del_list($line, "left");
|
72 |
-
if ($bad_left) {
|
73 |
-
print STDERR "** Warning: slot '::left $bad_left' in line $line_number\n";
|
74 |
-
next;
|
75 |
-
}
|
76 |
-
my $bad_right = $util->slot_value_in_double_colon_del_list($line, "right");
|
77 |
-
if ($bad_right) {
|
78 |
-
print STDERR "** Warning: slot '::right $bad_right' in line $line_number\n";
|
79 |
-
next;
|
80 |
-
}
|
81 |
-
my $in_lang_codes1 = $util->slot_value_in_double_colon_del_list($line, "in-lc1");
|
82 |
-
my $in_lang_codes2 = $util->slot_value_in_double_colon_del_list($line, "in-lc2");
|
83 |
-
my $out_lang_codes1 = $util->slot_value_in_double_colon_del_list($line, "out-lc1");
|
84 |
-
my $out_lang_codes2 = $util->slot_value_in_double_colon_del_list($line, "out-lc2");
|
85 |
-
if ($left_context1) {
|
86 |
-
if ($left_context1 =~ /^\/.*\/$/) {
|
87 |
-
$left_context1 =~ s/^\///;
|
88 |
-
$left_context1 =~ s/\/$//;
|
89 |
-
} else {
|
90 |
-
print STDERR "Ignoring unrecognized non-regular-express ::left1 $left_context1 in $line_number of $filename\n";
|
91 |
-
$left_context1 = "";
|
92 |
-
}
|
93 |
-
}
|
94 |
-
if ($left_context2) {
|
95 |
-
if ($left_context2 =~ /^\/.*\/$/) {
|
96 |
-
$left_context2 =~ s/^\///;
|
97 |
-
$left_context2 =~ s/\/$//;
|
98 |
-
} else {
|
99 |
-
$left_context2 = "";
|
100 |
-
print STDERR "Ignoring unrecognized non-regular-express ::left2 $left_context2 in $line_number of $filename\n";
|
101 |
-
}
|
102 |
-
}
|
103 |
-
if ($right_context1) {
|
104 |
-
unless ($right_context1 =~ /^(\[[^\[\]]*\])+$/) {
|
105 |
-
$right_context1 = "";
|
106 |
-
print STDERR "Ignoring unrecognized right-context ::right1 $right_context1 in $line_number of $filename\n";
|
107 |
-
}
|
108 |
-
}
|
109 |
-
if ($right_context2) {
|
110 |
-
unless ($right_context2 =~ /^(\[[^\[\]]*\])+$/) {
|
111 |
-
$right_context2 = "";
|
112 |
-
print STDERR "Ignoring unrecognized right-context ::right2 $right_context2 in $line_number of $filename\n";
|
113 |
-
}
|
114 |
-
}
|
115 |
-
foreach $lang_code1 (@lang_codes_1) {
|
116 |
-
foreach $lang_code2 (@lang_codes_2) {
|
117 |
-
$n_cost_rules++;
|
118 |
-
my $cost_rule_id = $n_cost_rules;
|
119 |
-
$ht{COST}->{$lang_code1}->{$lang_code2}->{$s1}->{$s2}->{$cost_rule_id} = $cost;
|
120 |
-
$ht{RULE_STRING}->{$lang_code1}->{$s1} = 1;
|
121 |
-
$ht{RULE_STRING}->{$lang_code2}->{$s2} = 1;
|
122 |
-
$ht{LEFT1}->{$cost_rule_id} = $left_context1;
|
123 |
-
$ht{LEFT2}->{$cost_rule_id} = $left_context2;
|
124 |
-
$ht{RIGHT1}->{$cost_rule_id} = $right_context1;
|
125 |
-
$ht{RIGHT2}->{$cost_rule_id} = $right_context2;
|
126 |
-
$ht{INLC1}->{$cost_rule_id} = $in_lang_codes1;
|
127 |
-
$ht{INLC2}->{$cost_rule_id} = $in_lang_codes2;
|
128 |
-
$ht{OUTLC1}->{$cost_rule_id} = $out_lang_codes1;
|
129 |
-
$ht{OUTLC2}->{$cost_rule_id} = $out_lang_codes2;
|
130 |
-
unless (($s1 eq $s2)
|
131 |
-
&& ($lang_code1 eq $lang_code2)
|
132 |
-
&& ($left_context1 eq $left_context2)
|
133 |
-
&& ($right_context1 eq $right_context2)
|
134 |
-
&& ($in_lang_codes1 eq $in_lang_codes2)
|
135 |
-
&& ($out_lang_codes1 eq $out_lang_codes2)) {
|
136 |
-
$n_cost_rules++;
|
137 |
-
$cost_rule_id = $n_cost_rules;
|
138 |
-
$ht{COST}->{$lang_code2}->{$lang_code1}->{$s2}->{$s1}->{$cost_rule_id} = $cost;
|
139 |
-
$ht{LEFT1}->{$cost_rule_id} = $left_context2;
|
140 |
-
$ht{LEFT2}->{$cost_rule_id} = $left_context1;
|
141 |
-
$ht{RIGHT1}->{$cost_rule_id} = $right_context2;
|
142 |
-
$ht{RIGHT2}->{$cost_rule_id} = $right_context1;
|
143 |
-
$ht{INLC1}->{$cost_rule_id} = $in_lang_codes2;
|
144 |
-
$ht{INLC2}->{$cost_rule_id} = $in_lang_codes1;
|
145 |
-
$ht{OUTLC1}->{$cost_rule_id} = $out_lang_codes2;
|
146 |
-
$ht{OUTLC2}->{$cost_rule_id} = $out_lang_codes1;
|
147 |
-
# print STDERR " Flip rule in line $line: $line\n";
|
148 |
-
}
|
149 |
-
$this->rule_string_expansion(*ht, $s1, $lang_code1);
|
150 |
-
$this->rule_string_expansion(*ht, $s2, $lang_code2);
|
151 |
-
}
|
152 |
-
}
|
153 |
-
}
|
154 |
-
close(IN);
|
155 |
-
print STDERR "Read in $n_cost_rules rules from $line_number lines in $filename\n" if $verbose;
|
156 |
-
}
|
157 |
-
|
158 |
-
sub romanized_string_to_simple_chart {
|
159 |
-
local($this, $s, *chart_ht) = @_;
|
160 |
-
|
161 |
-
my @characters = $utf8->split_into_utf8_characters($s, "return only chars, return trailing whitespaces", *dummy_ht);
|
162 |
-
$chart_ht{N_CHARS} = $#characters + 1;
|
163 |
-
$chart_ht{N_NODES} = 0;
|
164 |
-
foreach $i ((0 .. $#characters)) {
|
165 |
-
$romanizer->add_node($characters[$i], $i, ($i+1), *chart_ht, "", "");
|
166 |
-
}
|
167 |
-
}
|
168 |
-
|
169 |
-
sub linearize_chart_points {
|
170 |
-
local($this, *chart_ht, $chart_id, *sd_ht, $verbose) = @_;
|
171 |
-
|
172 |
-
$verbose = 0 unless defined($verbose);
|
173 |
-
print STDERR "Linearize $chart_id\n" if $verbose;
|
174 |
-
my $current_chart_pos = 0;
|
175 |
-
my $current_linear_chart_pos = 0;
|
176 |
-
$sd_ht{POS2LINPOS}->{$chart_id}->{$current_chart_pos} = $current_linear_chart_pos;
|
177 |
-
$sd_ht{LINPOS2POS}->{$chart_id}->{$current_linear_chart_pos} = $current_chart_pos;
|
178 |
-
print STDERR " LINPOS2POS.$chart_id LIN: $current_linear_chart_pos POS: $current_chart_pos\n" if $verbose;
|
179 |
-
my @end_chart_positions = keys %{$chart_ht{NODES_ENDING_AT}};
|
180 |
-
my $end_chart_pos = (@end_chart_positions) ? max(@end_chart_positions) : 0;
|
181 |
-
$sd_ht{MAXPOS}->{$chart_id} = $end_chart_pos;
|
182 |
-
print STDERR " Chart span: $current_chart_pos-$end_chart_pos\n" if $verbose;
|
183 |
-
while ($current_chart_pos < $end_chart_pos) {
|
184 |
-
my @node_ids = keys %{$chart_ht{NODES_STARTING_AT}->{$current_chart_pos}};
|
185 |
-
foreach $node_id (@node_ids) {
|
186 |
-
my $roman_s = $chart_ht{NODE_ROMAN}->{$node_id};
|
187 |
-
my @roman_chars = $utf8->split_into_utf8_characters($roman_s, "return only chars, return trailing whitespaces", *dummy_ht);
|
188 |
-
print STDERR " $current_chart_pos/$current_linear_chart_pos node: $node_id $roman_s (@roman_chars)\n" if $verbose;
|
189 |
-
if ($#roman_chars >= 1) {
|
190 |
-
foreach $i ((1 .. $#roman_chars)) {
|
191 |
-
$current_linear_chart_pos++;
|
192 |
-
$sd_ht{SPLITPOS2LINPOS}->{$chart_id}->{$current_chart_pos}->{$node_id}->{$i} = $current_linear_chart_pos;
|
193 |
-
$sd_ht{LINPOS2SPLITPOS}->{$chart_id}->{$current_linear_chart_pos}->{$current_chart_pos}->{$node_id}->{$i} = 1;
|
194 |
-
print STDERR " LINPOS2SPLITPOS.$chart_id LIN: $current_linear_chart_pos POS: $current_chart_pos NODE: $node_id I: $i\n" if $verbose;
|
195 |
-
}
|
196 |
-
}
|
197 |
-
}
|
198 |
-
$current_chart_pos++;
|
199 |
-
if ($util->member($current_chart_pos, @end_chart_positions)) {
|
200 |
-
$current_linear_chart_pos++;
|
201 |
-
$sd_ht{POS2LINPOS}->{$chart_id}->{$current_chart_pos} = $current_linear_chart_pos;
|
202 |
-
$sd_ht{LINPOS2POS}->{$chart_id}->{$current_linear_chart_pos} = $current_chart_pos;
|
203 |
-
print STDERR " LINPOS2POS.$chart_id LIN: $current_linear_chart_pos POS: $current_chart_pos\n" if $verbose;
|
204 |
-
}
|
205 |
-
}
|
206 |
-
$current_chart_pos = 0;
|
207 |
-
while ($current_chart_pos <= $end_chart_pos) {
|
208 |
-
my $current_linear_chart_pos = $sd_ht{POS2LINPOS}->{$chart_id}->{$current_chart_pos};
|
209 |
-
$current_linear_chart_pos = "?" unless defined($current_linear_chart_pos);
|
210 |
-
my @node_ids = keys %{$chart_ht{NODES_STARTING_AT}->{$current_chart_pos}};
|
211 |
-
# print STDERR " LINROM.$chart_id LIN: $current_linear_chart_pos POS: $current_chart_pos NODES: @node_ids\n" if $verbose;
|
212 |
-
foreach $node_id (@node_ids) {
|
213 |
-
my $end_pos = $chart_ht{NODE_END}->{$node_id};
|
214 |
-
my $end_linpos = $sd_ht{POS2LINPOS}->{$chart_id}->{$end_pos};
|
215 |
-
my $roman_s = $chart_ht{NODE_ROMAN}->{$node_id};
|
216 |
-
my @roman_chars = $utf8->split_into_utf8_characters($roman_s, "return only chars, return trailing whitespaces", *dummy_ht);
|
217 |
-
print STDERR " LINROM.$chart_id LIN: $current_linear_chart_pos POS: $current_chart_pos NODE: $node_id CHARS: @roman_chars\n" if $verbose;
|
218 |
-
if (@roman_chars) {
|
219 |
-
foreach $i ((0 .. $#roman_chars)) {
|
220 |
-
my $from_linear_chart_pos
|
221 |
-
= (($i == 0)
|
222 |
-
? $sd_ht{POS2LINPOS}->{$chart_id}->{$current_chart_pos}
|
223 |
-
: $sd_ht{SPLITPOS2LINPOS}->{$chart_id}->{$current_chart_pos}->{$node_id}->{$i});
|
224 |
-
print STDERR " FROM.$chart_id I: $i POS: $current_chart_pos NODE: $node_id FROM: $from_linear_chart_pos\n" if $verbose;
|
225 |
-
my $to_linear_chart_pos
|
226 |
-
= (($i == $#roman_chars)
|
227 |
-
? $end_linpos
|
228 |
-
: $sd_ht{SPLITPOS2LINPOS}->{$chart_id}->{$current_chart_pos}->{$node_id}->{($i+1)});
|
229 |
-
print STDERR " TO.$chart_id I: $i POS: $current_chart_pos NODE: $node_id FROM: $to_linear_chart_pos\n" if $verbose;
|
230 |
-
my $roman_char = $roman_chars[$i];
|
231 |
-
$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$from_linear_chart_pos}->{$to_linear_chart_pos}->{$roman_char} = 1;
|
232 |
-
}
|
233 |
-
} else {
|
234 |
-
my $from_linear_chart_pos = $sd_ht{POS2LINPOS}->{$chart_id}->{$current_chart_pos};
|
235 |
-
my $to_linear_chart_pos = $sd_ht{POS2LINPOS}->{$chart_id}->{($current_chart_pos+1)};
|
236 |
-
# HHERE check this out
|
237 |
-
my $i = 1;
|
238 |
-
while (! (defined($to_linear_chart_pos))) {
|
239 |
-
$i++;
|
240 |
-
$to_linear_chart_pos = $sd_ht{POS2LINPOS}->{$chart_id}->{($current_chart_pos+$i)};
|
241 |
-
}
|
242 |
-
if (defined($from_linear_chart_pos) && defined($to_linear_chart_pos)) {
|
243 |
-
$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$from_linear_chart_pos}->{$to_linear_chart_pos}->{""} = 1
|
244 |
-
} else {
|
245 |
-
print STDERR " UNDEF.$chart_id from: "
|
246 |
-
. ((defined($from_linear_chart_pos)) ? $from_linear_chart_pos : "?")
|
247 |
-
. " to: "
|
248 |
-
. ((defined($to_linear_chart_pos)) ? $to_linear_chart_pos : "?")
|
249 |
-
. "\n";
|
250 |
-
}
|
251 |
-
}
|
252 |
-
}
|
253 |
-
$current_chart_pos++;
|
254 |
-
}
|
255 |
-
$sd_ht{MAXLINPOS}->{$chart_id} = $sd_ht{POS2LINPOS}->{$chart_id}->{$end_chart_pos};
|
256 |
-
}
|
257 |
-
|
258 |
-
sub expand_lin_ij_roman {
|
259 |
-
local($this, *sd_ht, $chart_id, $lang_code, *ht) = @_;
|
260 |
-
|
261 |
-
foreach $start (sort { $a <=> $b } keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}}) {
|
262 |
-
foreach $end (sort { $a <=> $b } keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$start}}) {
|
263 |
-
foreach $roman (sort keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$start}->{$end}}) {
|
264 |
-
if ($ht{RULE_STRING_HAS_EXPANSION}->{$lang_code}->{$roman}
|
265 |
-
|| $ht{RULE_STRING_HAS_EXPANSION}->{""}->{$roman}) {
|
266 |
-
$this->expand_lin_ij_roman_rec(*sd_ht, $chart_id, $start, $end, $roman, $lang_code, *ht);
|
267 |
-
}
|
268 |
-
}
|
269 |
-
}
|
270 |
-
}
|
271 |
-
}
|
272 |
-
|
273 |
-
sub expand_lin_ij_roman_rec {
|
274 |
-
local($this, *sd_ht, $chart_id, $start, $end, $roman, $lang_code, *ht) = @_;
|
275 |
-
|
276 |
-
# print STDERR " expand_lin_ij_roman_rec.$chart_id $start-$end $lang_code $roman\n";
|
277 |
-
return unless $ht{RULE_STRING_HAS_EXPANSION}->{$lang_code}->{$roman}
|
278 |
-
|| $ht{RULE_STRING_HAS_EXPANSION}->{""}->{$roman};
|
279 |
-
foreach $new_end (keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$end}}) {
|
280 |
-
foreach $next_roman (sort keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$end}->{$new_end}}) {
|
281 |
-
my $exp_roman = join("", $roman, $next_roman);
|
282 |
-
if ($ht{RULE_STRING}->{$lang_code}->{$exp_roman}
|
283 |
-
|| $ht{RULE_STRING}->{""}->{$exp_roman}) {
|
284 |
-
$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$start}->{$new_end}->{$exp_roman} = 1;
|
285 |
-
# print STDERR " Expansion ($start-$new_end) $exp_roman\n";
|
286 |
-
}
|
287 |
-
if ($ht{RULE_STRING_HAS_EXPANSION}->{$lang_code}->{$exp_roman}
|
288 |
-
|| $ht{RULE_STRING_HAS_EXPANSION}->{""}->{$exp_roman}) {
|
289 |
-
$this->expand_lin_ij_roman_rec(*sd_ht, $chart_id, $start, $new_end, $exp_roman, $lang_code, *ht);
|
290 |
-
}
|
291 |
-
}
|
292 |
-
}
|
293 |
-
}
|
294 |
-
|
295 |
-
sub trace_string_distance {
|
296 |
-
local($this, *sd_ht, $chart1_id, $chart2_id, $control, $line_number, $cost) = @_;
|
297 |
-
|
298 |
-
my $chart_comb_id = join("/", $chart1_id, $chart2_id);
|
299 |
-
return "mismatch" if $sd_ht{MISMATCH}->{$chart_comb_id};
|
300 |
-
my $chart1_end = $sd_ht{MAXLINPOS}->{$chart1_id};
|
301 |
-
my $chart2_end = $sd_ht{MAXLINPOS}->{$chart2_id};
|
302 |
-
my $verbose = ($control =~ /verbose/);
|
303 |
-
my $chunks_p = ($control =~ /chunks/);
|
304 |
-
my @traces = ();
|
305 |
-
my @s1_s = ();
|
306 |
-
my @s2_s = ();
|
307 |
-
my @e1_s = ();
|
308 |
-
my @e2_s = ();
|
309 |
-
my @r1_s = ();
|
310 |
-
my @r2_s = ();
|
311 |
-
my @ic_s = ();
|
312 |
-
|
313 |
-
# print STDERR "trace_string_distance $chart1_id $chart2_id $line_number\n";
|
314 |
-
while ($chart1_end || $chart2_end) {
|
315 |
-
my $incr_cost = $sd_ht{INCR_COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
316 |
-
my $prec_i = $sd_ht{PREC_I}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
317 |
-
my $prec_j = $sd_ht{PREC_J}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
318 |
-
if ($incr_cost || $verbose || $chunks_p) {
|
319 |
-
my $roman1 = $sd_ht{ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
320 |
-
my $roman2 = $sd_ht{ROMAN2}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
321 |
-
if ($verbose) {
|
322 |
-
push(@traces, "$prec_i-$chart1_end/$prec_j-$chart2_end:$roman1/$roman2:$incr_cost");
|
323 |
-
} else {
|
324 |
-
if (defined($roman1)) {
|
325 |
-
push(@traces, "$roman1/$roman2:$incr_cost");
|
326 |
-
} else {
|
327 |
-
$print_prec_i = (defined($prec_i)) ? $prec_i : "?";
|
328 |
-
$print_prec_j = (defined($prec_j)) ? $prec_j : "?";
|
329 |
-
print STDERR " $prec_i-$chart1_end, $prec_j-$chart2_end\n";
|
330 |
-
}
|
331 |
-
}
|
332 |
-
if ($chunks_p) {
|
333 |
-
push(@s1_s, $prec_i);
|
334 |
-
push(@s2_s, $prec_j);
|
335 |
-
push(@e1_s, $chart1_end);
|
336 |
-
push(@e2_s, $chart2_end);
|
337 |
-
push(@r1_s, $roman1);
|
338 |
-
push(@r2_s, $roman2);
|
339 |
-
push(@ic_s, $incr_cost);
|
340 |
-
}
|
341 |
-
}
|
342 |
-
$chart1_end = $prec_i;
|
343 |
-
$chart2_end = $prec_j;
|
344 |
-
}
|
345 |
-
if ($chunks_p) {
|
346 |
-
my $r1 = "";
|
347 |
-
my $r2 = "";
|
348 |
-
my $tc = 0;
|
349 |
-
my $in_chunk = 0;
|
350 |
-
foreach $i ((0 .. $#ic_s)) {
|
351 |
-
if ($ic_s[$i]) {
|
352 |
-
$r1 = $r1_s[$i] . $r1;
|
353 |
-
$r2 = $r2_s[$i] . $r2;
|
354 |
-
$tc += $ic_s[$i];
|
355 |
-
$in_chunk = 1;
|
356 |
-
} elsif ($in_chunk) {
|
357 |
-
$chunk = "$r1/$r2/$tc";
|
358 |
-
$chunk .= "*" if $cost > 5;
|
359 |
-
$sd_ht{N_COST_CHUNK}->{$chunk} = ($sd_ht{N_COST_CHUNK}->{$chunk} || 0) + 1;
|
360 |
-
$sd_ht{EX_COST_CHUNK}->{$chunk}->{$line_number} = 1;
|
361 |
-
$r1 = "";
|
362 |
-
$r2 = "";
|
363 |
-
$tc = 0;
|
364 |
-
$in_chunk = 0;
|
365 |
-
}
|
366 |
-
}
|
367 |
-
if ($in_chunk) {
|
368 |
-
$chunk = "$r1/$r2/$tc";
|
369 |
-
$chunk .= "*" if $cost > 5;
|
370 |
-
$sd_ht{N_COST_CHUNK}->{$chunk} = ($sd_ht{N_COST_CHUNK}->{$chunk} || 0) + 1;
|
371 |
-
$sd_ht{EX_COST_CHUNK}->{$chunk}->{$line_number} = 1;
|
372 |
-
}
|
373 |
-
} else {
|
374 |
-
return join(" ", reverse @traces);
|
375 |
-
}
|
376 |
-
}
|
377 |
-
|
378 |
-
sub right_context_match {
|
379 |
-
local($this, $right_context_rule, *sd_ht, $chart_id, $start_pos) = @_;
|
380 |
-
|
381 |
-
return 1 if $right_context_rule eq "";
|
382 |
-
if (($right_context_item, $right_context_rest) = ($right_context_rule =~ /^\[([^\[\]]*)\]*(.*)$/)) {
|
383 |
-
my $guarded_right_context_item = $right_context_item;
|
384 |
-
$guarded_right_context_item =~ s/\$/\\\$/g;
|
385 |
-
my @end_positions = keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$start_pos}};
|
386 |
-
return 1 if ($#end_positions == -1)
|
387 |
-
&& (($right_context_item eq "")
|
388 |
-
|| ($right_context_item =~ /\$/));
|
389 |
-
foreach $end_pos (@end_positions) {
|
390 |
-
my @romans = keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$start_pos}->{$end_pos}};
|
391 |
-
foreach $roman (@romans) {
|
392 |
-
if ($roman =~ /^[$guarded_right_context_item]/) {
|
393 |
-
return $this->right_context_match($right_context_rest, *sd_ht, $chart_id, $end_pos);
|
394 |
-
}
|
395 |
-
}
|
396 |
-
}
|
397 |
-
}
|
398 |
-
return 0;
|
399 |
-
}
|
400 |
-
|
401 |
-
sub string_distance {
|
402 |
-
local($this, *sd_ht, $chart1_id, $chart2_id, $lang_code1, $lang_code2, *ht, $control) = @_;
|
403 |
-
|
404 |
-
my $verbose = ($control =~ /verbose/i);
|
405 |
-
my $chart_comb_id = join("/", $chart1_id, $chart2_id);
|
406 |
-
|
407 |
-
my $chart1_end_pos = $sd_ht{MAXLINPOS}->{$chart1_id};
|
408 |
-
my $chart2_end_pos = $sd_ht{MAXLINPOS}->{$chart2_id};
|
409 |
-
print STDERR "string_distance.$chart_comb_id $chart1_end_pos/$chart2_end_pos\n" if $verbose;
|
410 |
-
$sd_ht{COST_IJ}->{$chart_comb_id}->{0}->{0} = 0;
|
411 |
-
$sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{0}->{0} = "";
|
412 |
-
$sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{0}->{0} = "";
|
413 |
-
# HHERE
|
414 |
-
foreach $chart1_start ((0 .. $chart1_end_pos)) {
|
415 |
-
# print STDERR " C1 $chart1_start- ($chart1_start .. $chart1_end_pos)\n";
|
416 |
-
my $prev_further_expansion_possible = 0;
|
417 |
-
my @chart1_ends = sort { $a <=> $b } keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart1_id}->{$chart1_start}};
|
418 |
-
my $max_chart1_ends = (@chart1_ends) ? $chart1_ends[$#chart1_ends] : -1;
|
419 |
-
foreach $chart1_end (($chart1_start .. $chart1_end_pos)) {
|
420 |
-
my $further_expansion_possible = ($chart1_start == $chart1_end)
|
421 |
-
|| defined($sd_ht{LINPOS2SPLITPOS}->{$chart1_id}->{$chart1_start})
|
422 |
-
|| ($chart1_end < $max_chart1_ends);
|
423 |
-
my @romans1 = (($chart1_start == $chart1_end)
|
424 |
-
? ("")
|
425 |
-
: (sort keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart1_id}->{$chart1_start}->{$chart1_end}}));
|
426 |
-
if ($#romans1 == -1) {
|
427 |
-
$further_expansion_possible = 1 if $prev_further_expansion_possible;
|
428 |
-
} else {
|
429 |
-
$prev_further_expansion_possible = 0;
|
430 |
-
}
|
431 |
-
# print STDERR " C1 $chart1_start-$chart1_end romans1: @romans1 {$further_expansion_possible} *l*\n";
|
432 |
-
foreach $roman1 (@romans1) {
|
433 |
-
# print STDERR " C1 $chart1_start-$chart1_end $roman1 {$further_expansion_possible} *?*\n";
|
434 |
-
next unless $ht{RULE_STRING}->{$lang_code1}->{$roman1}
|
435 |
-
|| $ht{RULE_STRING}->{""}->{$roman1};
|
436 |
-
# print STDERR " C1 $chart1_start-$chart1_end $roman1 {$further_expansion_possible} ***\n";
|
437 |
-
foreach $lang_code1o (($lang_code1, "")) {
|
438 |
-
foreach $lang_code2o (($lang_code2, "")) {
|
439 |
-
my @chart2_starts = (sort { $a <=> $b } keys %{$sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_start}});
|
440 |
-
foreach $chart2_start (@chart2_starts) {
|
441 |
-
# print STDERR " C1 $chart1_start-$chart1_end $roman1 C2 $chart2_start- (@chart2_starts)\n";
|
442 |
-
foreach $chart2_end (($chart2_start .. $chart2_end_pos)) {
|
443 |
-
print STDERR " C1 $chart1_start-$chart1_end $roman1 C2 $chart2_start-$chart2_end\n";
|
444 |
-
my @romans2 = (($chart2_start == $chart2_end)
|
445 |
-
? ("")
|
446 |
-
: (sort keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart2_id}->{$chart2_start}->{$chart2_end}}));
|
447 |
-
foreach $roman2 (@romans2) {
|
448 |
-
if ($roman1 eq $roman2) {
|
449 |
-
print STDERR " C1 $chart1_start-$chart1_end $roman1 C2 $chart2_start-$chart2_end $roman2 (IDENTITY)\n";
|
450 |
-
my $cost = 0;
|
451 |
-
my $preceding_cost = $sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_start}->{$chart2_start};
|
452 |
-
my $combined_cost = $preceding_cost + $cost;
|
453 |
-
my $old_cost = $sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
454 |
-
if ((! defined($old_cost)) || ($combined_cost < $old_cost)) {
|
455 |
-
$sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $combined_cost;
|
456 |
-
push(@chart2_starts, $chart2_end) unless $util->member($chart2_end, @chart2_starts);
|
457 |
-
$sd_ht{PREC_I}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $chart1_start;
|
458 |
-
$sd_ht{PREC_J}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $chart2_start;
|
459 |
-
$sd_ht{ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $roman1;
|
460 |
-
$sd_ht{ROMAN2}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $roman2;
|
461 |
-
$sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end}
|
462 |
-
= $sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_start}->{$chart2_start} . $roman1;
|
463 |
-
$sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{$chart1_end}->{$chart2_end}
|
464 |
-
= $sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{$chart1_start}->{$chart2_start} . $roman2;
|
465 |
-
$comb_left_roman1 = $sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
466 |
-
$sd_ht{INCR_COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $cost;
|
467 |
-
$sd_ht{COST_RULE}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = "IDENTITY";
|
468 |
-
print STDERR " New cost $chart1_end/$chart2_end: $combined_cost (+$cost from $chart1_start/$chart2_start $roman1/$roman2)\n" if $verbose;
|
469 |
-
}
|
470 |
-
} else {
|
471 |
-
next unless $ht{RULE_STRING}->{$lang_code2o}->{$roman2};
|
472 |
-
print STDERR " C1 $chart1_start-$chart1_end $roman1 C2 $chart2_start-$chart2_end $roman2\n";
|
473 |
-
next unless defined($ht{COST}->{$lang_code1o}->{$lang_code2o}->{$roman1}->{$roman2});
|
474 |
-
my @cost_rule_ids = keys %{$ht{COST}->{$lang_code1o}->{$lang_code2o}->{$roman1}->{$roman2}};
|
475 |
-
foreach $cost_rule_id (@cost_rule_ids) {
|
476 |
-
## check whether any context requirements are satisfied
|
477 |
-
# left context rules are regular expressions
|
478 |
-
my $left_context_rule1 = $ht{LEFT1}->{$cost_rule_id};
|
479 |
-
if ($left_context_rule1) {
|
480 |
-
my $comb_left_roman1 = $sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_start}->{$chart2_start};
|
481 |
-
if (defined($comb_left_roman1)) {
|
482 |
-
next unless $comb_left_roman1 =~ /$left_context_rule1/;
|
483 |
-
} else {
|
484 |
-
print STDERR " No comb_left_roman1 value for $chart_comb_id $chart1_start,$chart2_start\n";
|
485 |
-
}
|
486 |
-
}
|
487 |
-
my $left_context_rule2 = $ht{LEFT2}->{$cost_rule_id};
|
488 |
-
if ($left_context_rule2) {
|
489 |
-
my $comb_left_roman2 = $sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{$chart1_start}->{$chart2_start};
|
490 |
-
if (defined($comb_left_roman2)) {
|
491 |
-
next unless $comb_left_roman2 =~ /$left_context_rule2/;
|
492 |
-
} else {
|
493 |
-
print STDERR " No comb_left_roman2 value for $chart_comb_id $chart1_start,$chart2_start\n";
|
494 |
-
}
|
495 |
-
}
|
496 |
-
my $right_context_rule1 = $ht{RIGHT1}->{$cost_rule_id};
|
497 |
-
if ($right_context_rule1) {
|
498 |
-
my $match_p = $this->right_context_match($right_context_rule1, *sd_ht, $chart1_id, $chart1_end);
|
499 |
-
# print STDERR " Match?($right_context_rule1, 1, $chart1_end) = $match_p\n";
|
500 |
-
next unless $match_p;
|
501 |
-
}
|
502 |
-
my $right_context_rule2 = $ht{RIGHT2}->{$cost_rule_id};
|
503 |
-
if ($right_context_rule2) {
|
504 |
-
my $match_p = $this->right_context_match($right_context_rule2, *sd_ht, $chart2_id, $chart2_end);
|
505 |
-
# print STDERR " Match?($right_context_rule2, 2, $chart2_end) = $match_p\n";
|
506 |
-
next unless $match_p;
|
507 |
-
}
|
508 |
-
my $cost = $ht{COST}->{$lang_code1o}->{$lang_code2o}->{$roman1}->{$roman2}->{$cost_rule_id};
|
509 |
-
my $preceding_cost = $sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_start}->{$chart2_start};
|
510 |
-
my $combined_cost = $preceding_cost + $cost;
|
511 |
-
my $old_cost = $sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
512 |
-
if ((! defined($old_cost)) || ($combined_cost < $old_cost)) {
|
513 |
-
$sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $combined_cost;
|
514 |
-
push(@chart2_starts, $chart2_end) unless $util->member($chart2_end, @chart2_starts);
|
515 |
-
$sd_ht{PREC_I}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $chart1_start;
|
516 |
-
$sd_ht{PREC_J}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $chart2_start;
|
517 |
-
$sd_ht{ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $roman1;
|
518 |
-
$sd_ht{ROMAN2}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $roman2;
|
519 |
-
$sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end}
|
520 |
-
= $sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_start}->{$chart2_start} . $roman1;
|
521 |
-
$sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{$chart1_end}->{$chart2_end}
|
522 |
-
= $sd_ht{COMB_LEFT_ROMAN2}->{$chart_comb_id}->{$chart1_start}->{$chart2_start} . $roman2;
|
523 |
-
$comb_left_roman1 = $sd_ht{COMB_LEFT_ROMAN1}->{$chart_comb_id}->{$chart1_end}->{$chart2_end};
|
524 |
-
# print STDERR " Comb-left-roman1($chart_comb_id,$chart1_end,$chart2_end) = $comb_left_roman1\n";
|
525 |
-
$sd_ht{INCR_COST_IJ}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $cost;
|
526 |
-
$sd_ht{COST_RULE}->{$chart_comb_id}->{$chart1_end}->{$chart2_end} = $cost_rule_id;
|
527 |
-
print STDERR " New cost $chart1_end/$chart2_end: $combined_cost (+$cost from $chart1_start/$chart2_start $roman1/$roman2)\n" if $verbose;
|
528 |
-
}
|
529 |
-
}
|
530 |
-
}
|
531 |
-
}
|
532 |
-
}
|
533 |
-
}
|
534 |
-
}
|
535 |
-
}
|
536 |
-
$further_expansion_possible = 1
|
537 |
-
if $ht{RULE_STRING_HAS_EXPANSION}->{$lang_code1}->{$roman1}
|
538 |
-
|| $ht{RULE_STRING_HAS_EXPANSION}->{""}->{$roman1};
|
539 |
-
# print STDERR " further_expansion_possible: $further_expansion_possible (lc: $lang_code1 r1: $roman1) ***\n";
|
540 |
-
}
|
541 |
-
# print STDERR " last C1 $chart1_start-$chart1_end (@romans1)\n" unless $further_expansion_possible;
|
542 |
-
last unless $further_expansion_possible;
|
543 |
-
$prev_further_expansion_possible = 1 if $further_expansion_possible;
|
544 |
-
}
|
545 |
-
}
|
546 |
-
my $total_cost = $sd_ht{COST_IJ}->{$chart_comb_id}->{$chart1_end_pos}->{$chart2_end_pos};
|
547 |
-
unless (defined($total_cost)) {
|
548 |
-
$total_cost = 99.9999;
|
549 |
-
$sd_ht{MISMATCH}->{$chart_comb_id} = 1;
|
550 |
-
}
|
551 |
-
return $total_cost;
|
552 |
-
}
|
553 |
-
|
554 |
-
sub print_sd_ht {
|
555 |
-
local($this, *sd_ht, $chart1_id, $chart2_id, *OUT) = @_;
|
556 |
-
|
557 |
-
print OUT "string-distance chart:\n";
|
558 |
-
foreach $chart_id (($chart1_id, $chart2_id)) {
|
559 |
-
print OUT "SD chart $chart_id:\n";
|
560 |
-
foreach $from_linear_chart_pos (sort { $a <=> $b } keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}}) {
|
561 |
-
foreach $to_linear_chart_pos (sort { $a <=> $b } keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$from_linear_chart_pos}}) {
|
562 |
-
foreach $roman_char (sort keys %{$sd_ht{LIN_IJ_ROMAN}->{$chart_id}->{$from_linear_chart_pos}->{$to_linear_chart_pos}}) {
|
563 |
-
print OUT " Lnode($from_linear_chart_pos-$to_linear_chart_pos): $roman_char\n";
|
564 |
-
}
|
565 |
-
}
|
566 |
-
}
|
567 |
-
}
|
568 |
-
}
|
569 |
-
|
570 |
-
sub print_chart_ht {
|
571 |
-
local($this, *chart_ht, *OUT) = @_;
|
572 |
-
|
573 |
-
print OUT "uroman chart:\n";
|
574 |
-
foreach $start (sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AT}}) {
|
575 |
-
foreach $end (sort { $a <=> $b } keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}}) {
|
576 |
-
foreach $node_id (keys %{$chart_ht{NODES_STARTING_AND_ENDING_AT}->{$start}->{$end}}) {
|
577 |
-
$roman_s = $chart_ht{NODE_ROMAN}->{$node_id};
|
578 |
-
print OUT " Node $node_id ($start-$end): $roman_s\n";
|
579 |
-
}
|
580 |
-
}
|
581 |
-
}
|
582 |
-
}
|
583 |
-
|
584 |
-
sub normalize_string {
|
585 |
-
local($this, $s) = @_;
|
586 |
-
|
587 |
-
# $s =~ s/(\xE2\x80\x8C)//g; # delete zero width non-joiner
|
588 |
-
$s =~ s/(\xE2\x80[\x93-\x94])/-/g; # en-dash, em-dash
|
589 |
-
$s =~ s/([\x00-\x7F\xC0-\xFE][\x80-\xBF]*)\1+/$1$1/g; # shorten 3 or more occurrences of same character in a row to 2
|
590 |
-
$s =~ s/[ \t]+/ /g;
|
591 |
-
|
592 |
-
return $s;
|
593 |
-
}
|
594 |
-
|
595 |
-
my $string_distance_chart_id = 0;
|
596 |
-
sub string_distance_by_chart {
|
597 |
-
local($this, $s1, $s2, $lang_code1, $lang_code2, *ht, *pinyin_ht, $control) = @_;
|
598 |
-
|
599 |
-
$control = "" unless defined($control);
|
600 |
-
%sd_ht = ();
|
601 |
-
|
602 |
-
$s1 = $this->normalize_string($s1);
|
603 |
-
my $lc_s1 = $utf8->extended_lower_case($s1);
|
604 |
-
$string_distance_chart_id++;
|
605 |
-
my $chart1_id = $string_distance_chart_id;
|
606 |
-
*chart_ht = $romanizer->romanize($lc_s1, $lang_code1, "", *ht, *pinyin_ht, 0, "return chart", $chart1_id);
|
607 |
-
$this->linearize_chart_points(*chart_ht, $chart1_id, *sd_ht);
|
608 |
-
$this->expand_lin_ij_roman(*sd_ht, $chart1_id, $lang_code1, *ht);
|
609 |
-
|
610 |
-
$s2 = $this->normalize_string($s2);
|
611 |
-
my $lc_s2 = $utf8->extended_lower_case($s2);
|
612 |
-
$string_distance_chart_id++;
|
613 |
-
my $chart2_id = $string_distance_chart_id;
|
614 |
-
*chart_ht = $romanizer->romanize($lc_s2, $lang_code2, "", *ht, *pinyin_ht, 0, "return chart", $chart2_id);
|
615 |
-
$this->linearize_chart_points(*chart_ht, $chart2_id, *sd_ht);
|
616 |
-
$this->expand_lin_ij_roman(*sd_ht, $chart2_id, $lang_code2, *ht);
|
617 |
-
|
618 |
-
my $cost = $this->string_distance(*sd_ht, $chart1_id, $chart2_id, $lang_code1, $lang_code2, *ht, $control);
|
619 |
-
return $cost;
|
620 |
-
}
|
621 |
-
|
622 |
-
my $n_quick_romanized_string_distance = 0;
|
623 |
-
sub quick_romanized_string_distance_by_chart {
|
624 |
-
local($this, $s1, $s2, *ht, $control, $lang_code1, $lang_code2) = @_;
|
625 |
-
|
626 |
-
# my $verbose = ($s1 eq "apit") && ($s2 eq "apet");
|
627 |
-
# print STDERR "Start quick_romanized_string_distance_by_chart\n";
|
628 |
-
$s1 = lc $s1;
|
629 |
-
$s2 = lc $s2;
|
630 |
-
$control = "" unless defined($control);
|
631 |
-
$lang_code1 = "" unless defined($lang_code1);
|
632 |
-
$lang_code2 = "" unless defined($lang_code2);
|
633 |
-
my $cache_p = ($control =~ /cache/);
|
634 |
-
my $total_cost;
|
635 |
-
if ($cache_p) {
|
636 |
-
$total_cost = $ht{CACHED_QRSD}->{$s1}->{$s2};
|
637 |
-
if (defined($total_cost)) {
|
638 |
-
return $total_cost;
|
639 |
-
}
|
640 |
-
}
|
641 |
-
my @lang_codes1 = ($lang_code1 eq "") ? ("") : ($lang_code1, "");
|
642 |
-
my @lang_codes2 = ($lang_code2 eq "") ? ("") : ($lang_code2, "");
|
643 |
-
my $chart1_end_pos = length($s1);
|
644 |
-
my $chart2_end_pos = length($s2);
|
645 |
-
my %sd_ht = ();
|
646 |
-
$sd_ht{COST_IJ}->{0}->{0} = 0;
|
647 |
-
foreach $chart1_start ((0 .. $chart1_end_pos)) {
|
648 |
-
foreach $chart1_end (($chart1_start .. $chart1_end_pos)) {
|
649 |
-
my $substr1 = substr($s1, $chart1_start, ($chart1_end-$chart1_start));
|
650 |
-
foreach $lang_code1o (@lang_codes1) {
|
651 |
-
foreach $lang_code2o (@lang_codes2) {
|
652 |
-
# next unless defined($ht{COST}->{$lang_code1o}->{$lang_code2o}->{$substr1});
|
653 |
-
}
|
654 |
-
}
|
655 |
-
my @chart2_starts = (sort { $a <=> $b } keys %{$sd_ht{COST_IJ}->{$chart1_start}});
|
656 |
-
foreach $chart2_start (@chart2_starts) {
|
657 |
-
foreach $chart2_end (($chart2_start .. $chart2_end_pos)) {
|
658 |
-
my $substr2 = substr($s2, $chart2_start, ($chart2_end-$chart2_start));
|
659 |
-
foreach $lang_code1o (@lang_codes1) {
|
660 |
-
foreach $lang_code2o (@lang_codes2) {
|
661 |
-
if ($substr1 eq $substr2) {
|
662 |
-
my $cost = 0;
|
663 |
-
my $preceding_cost = $sd_ht{COST_IJ}->{$chart1_start}->{$chart2_start};
|
664 |
-
if (defined($preceding_cost)) {
|
665 |
-
my $combined_cost = $preceding_cost + $cost;
|
666 |
-
my $old_cost = $sd_ht{COST_IJ}->{$chart1_end}->{$chart2_end};
|
667 |
-
if ((! defined($old_cost)) || ($combined_cost < $old_cost)) {
|
668 |
-
$sd_ht{COST_IJ}->{$chart1_end}->{$chart2_end} = $combined_cost;
|
669 |
-
push(@chart2_starts, $chart2_end) unless $util->member($chart2_end, @chart2_starts);
|
670 |
-
}
|
671 |
-
}
|
672 |
-
} else {
|
673 |
-
next unless defined($ht{COST}->{$lang_code1o}->{$lang_code2o}->{$substr1}->{$substr2});
|
674 |
-
my @cost_rule_ids = keys %{$ht{COST}->{$lang_code1o}->{$lang_code2o}->{$substr1}->{$substr2}};
|
675 |
-
my $best_cost = 99.99;
|
676 |
-
foreach $cost_rule_id (@cost_rule_ids) {
|
677 |
-
my $cost = $ht{COST}->{$lang_code1o}->{$lang_code2o}->{$substr1}->{$substr2}->{$cost_rule_id};
|
678 |
-
my $left_context_rule1 = $ht{LEFT1}->{$cost_rule_id};
|
679 |
-
next if $left_context_rule1
|
680 |
-
&& (! (substr($s1, 0, $chart1_start) =~ /$left_context_rule1/));
|
681 |
-
my $left_context_rule2 = $ht{LEFT2}->{$cost_rule_id};
|
682 |
-
next if $left_context_rule2
|
683 |
-
&& (! (substr($s2, 0, $chart2_start) =~ /$left_context_rule2/));
|
684 |
-
my $right_context_rule1 = $ht{RIGHT1}->{$cost_rule_id};
|
685 |
-
my $right_context1 = substr($s1, $chart1_end);
|
686 |
-
next if $right_context_rule1
|
687 |
-
&& (! (($right_context1 =~ /^$right_context_rule1/)
|
688 |
-
|| (($right_context_rule1 =~ /^\[[^\[\]]*\$/)
|
689 |
-
&& ($right_context1 eq ""))));
|
690 |
-
my $right_context_rule2 = $ht{RIGHT2}->{$cost_rule_id};
|
691 |
-
my $right_context2 = substr($s2, $chart2_end);
|
692 |
-
next if $right_context_rule2
|
693 |
-
&& (! (($right_context2 =~ /^$right_context_rule2/)
|
694 |
-
|| (($right_context_rule2 =~ /^\[[^\[\]]*\$/)
|
695 |
-
&& ($right_context2 eq ""))));
|
696 |
-
$best_cost = $cost if $cost < $best_cost;
|
697 |
-
my $preceding_cost = $sd_ht{COST_IJ}->{$chart1_start}->{$chart2_start};
|
698 |
-
my $combined_cost = $preceding_cost + $cost;
|
699 |
-
my $old_cost = $sd_ht{COST_IJ}->{$chart1_end}->{$chart2_end};
|
700 |
-
if ((! defined($old_cost)) || ($combined_cost < $old_cost)) {
|
701 |
-
$sd_ht{COST_IJ}->{$chart1_end}->{$chart2_end} = $combined_cost;
|
702 |
-
push(@chart2_starts, $chart2_end) unless $util->member($chart2_end, @chart2_starts);
|
703 |
-
}
|
704 |
-
}
|
705 |
-
}
|
706 |
-
}
|
707 |
-
}
|
708 |
-
}
|
709 |
-
}
|
710 |
-
}
|
711 |
-
}
|
712 |
-
$total_cost = $sd_ht{COST_IJ}->{$chart1_end_pos}->{$chart2_end_pos};
|
713 |
-
$total_cost = 99.99 unless defined($total_cost);
|
714 |
-
$ht{CACHED_QRSD}->{$s1}->{$s2} = $total_cost if $cache_p;
|
715 |
-
$n_quick_romanized_string_distance++;
|
716 |
-
return $total_cost;
|
717 |
-
}
|
718 |
-
|
719 |
-
sub get_n_quick_romanized_string_distance {
|
720 |
-
return $n_quick_romanized_string_distance;
|
721 |
-
}
|
722 |
-
|
723 |
-
1;
|
724 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Albertha/qwe123/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Qwe123
|
3 |
-
emoji: 🏃
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/css/chunk-vendors.0ede84b4.css
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/Alpaca233/SadTalker/src/face3d/models/template_model.py
DELETED
@@ -1,100 +0,0 @@
|
|
1 |
-
"""Model class template
|
2 |
-
|
3 |
-
This module provides a template for users to implement custom models.
|
4 |
-
You can specify '--model template' to use this model.
|
5 |
-
The class name should be consistent with both the filename and its model option.
|
6 |
-
The filename should be <model>_dataset.py
|
7 |
-
The class name should be <Model>Dataset.py
|
8 |
-
It implements a simple image-to-image translation baseline based on regression loss.
|
9 |
-
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
|
10 |
-
min_<netG> ||netG(data_A) - data_B||_1
|
11 |
-
You need to implement the following functions:
|
12 |
-
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
|
13 |
-
<__init__>: Initialize this model class.
|
14 |
-
<set_input>: Unpack input data and perform data pre-processing.
|
15 |
-
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
|
16 |
-
<optimize_parameters>: Update network weights; it will be called in every training iteration.
|
17 |
-
"""
|
18 |
-
import numpy as np
|
19 |
-
import torch
|
20 |
-
from .base_model import BaseModel
|
21 |
-
from . import networks
|
22 |
-
|
23 |
-
|
24 |
-
class TemplateModel(BaseModel):
|
25 |
-
@staticmethod
|
26 |
-
def modify_commandline_options(parser, is_train=True):
|
27 |
-
"""Add new model-specific options and rewrite default values for existing options.
|
28 |
-
|
29 |
-
Parameters:
|
30 |
-
parser -- the option parser
|
31 |
-
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
|
32 |
-
|
33 |
-
Returns:
|
34 |
-
the modified parser.
|
35 |
-
"""
|
36 |
-
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
|
37 |
-
if is_train:
|
38 |
-
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
|
39 |
-
|
40 |
-
return parser
|
41 |
-
|
42 |
-
def __init__(self, opt):
|
43 |
-
"""Initialize this model class.
|
44 |
-
|
45 |
-
Parameters:
|
46 |
-
opt -- training/test options
|
47 |
-
|
48 |
-
A few things can be done here.
|
49 |
-
- (required) call the initialization function of BaseModel
|
50 |
-
- define loss function, visualization images, model names, and optimizers
|
51 |
-
"""
|
52 |
-
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
|
53 |
-
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
|
54 |
-
self.loss_names = ['loss_G']
|
55 |
-
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
|
56 |
-
self.visual_names = ['data_A', 'data_B', 'output']
|
57 |
-
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
|
58 |
-
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
|
59 |
-
self.model_names = ['G']
|
60 |
-
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
|
61 |
-
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
|
62 |
-
if self.isTrain: # only defined during training time
|
63 |
-
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
|
64 |
-
# We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
|
65 |
-
self.criterionLoss = torch.nn.L1Loss()
|
66 |
-
# define and initialize optimizers. You can define one optimizer for each network.
|
67 |
-
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
|
68 |
-
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
|
69 |
-
self.optimizers = [self.optimizer]
|
70 |
-
|
71 |
-
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
|
72 |
-
|
73 |
-
def set_input(self, input):
|
74 |
-
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
|
75 |
-
|
76 |
-
Parameters:
|
77 |
-
input: a dictionary that contains the data itself and its metadata information.
|
78 |
-
"""
|
79 |
-
AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
|
80 |
-
self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
|
81 |
-
self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
|
82 |
-
self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
|
83 |
-
|
84 |
-
def forward(self):
|
85 |
-
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
|
86 |
-
self.output = self.netG(self.data_A) # generate output image given the input data_A
|
87 |
-
|
88 |
-
def backward(self):
|
89 |
-
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
|
90 |
-
# caculate the intermediate results if necessary; here self.output has been computed during function <forward>
|
91 |
-
# calculate loss given the input and intermediate results
|
92 |
-
self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
|
93 |
-
self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
|
94 |
-
|
95 |
-
def optimize_parameters(self):
|
96 |
-
"""Update network weights; it will be called in every training iteration."""
|
97 |
-
self.forward() # first call forward to calculate intermediate results
|
98 |
-
self.optimizer.zero_grad() # clear network G's existing gradients
|
99 |
-
self.backward() # calculate gradients for network G
|
100 |
-
self.optimizer.step() # update gradients for network G
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_txt2img.py
DELETED
@@ -1,928 +0,0 @@
|
|
1 |
-
#
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
4 |
-
# SPDX-License-Identifier: Apache-2.0
|
5 |
-
#
|
6 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
7 |
-
# you may not use this file except in compliance with the License.
|
8 |
-
# You may obtain a copy of the License at
|
9 |
-
#
|
10 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
-
#
|
12 |
-
# Unless required by applicable law or agreed to in writing, software
|
13 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
-
# See the License for the specific language governing permissions and
|
16 |
-
# limitations under the License.
|
17 |
-
|
18 |
-
import gc
|
19 |
-
import os
|
20 |
-
from collections import OrderedDict
|
21 |
-
from copy import copy
|
22 |
-
from typing import List, Optional, Union
|
23 |
-
|
24 |
-
import numpy as np
|
25 |
-
import onnx
|
26 |
-
import onnx_graphsurgeon as gs
|
27 |
-
import tensorrt as trt
|
28 |
-
import torch
|
29 |
-
from huggingface_hub import snapshot_download
|
30 |
-
from onnx import shape_inference
|
31 |
-
from polygraphy import cuda
|
32 |
-
from polygraphy.backend.common import bytes_from_path
|
33 |
-
from polygraphy.backend.onnx.loader import fold_constants
|
34 |
-
from polygraphy.backend.trt import (
|
35 |
-
CreateConfig,
|
36 |
-
Profile,
|
37 |
-
engine_from_bytes,
|
38 |
-
engine_from_network,
|
39 |
-
network_from_onnx_path,
|
40 |
-
save_engine,
|
41 |
-
)
|
42 |
-
from polygraphy.backend.trt import util as trt_util
|
43 |
-
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
44 |
-
|
45 |
-
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
46 |
-
from diffusers.pipelines.stable_diffusion import (
|
47 |
-
StableDiffusionPipeline,
|
48 |
-
StableDiffusionPipelineOutput,
|
49 |
-
StableDiffusionSafetyChecker,
|
50 |
-
)
|
51 |
-
from diffusers.schedulers import DDIMScheduler
|
52 |
-
from diffusers.utils import DIFFUSERS_CACHE, logging
|
53 |
-
|
54 |
-
|
55 |
-
"""
|
56 |
-
Installation instructions
|
57 |
-
python3 -m pip install --upgrade transformers diffusers>=0.16.0
|
58 |
-
python3 -m pip install --upgrade tensorrt>=8.6.1
|
59 |
-
python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
|
60 |
-
python3 -m pip install onnxruntime
|
61 |
-
"""
|
62 |
-
|
63 |
-
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
|
64 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
65 |
-
|
66 |
-
# Map of numpy dtype -> torch dtype
|
67 |
-
numpy_to_torch_dtype_dict = {
|
68 |
-
np.uint8: torch.uint8,
|
69 |
-
np.int8: torch.int8,
|
70 |
-
np.int16: torch.int16,
|
71 |
-
np.int32: torch.int32,
|
72 |
-
np.int64: torch.int64,
|
73 |
-
np.float16: torch.float16,
|
74 |
-
np.float32: torch.float32,
|
75 |
-
np.float64: torch.float64,
|
76 |
-
np.complex64: torch.complex64,
|
77 |
-
np.complex128: torch.complex128,
|
78 |
-
}
|
79 |
-
if np.version.full_version >= "1.24.0":
|
80 |
-
numpy_to_torch_dtype_dict[np.bool_] = torch.bool
|
81 |
-
else:
|
82 |
-
numpy_to_torch_dtype_dict[np.bool] = torch.bool
|
83 |
-
|
84 |
-
# Map of torch dtype -> numpy dtype
|
85 |
-
torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
|
86 |
-
|
87 |
-
|
88 |
-
def device_view(t):
|
89 |
-
return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
|
90 |
-
|
91 |
-
|
92 |
-
class Engine:
|
93 |
-
def __init__(self, engine_path):
|
94 |
-
self.engine_path = engine_path
|
95 |
-
self.engine = None
|
96 |
-
self.context = None
|
97 |
-
self.buffers = OrderedDict()
|
98 |
-
self.tensors = OrderedDict()
|
99 |
-
|
100 |
-
def __del__(self):
|
101 |
-
[buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
|
102 |
-
del self.engine
|
103 |
-
del self.context
|
104 |
-
del self.buffers
|
105 |
-
del self.tensors
|
106 |
-
|
107 |
-
def build(
|
108 |
-
self,
|
109 |
-
onnx_path,
|
110 |
-
fp16,
|
111 |
-
input_profile=None,
|
112 |
-
enable_preview=False,
|
113 |
-
enable_all_tactics=False,
|
114 |
-
timing_cache=None,
|
115 |
-
workspace_size=0,
|
116 |
-
):
|
117 |
-
logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
|
118 |
-
p = Profile()
|
119 |
-
if input_profile:
|
120 |
-
for name, dims in input_profile.items():
|
121 |
-
assert len(dims) == 3
|
122 |
-
p.add(name, min=dims[0], opt=dims[1], max=dims[2])
|
123 |
-
|
124 |
-
config_kwargs = {}
|
125 |
-
|
126 |
-
config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
|
127 |
-
if enable_preview:
|
128 |
-
# Faster dynamic shapes made optional since it increases engine build time.
|
129 |
-
config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
|
130 |
-
if workspace_size > 0:
|
131 |
-
config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
|
132 |
-
if not enable_all_tactics:
|
133 |
-
config_kwargs["tactic_sources"] = []
|
134 |
-
|
135 |
-
engine = engine_from_network(
|
136 |
-
network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
|
137 |
-
config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
|
138 |
-
save_timing_cache=timing_cache,
|
139 |
-
)
|
140 |
-
save_engine(engine, path=self.engine_path)
|
141 |
-
|
142 |
-
def load(self):
|
143 |
-
logger.warning(f"Loading TensorRT engine: {self.engine_path}")
|
144 |
-
self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
|
145 |
-
|
146 |
-
def activate(self):
|
147 |
-
self.context = self.engine.create_execution_context()
|
148 |
-
|
149 |
-
def allocate_buffers(self, shape_dict=None, device="cuda"):
|
150 |
-
for idx in range(trt_util.get_bindings_per_profile(self.engine)):
|
151 |
-
binding = self.engine[idx]
|
152 |
-
if shape_dict and binding in shape_dict:
|
153 |
-
shape = shape_dict[binding]
|
154 |
-
else:
|
155 |
-
shape = self.engine.get_binding_shape(binding)
|
156 |
-
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
|
157 |
-
if self.engine.binding_is_input(binding):
|
158 |
-
self.context.set_binding_shape(idx, shape)
|
159 |
-
tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
|
160 |
-
self.tensors[binding] = tensor
|
161 |
-
self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
|
162 |
-
|
163 |
-
def infer(self, feed_dict, stream):
|
164 |
-
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
|
165 |
-
# shallow copy of ordered dict
|
166 |
-
device_buffers = copy(self.buffers)
|
167 |
-
for name, buf in feed_dict.items():
|
168 |
-
assert isinstance(buf, cuda.DeviceView)
|
169 |
-
device_buffers[name] = buf
|
170 |
-
bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
|
171 |
-
noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
|
172 |
-
if not noerror:
|
173 |
-
raise ValueError("ERROR: inference failed.")
|
174 |
-
|
175 |
-
return self.tensors
|
176 |
-
|
177 |
-
|
178 |
-
class Optimizer:
|
179 |
-
def __init__(self, onnx_graph):
|
180 |
-
self.graph = gs.import_onnx(onnx_graph)
|
181 |
-
|
182 |
-
def cleanup(self, return_onnx=False):
|
183 |
-
self.graph.cleanup().toposort()
|
184 |
-
if return_onnx:
|
185 |
-
return gs.export_onnx(self.graph)
|
186 |
-
|
187 |
-
def select_outputs(self, keep, names=None):
|
188 |
-
self.graph.outputs = [self.graph.outputs[o] for o in keep]
|
189 |
-
if names:
|
190 |
-
for i, name in enumerate(names):
|
191 |
-
self.graph.outputs[i].name = name
|
192 |
-
|
193 |
-
def fold_constants(self, return_onnx=False):
|
194 |
-
onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
|
195 |
-
self.graph = gs.import_onnx(onnx_graph)
|
196 |
-
if return_onnx:
|
197 |
-
return onnx_graph
|
198 |
-
|
199 |
-
def infer_shapes(self, return_onnx=False):
|
200 |
-
onnx_graph = gs.export_onnx(self.graph)
|
201 |
-
if onnx_graph.ByteSize() > 2147483648:
|
202 |
-
raise TypeError("ERROR: model size exceeds supported 2GB limit")
|
203 |
-
else:
|
204 |
-
onnx_graph = shape_inference.infer_shapes(onnx_graph)
|
205 |
-
|
206 |
-
self.graph = gs.import_onnx(onnx_graph)
|
207 |
-
if return_onnx:
|
208 |
-
return onnx_graph
|
209 |
-
|
210 |
-
|
211 |
-
class BaseModel:
|
212 |
-
def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
|
213 |
-
self.model = model
|
214 |
-
self.name = "SD Model"
|
215 |
-
self.fp16 = fp16
|
216 |
-
self.device = device
|
217 |
-
|
218 |
-
self.min_batch = 1
|
219 |
-
self.max_batch = max_batch_size
|
220 |
-
self.min_image_shape = 256 # min image resolution: 256x256
|
221 |
-
self.max_image_shape = 1024 # max image resolution: 1024x1024
|
222 |
-
self.min_latent_shape = self.min_image_shape // 8
|
223 |
-
self.max_latent_shape = self.max_image_shape // 8
|
224 |
-
|
225 |
-
self.embedding_dim = embedding_dim
|
226 |
-
self.text_maxlen = text_maxlen
|
227 |
-
|
228 |
-
def get_model(self):
|
229 |
-
return self.model
|
230 |
-
|
231 |
-
def get_input_names(self):
|
232 |
-
pass
|
233 |
-
|
234 |
-
def get_output_names(self):
|
235 |
-
pass
|
236 |
-
|
237 |
-
def get_dynamic_axes(self):
|
238 |
-
return None
|
239 |
-
|
240 |
-
def get_sample_input(self, batch_size, image_height, image_width):
|
241 |
-
pass
|
242 |
-
|
243 |
-
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
|
244 |
-
return None
|
245 |
-
|
246 |
-
def get_shape_dict(self, batch_size, image_height, image_width):
|
247 |
-
return None
|
248 |
-
|
249 |
-
def optimize(self, onnx_graph):
|
250 |
-
opt = Optimizer(onnx_graph)
|
251 |
-
opt.cleanup()
|
252 |
-
opt.fold_constants()
|
253 |
-
opt.infer_shapes()
|
254 |
-
onnx_opt_graph = opt.cleanup(return_onnx=True)
|
255 |
-
return onnx_opt_graph
|
256 |
-
|
257 |
-
def check_dims(self, batch_size, image_height, image_width):
|
258 |
-
assert batch_size >= self.min_batch and batch_size <= self.max_batch
|
259 |
-
assert image_height % 8 == 0 or image_width % 8 == 0
|
260 |
-
latent_height = image_height // 8
|
261 |
-
latent_width = image_width // 8
|
262 |
-
assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
|
263 |
-
assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
|
264 |
-
return (latent_height, latent_width)
|
265 |
-
|
266 |
-
def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
|
267 |
-
min_batch = batch_size if static_batch else self.min_batch
|
268 |
-
max_batch = batch_size if static_batch else self.max_batch
|
269 |
-
latent_height = image_height // 8
|
270 |
-
latent_width = image_width // 8
|
271 |
-
min_image_height = image_height if static_shape else self.min_image_shape
|
272 |
-
max_image_height = image_height if static_shape else self.max_image_shape
|
273 |
-
min_image_width = image_width if static_shape else self.min_image_shape
|
274 |
-
max_image_width = image_width if static_shape else self.max_image_shape
|
275 |
-
min_latent_height = latent_height if static_shape else self.min_latent_shape
|
276 |
-
max_latent_height = latent_height if static_shape else self.max_latent_shape
|
277 |
-
min_latent_width = latent_width if static_shape else self.min_latent_shape
|
278 |
-
max_latent_width = latent_width if static_shape else self.max_latent_shape
|
279 |
-
return (
|
280 |
-
min_batch,
|
281 |
-
max_batch,
|
282 |
-
min_image_height,
|
283 |
-
max_image_height,
|
284 |
-
min_image_width,
|
285 |
-
max_image_width,
|
286 |
-
min_latent_height,
|
287 |
-
max_latent_height,
|
288 |
-
min_latent_width,
|
289 |
-
max_latent_width,
|
290 |
-
)
|
291 |
-
|
292 |
-
|
293 |
-
def getOnnxPath(model_name, onnx_dir, opt=True):
|
294 |
-
return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
|
295 |
-
|
296 |
-
|
297 |
-
def getEnginePath(model_name, engine_dir):
|
298 |
-
return os.path.join(engine_dir, model_name + ".plan")
|
299 |
-
|
300 |
-
|
301 |
-
def build_engines(
|
302 |
-
models: dict,
|
303 |
-
engine_dir,
|
304 |
-
onnx_dir,
|
305 |
-
onnx_opset,
|
306 |
-
opt_image_height,
|
307 |
-
opt_image_width,
|
308 |
-
opt_batch_size=1,
|
309 |
-
force_engine_rebuild=False,
|
310 |
-
static_batch=False,
|
311 |
-
static_shape=True,
|
312 |
-
enable_preview=False,
|
313 |
-
enable_all_tactics=False,
|
314 |
-
timing_cache=None,
|
315 |
-
max_workspace_size=0,
|
316 |
-
):
|
317 |
-
built_engines = {}
|
318 |
-
if not os.path.isdir(onnx_dir):
|
319 |
-
os.makedirs(onnx_dir)
|
320 |
-
if not os.path.isdir(engine_dir):
|
321 |
-
os.makedirs(engine_dir)
|
322 |
-
|
323 |
-
# Export models to ONNX
|
324 |
-
for model_name, model_obj in models.items():
|
325 |
-
engine_path = getEnginePath(model_name, engine_dir)
|
326 |
-
if force_engine_rebuild or not os.path.exists(engine_path):
|
327 |
-
logger.warning("Building Engines...")
|
328 |
-
logger.warning("Engine build can take a while to complete")
|
329 |
-
onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
|
330 |
-
onnx_opt_path = getOnnxPath(model_name, onnx_dir)
|
331 |
-
if force_engine_rebuild or not os.path.exists(onnx_opt_path):
|
332 |
-
if force_engine_rebuild or not os.path.exists(onnx_path):
|
333 |
-
logger.warning(f"Exporting model: {onnx_path}")
|
334 |
-
model = model_obj.get_model()
|
335 |
-
with torch.inference_mode(), torch.autocast("cuda"):
|
336 |
-
inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
|
337 |
-
torch.onnx.export(
|
338 |
-
model,
|
339 |
-
inputs,
|
340 |
-
onnx_path,
|
341 |
-
export_params=True,
|
342 |
-
opset_version=onnx_opset,
|
343 |
-
do_constant_folding=True,
|
344 |
-
input_names=model_obj.get_input_names(),
|
345 |
-
output_names=model_obj.get_output_names(),
|
346 |
-
dynamic_axes=model_obj.get_dynamic_axes(),
|
347 |
-
)
|
348 |
-
del model
|
349 |
-
torch.cuda.empty_cache()
|
350 |
-
gc.collect()
|
351 |
-
else:
|
352 |
-
logger.warning(f"Found cached model: {onnx_path}")
|
353 |
-
|
354 |
-
# Optimize onnx
|
355 |
-
if force_engine_rebuild or not os.path.exists(onnx_opt_path):
|
356 |
-
logger.warning(f"Generating optimizing model: {onnx_opt_path}")
|
357 |
-
onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
|
358 |
-
onnx.save(onnx_opt_graph, onnx_opt_path)
|
359 |
-
else:
|
360 |
-
logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
|
361 |
-
|
362 |
-
# Build TensorRT engines
|
363 |
-
for model_name, model_obj in models.items():
|
364 |
-
engine_path = getEnginePath(model_name, engine_dir)
|
365 |
-
engine = Engine(engine_path)
|
366 |
-
onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
|
367 |
-
onnx_opt_path = getOnnxPath(model_name, onnx_dir)
|
368 |
-
|
369 |
-
if force_engine_rebuild or not os.path.exists(engine.engine_path):
|
370 |
-
engine.build(
|
371 |
-
onnx_opt_path,
|
372 |
-
fp16=True,
|
373 |
-
input_profile=model_obj.get_input_profile(
|
374 |
-
opt_batch_size,
|
375 |
-
opt_image_height,
|
376 |
-
opt_image_width,
|
377 |
-
static_batch=static_batch,
|
378 |
-
static_shape=static_shape,
|
379 |
-
),
|
380 |
-
enable_preview=enable_preview,
|
381 |
-
timing_cache=timing_cache,
|
382 |
-
workspace_size=max_workspace_size,
|
383 |
-
)
|
384 |
-
built_engines[model_name] = engine
|
385 |
-
|
386 |
-
# Load and activate TensorRT engines
|
387 |
-
for model_name, model_obj in models.items():
|
388 |
-
engine = built_engines[model_name]
|
389 |
-
engine.load()
|
390 |
-
engine.activate()
|
391 |
-
|
392 |
-
return built_engines
|
393 |
-
|
394 |
-
|
395 |
-
def runEngine(engine, feed_dict, stream):
|
396 |
-
return engine.infer(feed_dict, stream)
|
397 |
-
|
398 |
-
|
399 |
-
class CLIP(BaseModel):
|
400 |
-
def __init__(self, model, device, max_batch_size, embedding_dim):
|
401 |
-
super(CLIP, self).__init__(
|
402 |
-
model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
|
403 |
-
)
|
404 |
-
self.name = "CLIP"
|
405 |
-
|
406 |
-
def get_input_names(self):
|
407 |
-
return ["input_ids"]
|
408 |
-
|
409 |
-
def get_output_names(self):
|
410 |
-
return ["text_embeddings", "pooler_output"]
|
411 |
-
|
412 |
-
def get_dynamic_axes(self):
|
413 |
-
return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
|
414 |
-
|
415 |
-
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
|
416 |
-
self.check_dims(batch_size, image_height, image_width)
|
417 |
-
min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
|
418 |
-
batch_size, image_height, image_width, static_batch, static_shape
|
419 |
-
)
|
420 |
-
return {
|
421 |
-
"input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
|
422 |
-
}
|
423 |
-
|
424 |
-
def get_shape_dict(self, batch_size, image_height, image_width):
|
425 |
-
self.check_dims(batch_size, image_height, image_width)
|
426 |
-
return {
|
427 |
-
"input_ids": (batch_size, self.text_maxlen),
|
428 |
-
"text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
|
429 |
-
}
|
430 |
-
|
431 |
-
def get_sample_input(self, batch_size, image_height, image_width):
|
432 |
-
self.check_dims(batch_size, image_height, image_width)
|
433 |
-
return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
|
434 |
-
|
435 |
-
def optimize(self, onnx_graph):
|
436 |
-
opt = Optimizer(onnx_graph)
|
437 |
-
opt.select_outputs([0]) # delete graph output#1
|
438 |
-
opt.cleanup()
|
439 |
-
opt.fold_constants()
|
440 |
-
opt.infer_shapes()
|
441 |
-
opt.select_outputs([0], names=["text_embeddings"]) # rename network output
|
442 |
-
opt_onnx_graph = opt.cleanup(return_onnx=True)
|
443 |
-
return opt_onnx_graph
|
444 |
-
|
445 |
-
|
446 |
-
def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
|
447 |
-
return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
|
448 |
-
|
449 |
-
|
450 |
-
class UNet(BaseModel):
|
451 |
-
def __init__(
|
452 |
-
self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
|
453 |
-
):
|
454 |
-
super(UNet, self).__init__(
|
455 |
-
model=model,
|
456 |
-
fp16=fp16,
|
457 |
-
device=device,
|
458 |
-
max_batch_size=max_batch_size,
|
459 |
-
embedding_dim=embedding_dim,
|
460 |
-
text_maxlen=text_maxlen,
|
461 |
-
)
|
462 |
-
self.unet_dim = unet_dim
|
463 |
-
self.name = "UNet"
|
464 |
-
|
465 |
-
def get_input_names(self):
|
466 |
-
return ["sample", "timestep", "encoder_hidden_states"]
|
467 |
-
|
468 |
-
def get_output_names(self):
|
469 |
-
return ["latent"]
|
470 |
-
|
471 |
-
def get_dynamic_axes(self):
|
472 |
-
return {
|
473 |
-
"sample": {0: "2B", 2: "H", 3: "W"},
|
474 |
-
"encoder_hidden_states": {0: "2B"},
|
475 |
-
"latent": {0: "2B", 2: "H", 3: "W"},
|
476 |
-
}
|
477 |
-
|
478 |
-
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
|
479 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
480 |
-
(
|
481 |
-
min_batch,
|
482 |
-
max_batch,
|
483 |
-
_,
|
484 |
-
_,
|
485 |
-
_,
|
486 |
-
_,
|
487 |
-
min_latent_height,
|
488 |
-
max_latent_height,
|
489 |
-
min_latent_width,
|
490 |
-
max_latent_width,
|
491 |
-
) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
|
492 |
-
return {
|
493 |
-
"sample": [
|
494 |
-
(2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
|
495 |
-
(2 * batch_size, self.unet_dim, latent_height, latent_width),
|
496 |
-
(2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
|
497 |
-
],
|
498 |
-
"encoder_hidden_states": [
|
499 |
-
(2 * min_batch, self.text_maxlen, self.embedding_dim),
|
500 |
-
(2 * batch_size, self.text_maxlen, self.embedding_dim),
|
501 |
-
(2 * max_batch, self.text_maxlen, self.embedding_dim),
|
502 |
-
],
|
503 |
-
}
|
504 |
-
|
505 |
-
def get_shape_dict(self, batch_size, image_height, image_width):
|
506 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
507 |
-
return {
|
508 |
-
"sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
|
509 |
-
"encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
|
510 |
-
"latent": (2 * batch_size, 4, latent_height, latent_width),
|
511 |
-
}
|
512 |
-
|
513 |
-
def get_sample_input(self, batch_size, image_height, image_width):
|
514 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
515 |
-
dtype = torch.float16 if self.fp16 else torch.float32
|
516 |
-
return (
|
517 |
-
torch.randn(
|
518 |
-
2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
|
519 |
-
),
|
520 |
-
torch.tensor([1.0], dtype=torch.float32, device=self.device),
|
521 |
-
torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
|
522 |
-
)
|
523 |
-
|
524 |
-
|
525 |
-
def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):
|
526 |
-
return UNet(
|
527 |
-
model,
|
528 |
-
fp16=True,
|
529 |
-
device=device,
|
530 |
-
max_batch_size=max_batch_size,
|
531 |
-
embedding_dim=embedding_dim,
|
532 |
-
unet_dim=(9 if inpaint else 4),
|
533 |
-
)
|
534 |
-
|
535 |
-
|
536 |
-
class VAE(BaseModel):
|
537 |
-
def __init__(self, model, device, max_batch_size, embedding_dim):
|
538 |
-
super(VAE, self).__init__(
|
539 |
-
model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
|
540 |
-
)
|
541 |
-
self.name = "VAE decoder"
|
542 |
-
|
543 |
-
def get_input_names(self):
|
544 |
-
return ["latent"]
|
545 |
-
|
546 |
-
def get_output_names(self):
|
547 |
-
return ["images"]
|
548 |
-
|
549 |
-
def get_dynamic_axes(self):
|
550 |
-
return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
|
551 |
-
|
552 |
-
def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
|
553 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
554 |
-
(
|
555 |
-
min_batch,
|
556 |
-
max_batch,
|
557 |
-
_,
|
558 |
-
_,
|
559 |
-
_,
|
560 |
-
_,
|
561 |
-
min_latent_height,
|
562 |
-
max_latent_height,
|
563 |
-
min_latent_width,
|
564 |
-
max_latent_width,
|
565 |
-
) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
|
566 |
-
return {
|
567 |
-
"latent": [
|
568 |
-
(min_batch, 4, min_latent_height, min_latent_width),
|
569 |
-
(batch_size, 4, latent_height, latent_width),
|
570 |
-
(max_batch, 4, max_latent_height, max_latent_width),
|
571 |
-
]
|
572 |
-
}
|
573 |
-
|
574 |
-
def get_shape_dict(self, batch_size, image_height, image_width):
|
575 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
576 |
-
return {
|
577 |
-
"latent": (batch_size, 4, latent_height, latent_width),
|
578 |
-
"images": (batch_size, 3, image_height, image_width),
|
579 |
-
}
|
580 |
-
|
581 |
-
def get_sample_input(self, batch_size, image_height, image_width):
|
582 |
-
latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
|
583 |
-
return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
|
584 |
-
|
585 |
-
|
586 |
-
def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
|
587 |
-
return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
|
588 |
-
|
589 |
-
|
590 |
-
class TensorRTStableDiffusionPipeline(StableDiffusionPipeline):
|
591 |
-
r"""
|
592 |
-
Pipeline for text-to-image generation using TensorRT accelerated Stable Diffusion.
|
593 |
-
|
594 |
-
This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
595 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
596 |
-
|
597 |
-
Args:
|
598 |
-
vae ([`AutoencoderKL`]):
|
599 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
600 |
-
text_encoder ([`CLIPTextModel`]):
|
601 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
602 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
603 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
604 |
-
tokenizer (`CLIPTokenizer`):
|
605 |
-
Tokenizer of class
|
606 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
607 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
608 |
-
scheduler ([`SchedulerMixin`]):
|
609 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
610 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
611 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
612 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
613 |
-
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
614 |
-
feature_extractor ([`CLIPFeatureExtractor`]):
|
615 |
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
616 |
-
"""
|
617 |
-
|
618 |
-
def __init__(
|
619 |
-
self,
|
620 |
-
vae: AutoencoderKL,
|
621 |
-
text_encoder: CLIPTextModel,
|
622 |
-
tokenizer: CLIPTokenizer,
|
623 |
-
unet: UNet2DConditionModel,
|
624 |
-
scheduler: DDIMScheduler,
|
625 |
-
safety_checker: StableDiffusionSafetyChecker,
|
626 |
-
feature_extractor: CLIPFeatureExtractor,
|
627 |
-
requires_safety_checker: bool = True,
|
628 |
-
stages=["clip", "unet", "vae"],
|
629 |
-
image_height: int = 768,
|
630 |
-
image_width: int = 768,
|
631 |
-
max_batch_size: int = 16,
|
632 |
-
# ONNX export parameters
|
633 |
-
onnx_opset: int = 17,
|
634 |
-
onnx_dir: str = "onnx",
|
635 |
-
# TensorRT engine build parameters
|
636 |
-
engine_dir: str = "engine",
|
637 |
-
build_preview_features: bool = True,
|
638 |
-
force_engine_rebuild: bool = False,
|
639 |
-
timing_cache: str = "timing_cache",
|
640 |
-
):
|
641 |
-
super().__init__(
|
642 |
-
vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
|
643 |
-
)
|
644 |
-
|
645 |
-
self.vae.forward = self.vae.decode
|
646 |
-
|
647 |
-
self.stages = stages
|
648 |
-
self.image_height, self.image_width = image_height, image_width
|
649 |
-
self.inpaint = False
|
650 |
-
self.onnx_opset = onnx_opset
|
651 |
-
self.onnx_dir = onnx_dir
|
652 |
-
self.engine_dir = engine_dir
|
653 |
-
self.force_engine_rebuild = force_engine_rebuild
|
654 |
-
self.timing_cache = timing_cache
|
655 |
-
self.build_static_batch = False
|
656 |
-
self.build_dynamic_shape = False
|
657 |
-
self.build_preview_features = build_preview_features
|
658 |
-
|
659 |
-
self.max_batch_size = max_batch_size
|
660 |
-
# TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
|
661 |
-
if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
|
662 |
-
self.max_batch_size = 4
|
663 |
-
|
664 |
-
self.stream = None # loaded in loadResources()
|
665 |
-
self.models = {} # loaded in __loadModels()
|
666 |
-
self.engine = {} # loaded in build_engines()
|
667 |
-
|
668 |
-
def __loadModels(self):
|
669 |
-
# Load pipeline models
|
670 |
-
self.embedding_dim = self.text_encoder.config.hidden_size
|
671 |
-
models_args = {
|
672 |
-
"device": self.torch_device,
|
673 |
-
"max_batch_size": self.max_batch_size,
|
674 |
-
"embedding_dim": self.embedding_dim,
|
675 |
-
"inpaint": self.inpaint,
|
676 |
-
}
|
677 |
-
if "clip" in self.stages:
|
678 |
-
self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
|
679 |
-
if "unet" in self.stages:
|
680 |
-
self.models["unet"] = make_UNet(self.unet, **models_args)
|
681 |
-
if "vae" in self.stages:
|
682 |
-
self.models["vae"] = make_VAE(self.vae, **models_args)
|
683 |
-
|
684 |
-
@classmethod
|
685 |
-
def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
|
686 |
-
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
|
687 |
-
resume_download = kwargs.pop("resume_download", False)
|
688 |
-
proxies = kwargs.pop("proxies", None)
|
689 |
-
local_files_only = kwargs.pop("local_files_only", False)
|
690 |
-
use_auth_token = kwargs.pop("use_auth_token", None)
|
691 |
-
revision = kwargs.pop("revision", None)
|
692 |
-
|
693 |
-
cls.cached_folder = (
|
694 |
-
pretrained_model_name_or_path
|
695 |
-
if os.path.isdir(pretrained_model_name_or_path)
|
696 |
-
else snapshot_download(
|
697 |
-
pretrained_model_name_or_path,
|
698 |
-
cache_dir=cache_dir,
|
699 |
-
resume_download=resume_download,
|
700 |
-
proxies=proxies,
|
701 |
-
local_files_only=local_files_only,
|
702 |
-
use_auth_token=use_auth_token,
|
703 |
-
revision=revision,
|
704 |
-
)
|
705 |
-
)
|
706 |
-
|
707 |
-
def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
|
708 |
-
super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
|
709 |
-
|
710 |
-
self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
|
711 |
-
self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
|
712 |
-
self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
|
713 |
-
|
714 |
-
# set device
|
715 |
-
self.torch_device = self._execution_device
|
716 |
-
logger.warning(f"Running inference on device: {self.torch_device}")
|
717 |
-
|
718 |
-
# load models
|
719 |
-
self.__loadModels()
|
720 |
-
|
721 |
-
# build engines
|
722 |
-
self.engine = build_engines(
|
723 |
-
self.models,
|
724 |
-
self.engine_dir,
|
725 |
-
self.onnx_dir,
|
726 |
-
self.onnx_opset,
|
727 |
-
opt_image_height=self.image_height,
|
728 |
-
opt_image_width=self.image_width,
|
729 |
-
force_engine_rebuild=self.force_engine_rebuild,
|
730 |
-
static_batch=self.build_static_batch,
|
731 |
-
static_shape=not self.build_dynamic_shape,
|
732 |
-
enable_preview=self.build_preview_features,
|
733 |
-
timing_cache=self.timing_cache,
|
734 |
-
)
|
735 |
-
|
736 |
-
return self
|
737 |
-
|
738 |
-
def __encode_prompt(self, prompt, negative_prompt):
|
739 |
-
r"""
|
740 |
-
Encodes the prompt into text encoder hidden states.
|
741 |
-
|
742 |
-
Args:
|
743 |
-
prompt (`str` or `List[str]`, *optional*):
|
744 |
-
prompt to be encoded
|
745 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
746 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
747 |
-
`negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
|
748 |
-
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
749 |
-
"""
|
750 |
-
# Tokenize prompt
|
751 |
-
text_input_ids = (
|
752 |
-
self.tokenizer(
|
753 |
-
prompt,
|
754 |
-
padding="max_length",
|
755 |
-
max_length=self.tokenizer.model_max_length,
|
756 |
-
truncation=True,
|
757 |
-
return_tensors="pt",
|
758 |
-
)
|
759 |
-
.input_ids.type(torch.int32)
|
760 |
-
.to(self.torch_device)
|
761 |
-
)
|
762 |
-
|
763 |
-
text_input_ids_inp = device_view(text_input_ids)
|
764 |
-
# NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
|
765 |
-
text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
|
766 |
-
"text_embeddings"
|
767 |
-
].clone()
|
768 |
-
|
769 |
-
# Tokenize negative prompt
|
770 |
-
uncond_input_ids = (
|
771 |
-
self.tokenizer(
|
772 |
-
negative_prompt,
|
773 |
-
padding="max_length",
|
774 |
-
max_length=self.tokenizer.model_max_length,
|
775 |
-
truncation=True,
|
776 |
-
return_tensors="pt",
|
777 |
-
)
|
778 |
-
.input_ids.type(torch.int32)
|
779 |
-
.to(self.torch_device)
|
780 |
-
)
|
781 |
-
uncond_input_ids_inp = device_view(uncond_input_ids)
|
782 |
-
uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
|
783 |
-
"text_embeddings"
|
784 |
-
]
|
785 |
-
|
786 |
-
# Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
|
787 |
-
text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
|
788 |
-
|
789 |
-
return text_embeddings
|
790 |
-
|
791 |
-
def __denoise_latent(
|
792 |
-
self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
|
793 |
-
):
|
794 |
-
if not isinstance(timesteps, torch.Tensor):
|
795 |
-
timesteps = self.scheduler.timesteps
|
796 |
-
for step_index, timestep in enumerate(timesteps):
|
797 |
-
# Expand the latents if we are doing classifier free guidance
|
798 |
-
latent_model_input = torch.cat([latents] * 2)
|
799 |
-
latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
|
800 |
-
if isinstance(mask, torch.Tensor):
|
801 |
-
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
802 |
-
|
803 |
-
# Predict the noise residual
|
804 |
-
timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
|
805 |
-
|
806 |
-
sample_inp = device_view(latent_model_input)
|
807 |
-
timestep_inp = device_view(timestep_float)
|
808 |
-
embeddings_inp = device_view(text_embeddings)
|
809 |
-
noise_pred = runEngine(
|
810 |
-
self.engine["unet"],
|
811 |
-
{"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
|
812 |
-
self.stream,
|
813 |
-
)["latent"]
|
814 |
-
|
815 |
-
# Perform guidance
|
816 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
817 |
-
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
818 |
-
|
819 |
-
latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
|
820 |
-
|
821 |
-
latents = 1.0 / 0.18215 * latents
|
822 |
-
return latents
|
823 |
-
|
824 |
-
def __decode_latent(self, latents):
|
825 |
-
images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
|
826 |
-
images = (images / 2 + 0.5).clamp(0, 1)
|
827 |
-
return images.cpu().permute(0, 2, 3, 1).float().numpy()
|
828 |
-
|
829 |
-
def __loadResources(self, image_height, image_width, batch_size):
|
830 |
-
self.stream = cuda.Stream()
|
831 |
-
|
832 |
-
# Allocate buffers for TensorRT engine bindings
|
833 |
-
for model_name, obj in self.models.items():
|
834 |
-
self.engine[model_name].allocate_buffers(
|
835 |
-
shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
|
836 |
-
)
|
837 |
-
|
838 |
-
@torch.no_grad()
|
839 |
-
def __call__(
|
840 |
-
self,
|
841 |
-
prompt: Union[str, List[str]] = None,
|
842 |
-
num_inference_steps: int = 50,
|
843 |
-
guidance_scale: float = 7.5,
|
844 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
845 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
846 |
-
):
|
847 |
-
r"""
|
848 |
-
Function invoked when calling the pipeline for generation.
|
849 |
-
|
850 |
-
Args:
|
851 |
-
prompt (`str` or `List[str]`, *optional*):
|
852 |
-
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
853 |
-
instead.
|
854 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
855 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
856 |
-
expense of slower inference.
|
857 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
858 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
859 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
860 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
861 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
862 |
-
usually at the expense of lower image quality.
|
863 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
864 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
865 |
-
`negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
|
866 |
-
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
867 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
868 |
-
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
869 |
-
to make generation deterministic.
|
870 |
-
|
871 |
-
"""
|
872 |
-
self.generator = generator
|
873 |
-
self.denoising_steps = num_inference_steps
|
874 |
-
self.guidance_scale = guidance_scale
|
875 |
-
|
876 |
-
# Pre-compute latent input scales and linear multistep coefficients
|
877 |
-
self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
|
878 |
-
|
879 |
-
# Define call parameters
|
880 |
-
if prompt is not None and isinstance(prompt, str):
|
881 |
-
batch_size = 1
|
882 |
-
prompt = [prompt]
|
883 |
-
elif prompt is not None and isinstance(prompt, list):
|
884 |
-
batch_size = len(prompt)
|
885 |
-
else:
|
886 |
-
raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
|
887 |
-
|
888 |
-
if negative_prompt is None:
|
889 |
-
negative_prompt = [""] * batch_size
|
890 |
-
|
891 |
-
if negative_prompt is not None and isinstance(negative_prompt, str):
|
892 |
-
negative_prompt = [negative_prompt]
|
893 |
-
|
894 |
-
assert len(prompt) == len(negative_prompt)
|
895 |
-
|
896 |
-
if batch_size > self.max_batch_size:
|
897 |
-
raise ValueError(
|
898 |
-
f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
|
899 |
-
)
|
900 |
-
|
901 |
-
# load resources
|
902 |
-
self.__loadResources(self.image_height, self.image_width, batch_size)
|
903 |
-
|
904 |
-
with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
|
905 |
-
# CLIP text encoder
|
906 |
-
text_embeddings = self.__encode_prompt(prompt, negative_prompt)
|
907 |
-
|
908 |
-
# Pre-initialize latents
|
909 |
-
num_channels_latents = self.unet.in_channels
|
910 |
-
latents = self.prepare_latents(
|
911 |
-
batch_size,
|
912 |
-
num_channels_latents,
|
913 |
-
self.image_height,
|
914 |
-
self.image_width,
|
915 |
-
torch.float32,
|
916 |
-
self.torch_device,
|
917 |
-
generator,
|
918 |
-
)
|
919 |
-
|
920 |
-
# UNet denoiser
|
921 |
-
latents = self.__denoise_latent(latents, text_embeddings)
|
922 |
-
|
923 |
-
# VAE decode latent
|
924 |
-
images = self.__decode_latent(latents)
|
925 |
-
|
926 |
-
images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype)
|
927 |
-
images = self.numpy_to_pil(images)
|
928 |
-
return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/UniFormerV2_mit_demo/app.py
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
import numpy as np
|
6 |
-
import torch.nn.functional as F
|
7 |
-
import torchvision.transforms as T
|
8 |
-
from PIL import Image
|
9 |
-
from decord import VideoReader
|
10 |
-
from decord import cpu
|
11 |
-
from uniformerv2 import uniformerv2_b16
|
12 |
-
from mitv1_class_index import mitv1_classnames
|
13 |
-
from transforms import (
|
14 |
-
GroupNormalize, GroupScale, GroupCenterCrop,
|
15 |
-
Stack, ToTorchFormatTensor
|
16 |
-
)
|
17 |
-
|
18 |
-
import gradio as gr
|
19 |
-
from huggingface_hub import hf_hub_download
|
20 |
-
|
21 |
-
class Uniformerv2(nn.Module):
|
22 |
-
def __init__(self, model):
|
23 |
-
super().__init__()
|
24 |
-
self.backbone = model
|
25 |
-
|
26 |
-
def forward(self, x):
|
27 |
-
return self.backbone(x)
|
28 |
-
|
29 |
-
# Device on which to run the model
|
30 |
-
# Set to cuda to load on GPU
|
31 |
-
device = "cpu"
|
32 |
-
model_path = hf_hub_download(repo_id="Andy1621/uniformerv2", filename="mit_uniformerv2_b16_8x224.pyth")
|
33 |
-
# Pick a pretrained model
|
34 |
-
model = Uniformerv2(uniformerv2_b16(pretrained=False, t_size=8, no_lmhra=True, temporal_downsample=False, num_classes=339))
|
35 |
-
state_dict = torch.load(model_path, map_location='cpu')
|
36 |
-
model.load_state_dict(state_dict)
|
37 |
-
|
38 |
-
# Set to eval mode and move to desired device
|
39 |
-
model = model.to(device)
|
40 |
-
model = model.eval()
|
41 |
-
|
42 |
-
# Create an id to label name mapping
|
43 |
-
mitv1_id_to_classname = {}
|
44 |
-
for k, v in mitv1_classnames.items():
|
45 |
-
mitv1_id_to_classname[k] = v
|
46 |
-
|
47 |
-
|
48 |
-
def get_index(num_frames, num_segments=8):
|
49 |
-
seg_size = float(num_frames - 1) / num_segments
|
50 |
-
start = int(seg_size / 2)
|
51 |
-
offsets = np.array([
|
52 |
-
start + int(np.round(seg_size * idx)) for idx in range(num_segments)
|
53 |
-
])
|
54 |
-
return offsets
|
55 |
-
|
56 |
-
|
57 |
-
def load_video(video_path):
|
58 |
-
vr = VideoReader(video_path, ctx=cpu(0))
|
59 |
-
num_frames = len(vr)
|
60 |
-
frame_indices = get_index(num_frames, 8)
|
61 |
-
|
62 |
-
# transform
|
63 |
-
crop_size = 224
|
64 |
-
scale_size = 256
|
65 |
-
input_mean = [0.485, 0.456, 0.406]
|
66 |
-
input_std = [0.229, 0.224, 0.225]
|
67 |
-
|
68 |
-
transform = T.Compose([
|
69 |
-
GroupScale(int(scale_size)),
|
70 |
-
GroupCenterCrop(crop_size),
|
71 |
-
Stack(),
|
72 |
-
ToTorchFormatTensor(),
|
73 |
-
GroupNormalize(input_mean, input_std)
|
74 |
-
])
|
75 |
-
|
76 |
-
images_group = list()
|
77 |
-
for frame_index in frame_indices:
|
78 |
-
img = Image.fromarray(vr[frame_index].asnumpy())
|
79 |
-
images_group.append(img)
|
80 |
-
torch_imgs = transform(images_group)
|
81 |
-
return torch_imgs
|
82 |
-
|
83 |
-
|
84 |
-
def inference(video):
|
85 |
-
vid = load_video(video)
|
86 |
-
|
87 |
-
# The model expects inputs of shape: B x C x H x W
|
88 |
-
TC, H, W = vid.shape
|
89 |
-
inputs = vid.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4)
|
90 |
-
|
91 |
-
prediction = model(inputs)
|
92 |
-
prediction = F.softmax(prediction, dim=1).flatten()
|
93 |
-
|
94 |
-
return {mitv1_id_to_classname[str(i)]: float(prediction[i]) for i in range(339)}
|
95 |
-
|
96 |
-
|
97 |
-
def set_example_video(example: list) -> dict:
|
98 |
-
return gr.Video.update(value=example[0])
|
99 |
-
|
100 |
-
|
101 |
-
demo = gr.Blocks()
|
102 |
-
with demo:
|
103 |
-
gr.Markdown(
|
104 |
-
"""
|
105 |
-
# UniFormerV2-B
|
106 |
-
Gradio demo for <a href='https://github.com/OpenGVLab/UniFormerV2' target='_blank'>UniFormerV2</a>: To use it, simply upload your video, or click one of the examples to load them. Read more at the links below.
|
107 |
-
"""
|
108 |
-
)
|
109 |
-
|
110 |
-
with gr.Box():
|
111 |
-
with gr.Row():
|
112 |
-
with gr.Column():
|
113 |
-
with gr.Row():
|
114 |
-
input_video = gr.Video(label='Input Video')
|
115 |
-
with gr.Row():
|
116 |
-
submit_button = gr.Button('Submit')
|
117 |
-
with gr.Column():
|
118 |
-
label = gr.Label(num_top_classes=5)
|
119 |
-
with gr.Row():
|
120 |
-
example_videos = gr.Dataset(components=[input_video], samples=[['clapping.mp4'], ['jumping.mp4'], ['swimming.mp4']])
|
121 |
-
|
122 |
-
gr.Markdown(
|
123 |
-
"""
|
124 |
-
<p style='text-align: center'><a href='https://arxiv.org/abs/2211.09552' target='_blank'>[Arxiv] UniFormerV2: Spatiotemporal Learning by Arming Image ViTs with Video UniFormer</a> | <a href='https://github.com/OpenGVLab/UniFormerV2' target='_blank'>Github Repo</a></p>
|
125 |
-
"""
|
126 |
-
)
|
127 |
-
|
128 |
-
submit_button.click(fn=inference, inputs=input_video, outputs=label)
|
129 |
-
example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)
|
130 |
-
|
131 |
-
demo.launch(enable_queue=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
|
2 |
-
# learning policy
|
3 |
-
lr_config = dict(step=[16, 22])
|
4 |
-
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/retinanet_r50_fpn.py',
|
3 |
-
'../_base_/datasets/coco_detection.py',
|
4 |
-
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
6 |
-
# model settings
|
7 |
-
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
|
8 |
-
model = dict(
|
9 |
-
pretrained='torchvision://resnet101',
|
10 |
-
backbone=dict(depth=101),
|
11 |
-
bbox_head=dict(
|
12 |
-
_delete_=True,
|
13 |
-
type='SABLRetinaHead',
|
14 |
-
num_classes=80,
|
15 |
-
in_channels=256,
|
16 |
-
stacked_convs=4,
|
17 |
-
feat_channels=256,
|
18 |
-
approx_anchor_generator=dict(
|
19 |
-
type='AnchorGenerator',
|
20 |
-
octave_base_scale=4,
|
21 |
-
scales_per_octave=3,
|
22 |
-
ratios=[0.5, 1.0, 2.0],
|
23 |
-
strides=[8, 16, 32, 64, 128]),
|
24 |
-
square_anchor_generator=dict(
|
25 |
-
type='AnchorGenerator',
|
26 |
-
ratios=[1.0],
|
27 |
-
scales=[4],
|
28 |
-
strides=[8, 16, 32, 64, 128]),
|
29 |
-
norm_cfg=norm_cfg,
|
30 |
-
bbox_coder=dict(
|
31 |
-
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
|
32 |
-
loss_cls=dict(
|
33 |
-
type='FocalLoss',
|
34 |
-
use_sigmoid=True,
|
35 |
-
gamma=2.0,
|
36 |
-
alpha=0.25,
|
37 |
-
loss_weight=1.0),
|
38 |
-
loss_bbox_cls=dict(
|
39 |
-
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
|
40 |
-
loss_bbox_reg=dict(
|
41 |
-
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
|
42 |
-
# training and testing settings
|
43 |
-
train_cfg=dict(
|
44 |
-
assigner=dict(
|
45 |
-
type='ApproxMaxIoUAssigner',
|
46 |
-
pos_iou_thr=0.5,
|
47 |
-
neg_iou_thr=0.4,
|
48 |
-
min_pos_iou=0.0,
|
49 |
-
ignore_iof_thr=-1),
|
50 |
-
allowed_border=-1,
|
51 |
-
pos_weight=-1,
|
52 |
-
debug=False))
|
53 |
-
img_norm_cfg = dict(
|
54 |
-
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
55 |
-
train_pipeline = [
|
56 |
-
dict(type='LoadImageFromFile'),
|
57 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
58 |
-
dict(
|
59 |
-
type='Resize',
|
60 |
-
img_scale=[(1333, 480), (1333, 960)],
|
61 |
-
multiscale_mode='range',
|
62 |
-
keep_ratio=True),
|
63 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
64 |
-
dict(type='Normalize', **img_norm_cfg),
|
65 |
-
dict(type='Pad', size_divisor=32),
|
66 |
-
dict(type='DefaultFormatBundle'),
|
67 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
68 |
-
]
|
69 |
-
data = dict(train=dict(pipeline=train_pipeline))
|
70 |
-
# optimizer
|
71 |
-
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/cascade_rpn_head.py
DELETED
@@ -1,784 +0,0 @@
|
|
1 |
-
from __future__ import division
|
2 |
-
import copy
|
3 |
-
import warnings
|
4 |
-
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
from mmcv import ConfigDict
|
8 |
-
from mmcv.cnn import normal_init
|
9 |
-
from mmcv.ops import DeformConv2d, batched_nms
|
10 |
-
|
11 |
-
from mmdet.core import (RegionAssigner, build_assigner, build_sampler,
|
12 |
-
images_to_levels, multi_apply)
|
13 |
-
from ..builder import HEADS, build_head
|
14 |
-
from .base_dense_head import BaseDenseHead
|
15 |
-
from .rpn_head import RPNHead
|
16 |
-
|
17 |
-
|
18 |
-
class AdaptiveConv(nn.Module):
|
19 |
-
"""AdaptiveConv used to adapt the sampling location with the anchors.
|
20 |
-
|
21 |
-
Args:
|
22 |
-
in_channels (int): Number of channels in the input image
|
23 |
-
out_channels (int): Number of channels produced by the convolution
|
24 |
-
kernel_size (int or tuple): Size of the conv kernel. Default: 3
|
25 |
-
stride (int or tuple, optional): Stride of the convolution. Default: 1
|
26 |
-
padding (int or tuple, optional): Zero-padding added to both sides of
|
27 |
-
the input. Default: 1
|
28 |
-
dilation (int or tuple, optional): Spacing between kernel elements.
|
29 |
-
Default: 3
|
30 |
-
groups (int, optional): Number of blocked connections from input
|
31 |
-
channels to output channels. Default: 1
|
32 |
-
bias (bool, optional): If set True, adds a learnable bias to the
|
33 |
-
output. Default: False.
|
34 |
-
type (str, optional): Type of adaptive conv, can be either 'offset'
|
35 |
-
(arbitrary anchors) or 'dilation' (uniform anchor).
|
36 |
-
Default: 'dilation'.
|
37 |
-
"""
|
38 |
-
|
39 |
-
def __init__(self,
|
40 |
-
in_channels,
|
41 |
-
out_channels,
|
42 |
-
kernel_size=3,
|
43 |
-
stride=1,
|
44 |
-
padding=1,
|
45 |
-
dilation=3,
|
46 |
-
groups=1,
|
47 |
-
bias=False,
|
48 |
-
type='dilation'):
|
49 |
-
super(AdaptiveConv, self).__init__()
|
50 |
-
assert type in ['offset', 'dilation']
|
51 |
-
self.adapt_type = type
|
52 |
-
|
53 |
-
assert kernel_size == 3, 'Adaptive conv only supports kernels 3'
|
54 |
-
if self.adapt_type == 'offset':
|
55 |
-
assert stride == 1 and padding == 1 and groups == 1, \
|
56 |
-
'Adaptive conv offset mode only supports padding: {1}, ' \
|
57 |
-
f'stride: {1}, groups: {1}'
|
58 |
-
self.conv = DeformConv2d(
|
59 |
-
in_channels,
|
60 |
-
out_channels,
|
61 |
-
kernel_size,
|
62 |
-
padding=padding,
|
63 |
-
stride=stride,
|
64 |
-
groups=groups,
|
65 |
-
bias=bias)
|
66 |
-
else:
|
67 |
-
self.conv = nn.Conv2d(
|
68 |
-
in_channels,
|
69 |
-
out_channels,
|
70 |
-
kernel_size,
|
71 |
-
padding=dilation,
|
72 |
-
dilation=dilation)
|
73 |
-
|
74 |
-
def init_weights(self):
|
75 |
-
"""Init weights."""
|
76 |
-
normal_init(self.conv, std=0.01)
|
77 |
-
|
78 |
-
def forward(self, x, offset):
|
79 |
-
"""Forward function."""
|
80 |
-
if self.adapt_type == 'offset':
|
81 |
-
N, _, H, W = x.shape
|
82 |
-
assert offset is not None
|
83 |
-
assert H * W == offset.shape[1]
|
84 |
-
# reshape [N, NA, 18] to (N, 18, H, W)
|
85 |
-
offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
|
86 |
-
offset = offset.contiguous()
|
87 |
-
x = self.conv(x, offset)
|
88 |
-
else:
|
89 |
-
assert offset is None
|
90 |
-
x = self.conv(x)
|
91 |
-
return x
|
92 |
-
|
93 |
-
|
94 |
-
@HEADS.register_module()
|
95 |
-
class StageCascadeRPNHead(RPNHead):
|
96 |
-
"""Stage of CascadeRPNHead.
|
97 |
-
|
98 |
-
Args:
|
99 |
-
in_channels (int): Number of channels in the input feature map.
|
100 |
-
anchor_generator (dict): anchor generator config.
|
101 |
-
adapt_cfg (dict): adaptation config.
|
102 |
-
bridged_feature (bool, optional): whether update rpn feature.
|
103 |
-
Default: False.
|
104 |
-
with_cls (bool, optional): wheather use classification branch.
|
105 |
-
Default: True.
|
106 |
-
sampling (bool, optional): wheather use sampling. Default: True.
|
107 |
-
"""
|
108 |
-
|
109 |
-
def __init__(self,
|
110 |
-
in_channels,
|
111 |
-
anchor_generator=dict(
|
112 |
-
type='AnchorGenerator',
|
113 |
-
scales=[8],
|
114 |
-
ratios=[1.0],
|
115 |
-
strides=[4, 8, 16, 32, 64]),
|
116 |
-
adapt_cfg=dict(type='dilation', dilation=3),
|
117 |
-
bridged_feature=False,
|
118 |
-
with_cls=True,
|
119 |
-
sampling=True,
|
120 |
-
**kwargs):
|
121 |
-
self.with_cls = with_cls
|
122 |
-
self.anchor_strides = anchor_generator['strides']
|
123 |
-
self.anchor_scales = anchor_generator['scales']
|
124 |
-
self.bridged_feature = bridged_feature
|
125 |
-
self.adapt_cfg = adapt_cfg
|
126 |
-
super(StageCascadeRPNHead, self).__init__(
|
127 |
-
in_channels, anchor_generator=anchor_generator, **kwargs)
|
128 |
-
|
129 |
-
# override sampling and sampler
|
130 |
-
self.sampling = sampling
|
131 |
-
if self.train_cfg:
|
132 |
-
self.assigner = build_assigner(self.train_cfg.assigner)
|
133 |
-
# use PseudoSampler when sampling is False
|
134 |
-
if self.sampling and hasattr(self.train_cfg, 'sampler'):
|
135 |
-
sampler_cfg = self.train_cfg.sampler
|
136 |
-
else:
|
137 |
-
sampler_cfg = dict(type='PseudoSampler')
|
138 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
139 |
-
|
140 |
-
def _init_layers(self):
|
141 |
-
"""Init layers of a CascadeRPN stage."""
|
142 |
-
self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
|
143 |
-
**self.adapt_cfg)
|
144 |
-
if self.with_cls:
|
145 |
-
self.rpn_cls = nn.Conv2d(self.feat_channels,
|
146 |
-
self.num_anchors * self.cls_out_channels,
|
147 |
-
1)
|
148 |
-
self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
|
149 |
-
self.relu = nn.ReLU(inplace=True)
|
150 |
-
|
151 |
-
def init_weights(self):
|
152 |
-
"""Init weights of a CascadeRPN stage."""
|
153 |
-
self.rpn_conv.init_weights()
|
154 |
-
normal_init(self.rpn_reg, std=0.01)
|
155 |
-
if self.with_cls:
|
156 |
-
normal_init(self.rpn_cls, std=0.01)
|
157 |
-
|
158 |
-
def forward_single(self, x, offset):
|
159 |
-
"""Forward function of single scale."""
|
160 |
-
bridged_x = x
|
161 |
-
x = self.relu(self.rpn_conv(x, offset))
|
162 |
-
if self.bridged_feature:
|
163 |
-
bridged_x = x # update feature
|
164 |
-
cls_score = self.rpn_cls(x) if self.with_cls else None
|
165 |
-
bbox_pred = self.rpn_reg(x)
|
166 |
-
return bridged_x, cls_score, bbox_pred
|
167 |
-
|
168 |
-
def forward(self, feats, offset_list=None):
|
169 |
-
"""Forward function."""
|
170 |
-
if offset_list is None:
|
171 |
-
offset_list = [None for _ in range(len(feats))]
|
172 |
-
return multi_apply(self.forward_single, feats, offset_list)
|
173 |
-
|
174 |
-
def _region_targets_single(self,
|
175 |
-
anchors,
|
176 |
-
valid_flags,
|
177 |
-
gt_bboxes,
|
178 |
-
gt_bboxes_ignore,
|
179 |
-
gt_labels,
|
180 |
-
img_meta,
|
181 |
-
featmap_sizes,
|
182 |
-
label_channels=1):
|
183 |
-
"""Get anchor targets based on region for single level."""
|
184 |
-
assign_result = self.assigner.assign(
|
185 |
-
anchors,
|
186 |
-
valid_flags,
|
187 |
-
gt_bboxes,
|
188 |
-
img_meta,
|
189 |
-
featmap_sizes,
|
190 |
-
self.anchor_scales[0],
|
191 |
-
self.anchor_strides,
|
192 |
-
gt_bboxes_ignore=gt_bboxes_ignore,
|
193 |
-
gt_labels=None,
|
194 |
-
allowed_border=self.train_cfg.allowed_border)
|
195 |
-
flat_anchors = torch.cat(anchors)
|
196 |
-
sampling_result = self.sampler.sample(assign_result, flat_anchors,
|
197 |
-
gt_bboxes)
|
198 |
-
|
199 |
-
num_anchors = flat_anchors.shape[0]
|
200 |
-
bbox_targets = torch.zeros_like(flat_anchors)
|
201 |
-
bbox_weights = torch.zeros_like(flat_anchors)
|
202 |
-
labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
|
203 |
-
label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
|
204 |
-
|
205 |
-
pos_inds = sampling_result.pos_inds
|
206 |
-
neg_inds = sampling_result.neg_inds
|
207 |
-
if len(pos_inds) > 0:
|
208 |
-
if not self.reg_decoded_bbox:
|
209 |
-
pos_bbox_targets = self.bbox_coder.encode(
|
210 |
-
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
|
211 |
-
else:
|
212 |
-
pos_bbox_targets = sampling_result.pos_gt_bboxes
|
213 |
-
bbox_targets[pos_inds, :] = pos_bbox_targets
|
214 |
-
bbox_weights[pos_inds, :] = 1.0
|
215 |
-
if gt_labels is None:
|
216 |
-
labels[pos_inds] = 1
|
217 |
-
else:
|
218 |
-
labels[pos_inds] = gt_labels[
|
219 |
-
sampling_result.pos_assigned_gt_inds]
|
220 |
-
if self.train_cfg.pos_weight <= 0:
|
221 |
-
label_weights[pos_inds] = 1.0
|
222 |
-
else:
|
223 |
-
label_weights[pos_inds] = self.train_cfg.pos_weight
|
224 |
-
if len(neg_inds) > 0:
|
225 |
-
label_weights[neg_inds] = 1.0
|
226 |
-
|
227 |
-
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
|
228 |
-
neg_inds)
|
229 |
-
|
230 |
-
def region_targets(self,
|
231 |
-
anchor_list,
|
232 |
-
valid_flag_list,
|
233 |
-
gt_bboxes_list,
|
234 |
-
img_metas,
|
235 |
-
featmap_sizes,
|
236 |
-
gt_bboxes_ignore_list=None,
|
237 |
-
gt_labels_list=None,
|
238 |
-
label_channels=1,
|
239 |
-
unmap_outputs=True):
|
240 |
-
"""See :func:`StageCascadeRPNHead.get_targets`."""
|
241 |
-
num_imgs = len(img_metas)
|
242 |
-
assert len(anchor_list) == len(valid_flag_list) == num_imgs
|
243 |
-
|
244 |
-
# anchor number of multi levels
|
245 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
246 |
-
|
247 |
-
# compute targets for each image
|
248 |
-
if gt_bboxes_ignore_list is None:
|
249 |
-
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
|
250 |
-
if gt_labels_list is None:
|
251 |
-
gt_labels_list = [None for _ in range(num_imgs)]
|
252 |
-
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
|
253 |
-
pos_inds_list, neg_inds_list) = multi_apply(
|
254 |
-
self._region_targets_single,
|
255 |
-
anchor_list,
|
256 |
-
valid_flag_list,
|
257 |
-
gt_bboxes_list,
|
258 |
-
gt_bboxes_ignore_list,
|
259 |
-
gt_labels_list,
|
260 |
-
img_metas,
|
261 |
-
featmap_sizes=featmap_sizes,
|
262 |
-
label_channels=label_channels)
|
263 |
-
# no valid anchors
|
264 |
-
if any([labels is None for labels in all_labels]):
|
265 |
-
return None
|
266 |
-
# sampled anchors of all images
|
267 |
-
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
|
268 |
-
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
|
269 |
-
# split targets to a list w.r.t. multiple levels
|
270 |
-
labels_list = images_to_levels(all_labels, num_level_anchors)
|
271 |
-
label_weights_list = images_to_levels(all_label_weights,
|
272 |
-
num_level_anchors)
|
273 |
-
bbox_targets_list = images_to_levels(all_bbox_targets,
|
274 |
-
num_level_anchors)
|
275 |
-
bbox_weights_list = images_to_levels(all_bbox_weights,
|
276 |
-
num_level_anchors)
|
277 |
-
return (labels_list, label_weights_list, bbox_targets_list,
|
278 |
-
bbox_weights_list, num_total_pos, num_total_neg)
|
279 |
-
|
280 |
-
def get_targets(self,
|
281 |
-
anchor_list,
|
282 |
-
valid_flag_list,
|
283 |
-
gt_bboxes,
|
284 |
-
img_metas,
|
285 |
-
featmap_sizes,
|
286 |
-
gt_bboxes_ignore=None,
|
287 |
-
label_channels=1):
|
288 |
-
"""Compute regression and classification targets for anchors.
|
289 |
-
|
290 |
-
Args:
|
291 |
-
anchor_list (list[list]): Multi level anchors of each image.
|
292 |
-
valid_flag_list (list[list]): Multi level valid flags of each
|
293 |
-
image.
|
294 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes of each image.
|
295 |
-
img_metas (list[dict]): Meta info of each image.
|
296 |
-
featmap_sizes (list[Tensor]): Feature mapsize each level
|
297 |
-
gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images
|
298 |
-
label_channels (int): Channel of label.
|
299 |
-
|
300 |
-
Returns:
|
301 |
-
cls_reg_targets (tuple)
|
302 |
-
"""
|
303 |
-
if isinstance(self.assigner, RegionAssigner):
|
304 |
-
cls_reg_targets = self.region_targets(
|
305 |
-
anchor_list,
|
306 |
-
valid_flag_list,
|
307 |
-
gt_bboxes,
|
308 |
-
img_metas,
|
309 |
-
featmap_sizes,
|
310 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
311 |
-
label_channels=label_channels)
|
312 |
-
else:
|
313 |
-
cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(
|
314 |
-
anchor_list,
|
315 |
-
valid_flag_list,
|
316 |
-
gt_bboxes,
|
317 |
-
img_metas,
|
318 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
319 |
-
label_channels=label_channels)
|
320 |
-
return cls_reg_targets
|
321 |
-
|
322 |
-
def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
|
323 |
-
""" Get offest for deformable conv based on anchor shape
|
324 |
-
NOTE: currently support deformable kernel_size=3 and dilation=1
|
325 |
-
|
326 |
-
Args:
|
327 |
-
anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of
|
328 |
-
multi-level anchors
|
329 |
-
anchor_strides (list[int]): anchor stride of each level
|
330 |
-
|
331 |
-
Returns:
|
332 |
-
offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv
|
333 |
-
kernel.
|
334 |
-
"""
|
335 |
-
|
336 |
-
def _shape_offset(anchors, stride, ks=3, dilation=1):
|
337 |
-
# currently support kernel_size=3 and dilation=1
|
338 |
-
assert ks == 3 and dilation == 1
|
339 |
-
pad = (ks - 1) // 2
|
340 |
-
idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
|
341 |
-
yy, xx = torch.meshgrid(idx, idx) # return order matters
|
342 |
-
xx = xx.reshape(-1)
|
343 |
-
yy = yy.reshape(-1)
|
344 |
-
w = (anchors[:, 2] - anchors[:, 0]) / stride
|
345 |
-
h = (anchors[:, 3] - anchors[:, 1]) / stride
|
346 |
-
w = w / (ks - 1) - dilation
|
347 |
-
h = h / (ks - 1) - dilation
|
348 |
-
offset_x = w[:, None] * xx # (NA, ks**2)
|
349 |
-
offset_y = h[:, None] * yy # (NA, ks**2)
|
350 |
-
return offset_x, offset_y
|
351 |
-
|
352 |
-
def _ctr_offset(anchors, stride, featmap_size):
|
353 |
-
feat_h, feat_w = featmap_size
|
354 |
-
assert len(anchors) == feat_h * feat_w
|
355 |
-
|
356 |
-
x = (anchors[:, 0] + anchors[:, 2]) * 0.5
|
357 |
-
y = (anchors[:, 1] + anchors[:, 3]) * 0.5
|
358 |
-
# compute centers on feature map
|
359 |
-
x = x / stride
|
360 |
-
y = y / stride
|
361 |
-
# compute predefine centers
|
362 |
-
xx = torch.arange(0, feat_w, device=anchors.device)
|
363 |
-
yy = torch.arange(0, feat_h, device=anchors.device)
|
364 |
-
yy, xx = torch.meshgrid(yy, xx)
|
365 |
-
xx = xx.reshape(-1).type_as(x)
|
366 |
-
yy = yy.reshape(-1).type_as(y)
|
367 |
-
|
368 |
-
offset_x = x - xx # (NA, )
|
369 |
-
offset_y = y - yy # (NA, )
|
370 |
-
return offset_x, offset_y
|
371 |
-
|
372 |
-
num_imgs = len(anchor_list)
|
373 |
-
num_lvls = len(anchor_list[0])
|
374 |
-
dtype = anchor_list[0][0].dtype
|
375 |
-
device = anchor_list[0][0].device
|
376 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
377 |
-
|
378 |
-
offset_list = []
|
379 |
-
for i in range(num_imgs):
|
380 |
-
mlvl_offset = []
|
381 |
-
for lvl in range(num_lvls):
|
382 |
-
c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
|
383 |
-
anchor_strides[lvl],
|
384 |
-
featmap_sizes[lvl])
|
385 |
-
s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
|
386 |
-
anchor_strides[lvl])
|
387 |
-
|
388 |
-
# offset = ctr_offset + shape_offset
|
389 |
-
offset_x = s_offset_x + c_offset_x[:, None]
|
390 |
-
offset_y = s_offset_y + c_offset_y[:, None]
|
391 |
-
|
392 |
-
# offset order (y0, x0, y1, x2, .., y8, x8, y9, x9)
|
393 |
-
offset = torch.stack([offset_y, offset_x], dim=-1)
|
394 |
-
offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
|
395 |
-
mlvl_offset.append(offset)
|
396 |
-
offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
|
397 |
-
offset_list = images_to_levels(offset_list, num_level_anchors)
|
398 |
-
return offset_list
|
399 |
-
|
400 |
-
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
|
401 |
-
bbox_targets, bbox_weights, num_total_samples):
|
402 |
-
"""Loss function on single scale."""
|
403 |
-
# classification loss
|
404 |
-
if self.with_cls:
|
405 |
-
labels = labels.reshape(-1)
|
406 |
-
label_weights = label_weights.reshape(-1)
|
407 |
-
cls_score = cls_score.permute(0, 2, 3,
|
408 |
-
1).reshape(-1, self.cls_out_channels)
|
409 |
-
loss_cls = self.loss_cls(
|
410 |
-
cls_score, labels, label_weights, avg_factor=num_total_samples)
|
411 |
-
# regression loss
|
412 |
-
bbox_targets = bbox_targets.reshape(-1, 4)
|
413 |
-
bbox_weights = bbox_weights.reshape(-1, 4)
|
414 |
-
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
|
415 |
-
if self.reg_decoded_bbox:
|
416 |
-
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
|
417 |
-
# is applied directly on the decoded bounding boxes, it
|
418 |
-
# decodes the already encoded coordinates to absolute format.
|
419 |
-
anchors = anchors.reshape(-1, 4)
|
420 |
-
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
|
421 |
-
loss_reg = self.loss_bbox(
|
422 |
-
bbox_pred,
|
423 |
-
bbox_targets,
|
424 |
-
bbox_weights,
|
425 |
-
avg_factor=num_total_samples)
|
426 |
-
if self.with_cls:
|
427 |
-
return loss_cls, loss_reg
|
428 |
-
return None, loss_reg
|
429 |
-
|
430 |
-
def loss(self,
|
431 |
-
anchor_list,
|
432 |
-
valid_flag_list,
|
433 |
-
cls_scores,
|
434 |
-
bbox_preds,
|
435 |
-
gt_bboxes,
|
436 |
-
img_metas,
|
437 |
-
gt_bboxes_ignore=None):
|
438 |
-
"""Compute losses of the head.
|
439 |
-
|
440 |
-
Args:
|
441 |
-
anchor_list (list[list]): Multi level anchors of each image.
|
442 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
443 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
444 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
445 |
-
level with shape (N, num_anchors * 4, H, W)
|
446 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
447 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
448 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
449 |
-
image size, scaling factor, etc.
|
450 |
-
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
|
451 |
-
boxes can be ignored when computing the loss. Default: None
|
452 |
-
|
453 |
-
Returns:
|
454 |
-
dict[str, Tensor]: A dictionary of loss components.
|
455 |
-
"""
|
456 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
|
457 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
458 |
-
cls_reg_targets = self.get_targets(
|
459 |
-
anchor_list,
|
460 |
-
valid_flag_list,
|
461 |
-
gt_bboxes,
|
462 |
-
img_metas,
|
463 |
-
featmap_sizes,
|
464 |
-
gt_bboxes_ignore=gt_bboxes_ignore,
|
465 |
-
label_channels=label_channels)
|
466 |
-
if cls_reg_targets is None:
|
467 |
-
return None
|
468 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
469 |
-
num_total_pos, num_total_neg) = cls_reg_targets
|
470 |
-
if self.sampling:
|
471 |
-
num_total_samples = num_total_pos + num_total_neg
|
472 |
-
else:
|
473 |
-
# 200 is hard-coded average factor,
|
474 |
-
# which follows guided anchoring.
|
475 |
-
num_total_samples = sum([label.numel()
|
476 |
-
for label in labels_list]) / 200.0
|
477 |
-
|
478 |
-
# change per image, per level anchor_list to per_level, per_image
|
479 |
-
mlvl_anchor_list = list(zip(*anchor_list))
|
480 |
-
# concat mlvl_anchor_list
|
481 |
-
mlvl_anchor_list = [
|
482 |
-
torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
|
483 |
-
]
|
484 |
-
|
485 |
-
losses = multi_apply(
|
486 |
-
self.loss_single,
|
487 |
-
cls_scores,
|
488 |
-
bbox_preds,
|
489 |
-
mlvl_anchor_list,
|
490 |
-
labels_list,
|
491 |
-
label_weights_list,
|
492 |
-
bbox_targets_list,
|
493 |
-
bbox_weights_list,
|
494 |
-
num_total_samples=num_total_samples)
|
495 |
-
if self.with_cls:
|
496 |
-
return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
|
497 |
-
return dict(loss_rpn_reg=losses[1])
|
498 |
-
|
499 |
-
def get_bboxes(self,
|
500 |
-
anchor_list,
|
501 |
-
cls_scores,
|
502 |
-
bbox_preds,
|
503 |
-
img_metas,
|
504 |
-
cfg,
|
505 |
-
rescale=False):
|
506 |
-
"""Get proposal predict."""
|
507 |
-
assert len(cls_scores) == len(bbox_preds)
|
508 |
-
num_levels = len(cls_scores)
|
509 |
-
|
510 |
-
result_list = []
|
511 |
-
for img_id in range(len(img_metas)):
|
512 |
-
cls_score_list = [
|
513 |
-
cls_scores[i][img_id].detach() for i in range(num_levels)
|
514 |
-
]
|
515 |
-
bbox_pred_list = [
|
516 |
-
bbox_preds[i][img_id].detach() for i in range(num_levels)
|
517 |
-
]
|
518 |
-
img_shape = img_metas[img_id]['img_shape']
|
519 |
-
scale_factor = img_metas[img_id]['scale_factor']
|
520 |
-
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
|
521 |
-
anchor_list[img_id], img_shape,
|
522 |
-
scale_factor, cfg, rescale)
|
523 |
-
result_list.append(proposals)
|
524 |
-
return result_list
|
525 |
-
|
526 |
-
def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
|
527 |
-
"""Refine bboxes through stages."""
|
528 |
-
num_levels = len(bbox_preds)
|
529 |
-
new_anchor_list = []
|
530 |
-
for img_id in range(len(img_metas)):
|
531 |
-
mlvl_anchors = []
|
532 |
-
for i in range(num_levels):
|
533 |
-
bbox_pred = bbox_preds[i][img_id].detach()
|
534 |
-
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
535 |
-
img_shape = img_metas[img_id]['img_shape']
|
536 |
-
bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
|
537 |
-
bbox_pred, img_shape)
|
538 |
-
mlvl_anchors.append(bboxes)
|
539 |
-
new_anchor_list.append(mlvl_anchors)
|
540 |
-
return new_anchor_list
|
541 |
-
|
542 |
-
# TODO: temporary plan
|
543 |
-
def _get_bboxes_single(self,
|
544 |
-
cls_scores,
|
545 |
-
bbox_preds,
|
546 |
-
mlvl_anchors,
|
547 |
-
img_shape,
|
548 |
-
scale_factor,
|
549 |
-
cfg,
|
550 |
-
rescale=False):
|
551 |
-
"""Transform outputs for a single batch item into bbox predictions.
|
552 |
-
|
553 |
-
Args:
|
554 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
555 |
-
Has shape (num_anchors * num_classes, H, W).
|
556 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
557 |
-
level with shape (num_anchors * 4, H, W).
|
558 |
-
mlvl_anchors (list[Tensor]): Box reference for each scale level
|
559 |
-
with shape (num_total_anchors, 4).
|
560 |
-
img_shape (tuple[int]): Shape of the input image,
|
561 |
-
(height, width, 3).
|
562 |
-
scale_factor (ndarray): Scale factor of the image arange as
|
563 |
-
(w_scale, h_scale, w_scale, h_scale).
|
564 |
-
cfg (mmcv.Config): Test / postprocessing configuration,
|
565 |
-
if None, test_cfg would be used.
|
566 |
-
rescale (bool): If True, return boxes in original image space.
|
567 |
-
|
568 |
-
Returns:
|
569 |
-
Tensor: Labeled boxes have the shape of (n,5), where the
|
570 |
-
first 4 columns are bounding box positions
|
571 |
-
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
|
572 |
-
between 0 and 1.
|
573 |
-
"""
|
574 |
-
cfg = self.test_cfg if cfg is None else cfg
|
575 |
-
cfg = copy.deepcopy(cfg)
|
576 |
-
# bboxes from different level should be independent during NMS,
|
577 |
-
# level_ids are used as labels for batched NMS to separate them
|
578 |
-
level_ids = []
|
579 |
-
mlvl_scores = []
|
580 |
-
mlvl_bbox_preds = []
|
581 |
-
mlvl_valid_anchors = []
|
582 |
-
for idx in range(len(cls_scores)):
|
583 |
-
rpn_cls_score = cls_scores[idx]
|
584 |
-
rpn_bbox_pred = bbox_preds[idx]
|
585 |
-
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
|
586 |
-
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
|
587 |
-
if self.use_sigmoid_cls:
|
588 |
-
rpn_cls_score = rpn_cls_score.reshape(-1)
|
589 |
-
scores = rpn_cls_score.sigmoid()
|
590 |
-
else:
|
591 |
-
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
|
592 |
-
# We set FG labels to [0, num_class-1] and BG label to
|
593 |
-
# num_class in RPN head since mmdet v2.5, which is unified to
|
594 |
-
# be consistent with other head since mmdet v2.0. In mmdet v2.0
|
595 |
-
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
|
596 |
-
scores = rpn_cls_score.softmax(dim=1)[:, 0]
|
597 |
-
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
598 |
-
anchors = mlvl_anchors[idx]
|
599 |
-
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
|
600 |
-
# sort is faster than topk
|
601 |
-
# _, topk_inds = scores.topk(cfg.nms_pre)
|
602 |
-
if torch.onnx.is_in_onnx_export():
|
603 |
-
# sort op will be converted to TopK in onnx
|
604 |
-
# and k<=3480 in TensorRT
|
605 |
-
_, topk_inds = scores.topk(cfg.nms_pre)
|
606 |
-
scores = scores[topk_inds]
|
607 |
-
else:
|
608 |
-
ranked_scores, rank_inds = scores.sort(descending=True)
|
609 |
-
topk_inds = rank_inds[:cfg.nms_pre]
|
610 |
-
scores = ranked_scores[:cfg.nms_pre]
|
611 |
-
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
|
612 |
-
anchors = anchors[topk_inds, :]
|
613 |
-
mlvl_scores.append(scores)
|
614 |
-
mlvl_bbox_preds.append(rpn_bbox_pred)
|
615 |
-
mlvl_valid_anchors.append(anchors)
|
616 |
-
level_ids.append(
|
617 |
-
scores.new_full((scores.size(0), ), idx, dtype=torch.long))
|
618 |
-
|
619 |
-
scores = torch.cat(mlvl_scores)
|
620 |
-
anchors = torch.cat(mlvl_valid_anchors)
|
621 |
-
rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
|
622 |
-
proposals = self.bbox_coder.decode(
|
623 |
-
anchors, rpn_bbox_pred, max_shape=img_shape)
|
624 |
-
ids = torch.cat(level_ids)
|
625 |
-
|
626 |
-
# Skip nonzero op while exporting to ONNX
|
627 |
-
if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
|
628 |
-
w = proposals[:, 2] - proposals[:, 0]
|
629 |
-
h = proposals[:, 3] - proposals[:, 1]
|
630 |
-
valid_inds = torch.nonzero(
|
631 |
-
(w >= cfg.min_bbox_size)
|
632 |
-
& (h >= cfg.min_bbox_size),
|
633 |
-
as_tuple=False).squeeze()
|
634 |
-
if valid_inds.sum().item() != len(proposals):
|
635 |
-
proposals = proposals[valid_inds, :]
|
636 |
-
scores = scores[valid_inds]
|
637 |
-
ids = ids[valid_inds]
|
638 |
-
|
639 |
-
# deprecate arguments warning
|
640 |
-
if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
|
641 |
-
warnings.warn(
|
642 |
-
'In rpn_proposal or test_cfg, '
|
643 |
-
'nms_thr has been moved to a dict named nms as '
|
644 |
-
'iou_threshold, max_num has been renamed as max_per_img, '
|
645 |
-
'name of original arguments and the way to specify '
|
646 |
-
'iou_threshold of NMS will be deprecated.')
|
647 |
-
if 'nms' not in cfg:
|
648 |
-
cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
|
649 |
-
if 'max_num' in cfg:
|
650 |
-
if 'max_per_img' in cfg:
|
651 |
-
assert cfg.max_num == cfg.max_per_img, f'You ' \
|
652 |
-
f'set max_num and ' \
|
653 |
-
f'max_per_img at the same time, but get {cfg.max_num} ' \
|
654 |
-
f'and {cfg.max_per_img} respectively' \
|
655 |
-
'Please delete max_num which will be deprecated.'
|
656 |
-
else:
|
657 |
-
cfg.max_per_img = cfg.max_num
|
658 |
-
if 'nms_thr' in cfg:
|
659 |
-
assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
|
660 |
-
f' iou_threshold in nms and ' \
|
661 |
-
f'nms_thr at the same time, but get' \
|
662 |
-
f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
|
663 |
-
f' respectively. Please delete the nms_thr ' \
|
664 |
-
f'which will be deprecated.'
|
665 |
-
|
666 |
-
dets, keep = batched_nms(proposals, scores, ids, cfg.nms)
|
667 |
-
return dets[:cfg.max_per_img]
|
668 |
-
|
669 |
-
|
670 |
-
@HEADS.register_module()
|
671 |
-
class CascadeRPNHead(BaseDenseHead):
|
672 |
-
"""The CascadeRPNHead will predict more accurate region proposals, which is
|
673 |
-
required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
|
674 |
-
consists of a sequence of RPNStage to progressively improve the accuracy of
|
675 |
-
the detected proposals.
|
676 |
-
|
677 |
-
More details can be found in ``https://arxiv.org/abs/1909.06720``.
|
678 |
-
|
679 |
-
Args:
|
680 |
-
num_stages (int): number of CascadeRPN stages.
|
681 |
-
stages (list[dict]): list of configs to build the stages.
|
682 |
-
train_cfg (list[dict]): list of configs at training time each stage.
|
683 |
-
test_cfg (dict): config at testing time.
|
684 |
-
"""
|
685 |
-
|
686 |
-
def __init__(self, num_stages, stages, train_cfg, test_cfg):
|
687 |
-
super(CascadeRPNHead, self).__init__()
|
688 |
-
assert num_stages == len(stages)
|
689 |
-
self.num_stages = num_stages
|
690 |
-
self.stages = nn.ModuleList()
|
691 |
-
for i in range(len(stages)):
|
692 |
-
train_cfg_i = train_cfg[i] if train_cfg is not None else None
|
693 |
-
stages[i].update(train_cfg=train_cfg_i)
|
694 |
-
stages[i].update(test_cfg=test_cfg)
|
695 |
-
self.stages.append(build_head(stages[i]))
|
696 |
-
self.train_cfg = train_cfg
|
697 |
-
self.test_cfg = test_cfg
|
698 |
-
|
699 |
-
def init_weights(self):
|
700 |
-
"""Init weight of CascadeRPN."""
|
701 |
-
for i in range(self.num_stages):
|
702 |
-
self.stages[i].init_weights()
|
703 |
-
|
704 |
-
def loss(self):
|
705 |
-
"""loss() is implemented in StageCascadeRPNHead."""
|
706 |
-
pass
|
707 |
-
|
708 |
-
def get_bboxes(self):
|
709 |
-
"""get_bboxes() is implemented in StageCascadeRPNHead."""
|
710 |
-
pass
|
711 |
-
|
712 |
-
def forward_train(self,
|
713 |
-
x,
|
714 |
-
img_metas,
|
715 |
-
gt_bboxes,
|
716 |
-
gt_labels=None,
|
717 |
-
gt_bboxes_ignore=None,
|
718 |
-
proposal_cfg=None):
|
719 |
-
"""Forward train function."""
|
720 |
-
assert gt_labels is None, 'RPN does not require gt_labels'
|
721 |
-
|
722 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in x]
|
723 |
-
device = x[0].device
|
724 |
-
anchor_list, valid_flag_list = self.stages[0].get_anchors(
|
725 |
-
featmap_sizes, img_metas, device=device)
|
726 |
-
|
727 |
-
losses = dict()
|
728 |
-
|
729 |
-
for i in range(self.num_stages):
|
730 |
-
stage = self.stages[i]
|
731 |
-
|
732 |
-
if stage.adapt_cfg['type'] == 'offset':
|
733 |
-
offset_list = stage.anchor_offset(anchor_list,
|
734 |
-
stage.anchor_strides,
|
735 |
-
featmap_sizes)
|
736 |
-
else:
|
737 |
-
offset_list = None
|
738 |
-
x, cls_score, bbox_pred = stage(x, offset_list)
|
739 |
-
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
|
740 |
-
bbox_pred, gt_bboxes, img_metas)
|
741 |
-
stage_loss = stage.loss(*rpn_loss_inputs)
|
742 |
-
for name, value in stage_loss.items():
|
743 |
-
losses['s{}.{}'.format(i, name)] = value
|
744 |
-
|
745 |
-
# refine boxes
|
746 |
-
if i < self.num_stages - 1:
|
747 |
-
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
|
748 |
-
img_metas)
|
749 |
-
if proposal_cfg is None:
|
750 |
-
return losses
|
751 |
-
else:
|
752 |
-
proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
|
753 |
-
bbox_pred, img_metas,
|
754 |
-
self.test_cfg)
|
755 |
-
return losses, proposal_list
|
756 |
-
|
757 |
-
def simple_test_rpn(self, x, img_metas):
|
758 |
-
"""Simple forward test function."""
|
759 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in x]
|
760 |
-
device = x[0].device
|
761 |
-
anchor_list, _ = self.stages[0].get_anchors(
|
762 |
-
featmap_sizes, img_metas, device=device)
|
763 |
-
|
764 |
-
for i in range(self.num_stages):
|
765 |
-
stage = self.stages[i]
|
766 |
-
if stage.adapt_cfg['type'] == 'offset':
|
767 |
-
offset_list = stage.anchor_offset(anchor_list,
|
768 |
-
stage.anchor_strides,
|
769 |
-
featmap_sizes)
|
770 |
-
else:
|
771 |
-
offset_list = None
|
772 |
-
x, cls_score, bbox_pred = stage(x, offset_list)
|
773 |
-
if i < self.num_stages - 1:
|
774 |
-
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
|
775 |
-
img_metas)
|
776 |
-
|
777 |
-
proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
|
778 |
-
bbox_pred, img_metas,
|
779 |
-
self.test_cfg)
|
780 |
-
return proposal_list
|
781 |
-
|
782 |
-
def aug_test_rpn(self, x, img_metas):
|
783 |
-
"""Augmented forward test function."""
|
784 |
-
raise NotImplementedError
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/rpn.py
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
import mmcv
|
2 |
-
from mmcv.image import tensor2imgs
|
3 |
-
|
4 |
-
from mmdet.core import bbox_mapping
|
5 |
-
from ..builder import DETECTORS, build_backbone, build_head, build_neck
|
6 |
-
from .base import BaseDetector
|
7 |
-
|
8 |
-
|
9 |
-
@DETECTORS.register_module()
|
10 |
-
class RPN(BaseDetector):
|
11 |
-
"""Implementation of Region Proposal Network."""
|
12 |
-
|
13 |
-
def __init__(self,
|
14 |
-
backbone,
|
15 |
-
neck,
|
16 |
-
rpn_head,
|
17 |
-
train_cfg,
|
18 |
-
test_cfg,
|
19 |
-
pretrained=None):
|
20 |
-
super(RPN, self).__init__()
|
21 |
-
self.backbone = build_backbone(backbone)
|
22 |
-
self.neck = build_neck(neck) if neck is not None else None
|
23 |
-
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
|
24 |
-
rpn_head.update(train_cfg=rpn_train_cfg)
|
25 |
-
rpn_head.update(test_cfg=test_cfg.rpn)
|
26 |
-
self.rpn_head = build_head(rpn_head)
|
27 |
-
self.train_cfg = train_cfg
|
28 |
-
self.test_cfg = test_cfg
|
29 |
-
self.init_weights(pretrained=pretrained)
|
30 |
-
|
31 |
-
def init_weights(self, pretrained=None):
|
32 |
-
"""Initialize the weights in detector.
|
33 |
-
|
34 |
-
Args:
|
35 |
-
pretrained (str, optional): Path to pre-trained weights.
|
36 |
-
Defaults to None.
|
37 |
-
"""
|
38 |
-
super(RPN, self).init_weights(pretrained)
|
39 |
-
self.backbone.init_weights(pretrained=pretrained)
|
40 |
-
if self.with_neck:
|
41 |
-
self.neck.init_weights()
|
42 |
-
self.rpn_head.init_weights()
|
43 |
-
|
44 |
-
def extract_feat(self, img):
|
45 |
-
"""Extract features.
|
46 |
-
|
47 |
-
Args:
|
48 |
-
img (torch.Tensor): Image tensor with shape (n, c, h ,w).
|
49 |
-
|
50 |
-
Returns:
|
51 |
-
list[torch.Tensor]: Multi-level features that may have
|
52 |
-
different resolutions.
|
53 |
-
"""
|
54 |
-
x = self.backbone(img)
|
55 |
-
if self.with_neck:
|
56 |
-
x = self.neck(x)
|
57 |
-
return x
|
58 |
-
|
59 |
-
def forward_dummy(self, img):
|
60 |
-
"""Dummy forward function."""
|
61 |
-
x = self.extract_feat(img)
|
62 |
-
rpn_outs = self.rpn_head(x)
|
63 |
-
return rpn_outs
|
64 |
-
|
65 |
-
def forward_train(self,
|
66 |
-
img,
|
67 |
-
img_metas,
|
68 |
-
gt_bboxes=None,
|
69 |
-
gt_bboxes_ignore=None):
|
70 |
-
"""
|
71 |
-
Args:
|
72 |
-
img (Tensor): Input images of shape (N, C, H, W).
|
73 |
-
Typically these should be mean centered and std scaled.
|
74 |
-
img_metas (list[dict]): A List of image info dict where each dict
|
75 |
-
has: 'img_shape', 'scale_factor', 'flip', and may also contain
|
76 |
-
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
|
77 |
-
For details on the values of these keys see
|
78 |
-
:class:`mmdet.datasets.pipelines.Collect`.
|
79 |
-
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
|
80 |
-
image in [tl_x, tl_y, br_x, br_y] format.
|
81 |
-
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
|
82 |
-
boxes can be ignored when computing the loss.
|
83 |
-
|
84 |
-
Returns:
|
85 |
-
dict[str, Tensor]: A dictionary of loss components.
|
86 |
-
"""
|
87 |
-
if (isinstance(self.train_cfg.rpn, dict)
|
88 |
-
and self.train_cfg.rpn.get('debug', False)):
|
89 |
-
self.rpn_head.debug_imgs = tensor2imgs(img)
|
90 |
-
|
91 |
-
x = self.extract_feat(img)
|
92 |
-
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
|
93 |
-
gt_bboxes_ignore)
|
94 |
-
return losses
|
95 |
-
|
96 |
-
def simple_test(self, img, img_metas, rescale=False):
|
97 |
-
"""Test function without test time augmentation.
|
98 |
-
|
99 |
-
Args:
|
100 |
-
imgs (list[torch.Tensor]): List of multiple images
|
101 |
-
img_metas (list[dict]): List of image information.
|
102 |
-
rescale (bool, optional): Whether to rescale the results.
|
103 |
-
Defaults to False.
|
104 |
-
|
105 |
-
Returns:
|
106 |
-
list[np.ndarray]: proposals
|
107 |
-
"""
|
108 |
-
x = self.extract_feat(img)
|
109 |
-
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
|
110 |
-
if rescale:
|
111 |
-
for proposals, meta in zip(proposal_list, img_metas):
|
112 |
-
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
|
113 |
-
|
114 |
-
return [proposal.cpu().numpy() for proposal in proposal_list]
|
115 |
-
|
116 |
-
def aug_test(self, imgs, img_metas, rescale=False):
|
117 |
-
"""Test function with test time augmentation.
|
118 |
-
|
119 |
-
Args:
|
120 |
-
imgs (list[torch.Tensor]): List of multiple images
|
121 |
-
img_metas (list[dict]): List of image information.
|
122 |
-
rescale (bool, optional): Whether to rescale the results.
|
123 |
-
Defaults to False.
|
124 |
-
|
125 |
-
Returns:
|
126 |
-
list[np.ndarray]: proposals
|
127 |
-
"""
|
128 |
-
proposal_list = self.rpn_head.aug_test_rpn(
|
129 |
-
self.extract_feats(imgs), img_metas)
|
130 |
-
if not rescale:
|
131 |
-
for proposals, img_meta in zip(proposal_list, img_metas[0]):
|
132 |
-
img_shape = img_meta['img_shape']
|
133 |
-
scale_factor = img_meta['scale_factor']
|
134 |
-
flip = img_meta['flip']
|
135 |
-
flip_direction = img_meta['flip_direction']
|
136 |
-
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
|
137 |
-
scale_factor, flip,
|
138 |
-
flip_direction)
|
139 |
-
return [proposal.cpu().numpy() for proposal in proposal_list]
|
140 |
-
|
141 |
-
def show_result(self, data, result, top_k=20, **kwargs):
|
142 |
-
"""Show RPN proposals on the image.
|
143 |
-
|
144 |
-
Args:
|
145 |
-
data (str or np.ndarray): Image filename or loaded image.
|
146 |
-
result (Tensor or tuple): The results to draw over `img`
|
147 |
-
bbox_result or (bbox_result, segm_result).
|
148 |
-
top_k (int): Plot the first k bboxes only
|
149 |
-
if set positive. Default: 20
|
150 |
-
|
151 |
-
Returns:
|
152 |
-
np.ndarray: The image with bboxes drawn on it.
|
153 |
-
"""
|
154 |
-
mmcv.imshow_bboxes(data, result, top_k=top_k)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/model.py
DELETED
@@ -1,935 +0,0 @@
|
|
1 |
-
""" CLAP Model
|
2 |
-
|
3 |
-
Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
4 |
-
Adapted to the Audio Task.
|
5 |
-
"""
|
6 |
-
|
7 |
-
from collections import OrderedDict
|
8 |
-
from dataclasses import dataclass
|
9 |
-
from email.mime import audio
|
10 |
-
from typing import Tuple, Union, Callable, Optional
|
11 |
-
|
12 |
-
import numpy as np
|
13 |
-
import torch
|
14 |
-
import torch.nn.functional as F
|
15 |
-
from torch import nn
|
16 |
-
|
17 |
-
from .timm_model import TimmModel
|
18 |
-
import logging
|
19 |
-
from .utils import freeze_batch_norm_2d
|
20 |
-
|
21 |
-
from .pann_model import create_pann_model
|
22 |
-
from .htsat import create_htsat_model
|
23 |
-
from transformers import BertModel, RobertaModel, BartModel, RobertaConfig
|
24 |
-
from transformers.tokenization_utils_base import BatchEncoding
|
25 |
-
|
26 |
-
|
27 |
-
class MLPLayers(nn.Module):
|
28 |
-
def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
|
29 |
-
super(MLPLayers, self).__init__()
|
30 |
-
self.nonlin = nonlin
|
31 |
-
self.dropout = dropout
|
32 |
-
|
33 |
-
sequence = []
|
34 |
-
for u0, u1 in zip(units[:-1], units[1:]):
|
35 |
-
sequence.append(nn.Linear(u0, u1))
|
36 |
-
sequence.append(self.nonlin)
|
37 |
-
sequence.append(nn.Dropout(self.dropout))
|
38 |
-
sequence = sequence[:-2]
|
39 |
-
|
40 |
-
self.sequential = nn.Sequential(*sequence)
|
41 |
-
|
42 |
-
def forward(self, X):
|
43 |
-
X = self.sequential(X)
|
44 |
-
return X
|
45 |
-
|
46 |
-
|
47 |
-
class Bottleneck(nn.Module):
|
48 |
-
expansion = 4
|
49 |
-
|
50 |
-
def __init__(self, inplanes, planes, stride=1):
|
51 |
-
super().__init__()
|
52 |
-
|
53 |
-
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
|
54 |
-
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
|
55 |
-
self.bn1 = nn.BatchNorm2d(planes)
|
56 |
-
|
57 |
-
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
|
58 |
-
self.bn2 = nn.BatchNorm2d(planes)
|
59 |
-
|
60 |
-
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
|
61 |
-
|
62 |
-
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
|
63 |
-
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
|
64 |
-
|
65 |
-
self.relu = nn.ReLU(inplace=True)
|
66 |
-
self.downsample = None
|
67 |
-
self.stride = stride
|
68 |
-
|
69 |
-
if stride > 1 or inplanes != planes * Bottleneck.expansion:
|
70 |
-
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
|
71 |
-
self.downsample = nn.Sequential(
|
72 |
-
OrderedDict(
|
73 |
-
[
|
74 |
-
("-1", nn.AvgPool2d(stride)),
|
75 |
-
(
|
76 |
-
"0",
|
77 |
-
nn.Conv2d(
|
78 |
-
inplanes,
|
79 |
-
planes * self.expansion,
|
80 |
-
1,
|
81 |
-
stride=1,
|
82 |
-
bias=False,
|
83 |
-
),
|
84 |
-
),
|
85 |
-
("1", nn.BatchNorm2d(planes * self.expansion)),
|
86 |
-
]
|
87 |
-
)
|
88 |
-
)
|
89 |
-
|
90 |
-
def forward(self, x: torch.Tensor):
|
91 |
-
identity = x
|
92 |
-
|
93 |
-
out = self.relu(self.bn1(self.conv1(x)))
|
94 |
-
out = self.relu(self.bn2(self.conv2(out)))
|
95 |
-
out = self.avgpool(out)
|
96 |
-
out = self.bn3(self.conv3(out))
|
97 |
-
|
98 |
-
if self.downsample is not None:
|
99 |
-
identity = self.downsample(x)
|
100 |
-
|
101 |
-
out += identity
|
102 |
-
out = self.relu(out)
|
103 |
-
return out
|
104 |
-
|
105 |
-
|
106 |
-
class AttentionPool2d(nn.Module):
|
107 |
-
def __init__(
|
108 |
-
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
|
109 |
-
):
|
110 |
-
super().__init__()
|
111 |
-
self.positional_embedding = nn.Parameter(
|
112 |
-
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
|
113 |
-
)
|
114 |
-
self.k_proj = nn.Linear(embed_dim, embed_dim)
|
115 |
-
self.q_proj = nn.Linear(embed_dim, embed_dim)
|
116 |
-
self.v_proj = nn.Linear(embed_dim, embed_dim)
|
117 |
-
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
|
118 |
-
self.num_heads = num_heads
|
119 |
-
|
120 |
-
def forward(self, x):
|
121 |
-
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
|
122 |
-
2, 0, 1
|
123 |
-
) # NCHW -> (HW)NC
|
124 |
-
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
|
125 |
-
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
|
126 |
-
x, _ = F.multi_head_attention_forward(
|
127 |
-
query=x,
|
128 |
-
key=x,
|
129 |
-
value=x,
|
130 |
-
embed_dim_to_check=x.shape[-1],
|
131 |
-
num_heads=self.num_heads,
|
132 |
-
q_proj_weight=self.q_proj.weight,
|
133 |
-
k_proj_weight=self.k_proj.weight,
|
134 |
-
v_proj_weight=self.v_proj.weight,
|
135 |
-
in_proj_weight=None,
|
136 |
-
in_proj_bias=torch.cat(
|
137 |
-
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
|
138 |
-
),
|
139 |
-
bias_k=None,
|
140 |
-
bias_v=None,
|
141 |
-
add_zero_attn=False,
|
142 |
-
dropout_p=0,
|
143 |
-
out_proj_weight=self.c_proj.weight,
|
144 |
-
out_proj_bias=self.c_proj.bias,
|
145 |
-
use_separate_proj_weight=True,
|
146 |
-
training=self.training,
|
147 |
-
need_weights=False,
|
148 |
-
)
|
149 |
-
|
150 |
-
return x[0]
|
151 |
-
|
152 |
-
|
153 |
-
class ModifiedResNet(nn.Module):
|
154 |
-
"""
|
155 |
-
A ResNet class that is similar to torchvision's but contains the following changes:
|
156 |
-
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
|
157 |
-
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
|
158 |
-
- The final pooling layer is a QKV attention instead of an average pool
|
159 |
-
"""
|
160 |
-
|
161 |
-
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
|
162 |
-
super().__init__()
|
163 |
-
self.output_dim = output_dim
|
164 |
-
self.image_size = image_size
|
165 |
-
|
166 |
-
# the 3-layer stem
|
167 |
-
self.conv1 = nn.Conv2d(
|
168 |
-
3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
|
169 |
-
)
|
170 |
-
self.bn1 = nn.BatchNorm2d(width // 2)
|
171 |
-
self.conv2 = nn.Conv2d(
|
172 |
-
width // 2, width // 2, kernel_size=3, padding=1, bias=False
|
173 |
-
)
|
174 |
-
self.bn2 = nn.BatchNorm2d(width // 2)
|
175 |
-
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
|
176 |
-
self.bn3 = nn.BatchNorm2d(width)
|
177 |
-
self.avgpool = nn.AvgPool2d(2)
|
178 |
-
self.relu = nn.ReLU(inplace=True)
|
179 |
-
|
180 |
-
# residual layers
|
181 |
-
self._inplanes = width # this is a *mutable* variable used during construction
|
182 |
-
self.layer1 = self._make_layer(width, layers[0])
|
183 |
-
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
|
184 |
-
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
|
185 |
-
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
|
186 |
-
|
187 |
-
embed_dim = width * 32 # the ResNet feature dimension
|
188 |
-
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
|
189 |
-
|
190 |
-
self.init_parameters()
|
191 |
-
|
192 |
-
def _make_layer(self, planes, blocks, stride=1):
|
193 |
-
layers = [Bottleneck(self._inplanes, planes, stride)]
|
194 |
-
|
195 |
-
self._inplanes = planes * Bottleneck.expansion
|
196 |
-
for _ in range(1, blocks):
|
197 |
-
layers.append(Bottleneck(self._inplanes, planes))
|
198 |
-
|
199 |
-
return nn.Sequential(*layers)
|
200 |
-
|
201 |
-
def init_parameters(self):
|
202 |
-
if self.attnpool is not None:
|
203 |
-
std = self.attnpool.c_proj.in_features**-0.5
|
204 |
-
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
|
205 |
-
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
|
206 |
-
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
|
207 |
-
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
|
208 |
-
|
209 |
-
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
|
210 |
-
for name, param in resnet_block.named_parameters():
|
211 |
-
if name.endswith("bn3.weight"):
|
212 |
-
nn.init.zeros_(param)
|
213 |
-
|
214 |
-
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
|
215 |
-
assert (
|
216 |
-
unlocked_groups == 0
|
217 |
-
), "partial locking not currently supported for this model"
|
218 |
-
for param in self.parameters():
|
219 |
-
param.requires_grad = False
|
220 |
-
if freeze_bn_stats:
|
221 |
-
freeze_batch_norm_2d(self)
|
222 |
-
|
223 |
-
def stem(self, x):
|
224 |
-
for conv, bn in [
|
225 |
-
(self.conv1, self.bn1),
|
226 |
-
(self.conv2, self.bn2),
|
227 |
-
(self.conv3, self.bn3),
|
228 |
-
]:
|
229 |
-
x = self.relu(bn(conv(x)))
|
230 |
-
x = self.avgpool(x)
|
231 |
-
return x
|
232 |
-
|
233 |
-
def forward(self, x):
|
234 |
-
x = self.stem(x)
|
235 |
-
x = self.layer1(x)
|
236 |
-
x = self.layer2(x)
|
237 |
-
x = self.layer3(x)
|
238 |
-
x = self.layer4(x)
|
239 |
-
x = self.attnpool(x)
|
240 |
-
|
241 |
-
return x
|
242 |
-
|
243 |
-
|
244 |
-
class LayerNorm(nn.LayerNorm):
|
245 |
-
"""Subclass torch's LayerNorm to handle fp16."""
|
246 |
-
|
247 |
-
def forward(self, x: torch.Tensor):
|
248 |
-
orig_type = x.dtype
|
249 |
-
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
250 |
-
return x.to(orig_type)
|
251 |
-
|
252 |
-
|
253 |
-
class QuickGELU(nn.Module):
|
254 |
-
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
|
255 |
-
def forward(self, x: torch.Tensor):
|
256 |
-
return x * torch.sigmoid(1.702 * x)
|
257 |
-
|
258 |
-
|
259 |
-
class ResidualAttentionBlock(nn.Module):
|
260 |
-
def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
|
261 |
-
super().__init__()
|
262 |
-
|
263 |
-
self.attn = nn.MultiheadAttention(d_model, n_head)
|
264 |
-
self.ln_1 = LayerNorm(d_model)
|
265 |
-
self.mlp = nn.Sequential(
|
266 |
-
OrderedDict(
|
267 |
-
[
|
268 |
-
("c_fc", nn.Linear(d_model, d_model * 4)),
|
269 |
-
("gelu", act_layer()),
|
270 |
-
("c_proj", nn.Linear(d_model * 4, d_model)),
|
271 |
-
]
|
272 |
-
)
|
273 |
-
)
|
274 |
-
self.ln_2 = LayerNorm(d_model)
|
275 |
-
|
276 |
-
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
|
277 |
-
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
|
278 |
-
|
279 |
-
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
|
280 |
-
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
|
281 |
-
x = x + self.mlp(self.ln_2(x))
|
282 |
-
return x
|
283 |
-
|
284 |
-
|
285 |
-
class Transformer(nn.Module):
|
286 |
-
def __init__(
|
287 |
-
self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
|
288 |
-
):
|
289 |
-
super().__init__()
|
290 |
-
self.width = width
|
291 |
-
self.layers = layers
|
292 |
-
self.resblocks = nn.ModuleList(
|
293 |
-
[
|
294 |
-
ResidualAttentionBlock(width, heads, act_layer=act_layer)
|
295 |
-
for _ in range(layers)
|
296 |
-
]
|
297 |
-
)
|
298 |
-
|
299 |
-
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
|
300 |
-
for r in self.resblocks:
|
301 |
-
x = r(x, attn_mask=attn_mask)
|
302 |
-
return x
|
303 |
-
|
304 |
-
|
305 |
-
class VisualTransformer(nn.Module):
|
306 |
-
def __init__(
|
307 |
-
self,
|
308 |
-
image_size: int,
|
309 |
-
patch_size: int,
|
310 |
-
width: int,
|
311 |
-
layers: int,
|
312 |
-
heads: int,
|
313 |
-
output_dim: int,
|
314 |
-
act_layer: Callable = nn.GELU,
|
315 |
-
):
|
316 |
-
super().__init__()
|
317 |
-
self.image_size = image_size
|
318 |
-
self.output_dim = output_dim
|
319 |
-
self.conv1 = nn.Conv2d(
|
320 |
-
in_channels=3,
|
321 |
-
out_channels=width,
|
322 |
-
kernel_size=patch_size,
|
323 |
-
stride=patch_size,
|
324 |
-
bias=False,
|
325 |
-
)
|
326 |
-
|
327 |
-
scale = width**-0.5
|
328 |
-
self.class_embedding = nn.Parameter(scale * torch.randn(width))
|
329 |
-
self.positional_embedding = nn.Parameter(
|
330 |
-
scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
|
331 |
-
)
|
332 |
-
self.ln_pre = LayerNorm(width)
|
333 |
-
|
334 |
-
self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)
|
335 |
-
|
336 |
-
self.ln_post = LayerNorm(width)
|
337 |
-
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
|
338 |
-
|
339 |
-
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
|
340 |
-
assert (
|
341 |
-
unlocked_groups == 0
|
342 |
-
), "partial locking not currently supported for this model"
|
343 |
-
for param in self.parameters():
|
344 |
-
param.requires_grad = False
|
345 |
-
|
346 |
-
def forward(self, x: torch.Tensor):
|
347 |
-
x = self.conv1(x) # shape = [*, width, grid, grid]
|
348 |
-
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
|
349 |
-
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
|
350 |
-
x = torch.cat(
|
351 |
-
[
|
352 |
-
self.class_embedding.to(x.dtype)
|
353 |
-
+ torch.zeros(
|
354 |
-
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
|
355 |
-
),
|
356 |
-
x,
|
357 |
-
],
|
358 |
-
dim=1,
|
359 |
-
) # shape = [*, grid ** 2 + 1, width]
|
360 |
-
x = x + self.positional_embedding.to(x.dtype)
|
361 |
-
x = self.ln_pre(x)
|
362 |
-
|
363 |
-
x = x.permute(1, 0, 2) # NLD -> LND
|
364 |
-
x = self.text_branch(x)
|
365 |
-
x = x.permute(1, 0, 2) # LND -> NLD
|
366 |
-
|
367 |
-
x = self.ln_post(x[:, 0, :])
|
368 |
-
|
369 |
-
if self.proj is not None:
|
370 |
-
x = x @ self.proj
|
371 |
-
|
372 |
-
return x
|
373 |
-
|
374 |
-
|
375 |
-
@dataclass
|
376 |
-
class CLAPVisionCfg:
|
377 |
-
layers: Union[Tuple[int, int, int, int], int] = 12
|
378 |
-
width: int = 768
|
379 |
-
patch_size: int = 16
|
380 |
-
image_size: Union[Tuple[int, int], int] = 224
|
381 |
-
timm_model_name: str = (
|
382 |
-
None # a valid model name overrides layers, width, patch_size
|
383 |
-
)
|
384 |
-
timm_model_pretrained: bool = (
|
385 |
-
False # use (imagenet) pretrained weights for named model
|
386 |
-
)
|
387 |
-
timm_pool: str = (
|
388 |
-
"avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
|
389 |
-
)
|
390 |
-
timm_proj: str = (
|
391 |
-
"linear" # linear projection for timm model output ('linear', 'mlp', '')
|
392 |
-
)
|
393 |
-
|
394 |
-
|
395 |
-
# Audio Config Class
|
396 |
-
@dataclass
|
397 |
-
class CLAPAudioCfp:
|
398 |
-
model_type: str = "PANN"
|
399 |
-
model_name: str = "Cnn14"
|
400 |
-
sample_rate: int = 48000
|
401 |
-
# Param
|
402 |
-
audio_length: int = 1024
|
403 |
-
window_size: int = 1024
|
404 |
-
hop_size: int = 1024
|
405 |
-
fmin: int = 50
|
406 |
-
fmax: int = 14000
|
407 |
-
class_num: int = 527
|
408 |
-
mel_bins: int = 64
|
409 |
-
clip_samples: int = 480000
|
410 |
-
|
411 |
-
|
412 |
-
@dataclass
|
413 |
-
class CLAPTextCfg:
|
414 |
-
context_length: int
|
415 |
-
vocab_size: int
|
416 |
-
width: int
|
417 |
-
heads: int
|
418 |
-
layers: int
|
419 |
-
model_type: str
|
420 |
-
|
421 |
-
|
422 |
-
class CLAP(nn.Module):
|
423 |
-
def __init__(
|
424 |
-
self,
|
425 |
-
embed_dim: int,
|
426 |
-
audio_cfg: CLAPAudioCfp,
|
427 |
-
text_cfg: CLAPTextCfg,
|
428 |
-
quick_gelu: bool = False,
|
429 |
-
enable_fusion: bool = False,
|
430 |
-
fusion_type: str = "None",
|
431 |
-
joint_embed_shape: int = 512,
|
432 |
-
mlp_act: str = "relu",
|
433 |
-
):
|
434 |
-
super().__init__()
|
435 |
-
if isinstance(audio_cfg, dict):
|
436 |
-
audio_cfg = CLAPAudioCfp(**audio_cfg)
|
437 |
-
if isinstance(text_cfg, dict):
|
438 |
-
text_cfg = CLAPTextCfg(**text_cfg)
|
439 |
-
|
440 |
-
self.audio_cfg = audio_cfg
|
441 |
-
self.text_cfg = text_cfg
|
442 |
-
self.enable_fusion = enable_fusion
|
443 |
-
self.fusion_type = fusion_type
|
444 |
-
self.joint_embed_shape = joint_embed_shape
|
445 |
-
self.mlp_act = mlp_act
|
446 |
-
|
447 |
-
self.context_length = text_cfg.context_length
|
448 |
-
|
449 |
-
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
|
450 |
-
# memory efficient in recent PyTorch releases (>= 1.10).
|
451 |
-
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
|
452 |
-
act_layer = QuickGELU if quick_gelu else nn.GELU
|
453 |
-
|
454 |
-
if mlp_act == "relu":
|
455 |
-
mlp_act_layer = nn.ReLU()
|
456 |
-
elif mlp_act == "gelu":
|
457 |
-
mlp_act_layer = nn.GELU()
|
458 |
-
else:
|
459 |
-
raise NotImplementedError
|
460 |
-
|
461 |
-
# audio branch
|
462 |
-
# audio branch parameters
|
463 |
-
if audio_cfg.model_type == "PANN":
|
464 |
-
self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
|
465 |
-
elif audio_cfg.model_type == "HTSAT":
|
466 |
-
self.audio_branch = create_htsat_model(
|
467 |
-
audio_cfg, enable_fusion, fusion_type
|
468 |
-
)
|
469 |
-
else:
|
470 |
-
logging.error(f"Model config for {audio_cfg.model_type} not found")
|
471 |
-
raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")
|
472 |
-
|
473 |
-
# text branch
|
474 |
-
# text branch parameters
|
475 |
-
if text_cfg.model_type == "transformer":
|
476 |
-
self.text_branch = Transformer(
|
477 |
-
width=text_cfg.width,
|
478 |
-
layers=text_cfg.layers,
|
479 |
-
heads=text_cfg.heads,
|
480 |
-
act_layer=act_layer,
|
481 |
-
)
|
482 |
-
self.vocab_size = text_cfg.vocab_size
|
483 |
-
self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
|
484 |
-
self.positional_embedding = nn.Parameter(
|
485 |
-
torch.empty(self.context_length, text_cfg.width)
|
486 |
-
)
|
487 |
-
self.ln_final = LayerNorm(text_cfg.width)
|
488 |
-
self.text_transform = MLPLayers(
|
489 |
-
units=[
|
490 |
-
self.joint_embed_shape,
|
491 |
-
self.joint_embed_shape,
|
492 |
-
self.joint_embed_shape,
|
493 |
-
],
|
494 |
-
dropout=0.1,
|
495 |
-
)
|
496 |
-
self.text_projection = nn.Sequential(
|
497 |
-
nn.Linear(text_cfg.width, self.joint_embed_shape),
|
498 |
-
mlp_act_layer,
|
499 |
-
nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
|
500 |
-
)
|
501 |
-
elif text_cfg.model_type == "bert":
|
502 |
-
self.text_branch = BertModel.from_pretrained("bert-base-uncased")
|
503 |
-
self.text_transform = MLPLayers(
|
504 |
-
units=[
|
505 |
-
self.joint_embed_shape,
|
506 |
-
self.joint_embed_shape,
|
507 |
-
self.joint_embed_shape,
|
508 |
-
],
|
509 |
-
dropout=0.1,
|
510 |
-
)
|
511 |
-
self.text_projection = nn.Sequential(
|
512 |
-
nn.Linear(768, self.joint_embed_shape),
|
513 |
-
mlp_act_layer,
|
514 |
-
nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
|
515 |
-
)
|
516 |
-
elif text_cfg.model_type == "roberta":
|
517 |
-
self.text_branch = RobertaModel.from_pretrained("roberta-base")
|
518 |
-
|
519 |
-
self.text_transform = MLPLayers(
|
520 |
-
units=[
|
521 |
-
self.joint_embed_shape,
|
522 |
-
self.joint_embed_shape,
|
523 |
-
self.joint_embed_shape,
|
524 |
-
],
|
525 |
-
dropout=0.1,
|
526 |
-
)
|
527 |
-
self.text_projection = nn.Sequential(
|
528 |
-
nn.Linear(768, self.joint_embed_shape),
|
529 |
-
mlp_act_layer,
|
530 |
-
nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
|
531 |
-
)
|
532 |
-
elif text_cfg.model_type == "bart":
|
533 |
-
self.text_branch = BartModel.from_pretrained("facebook/bart-base")
|
534 |
-
self.text_transform = MLPLayers(
|
535 |
-
units=[
|
536 |
-
self.joint_embed_shape,
|
537 |
-
self.joint_embed_shape,
|
538 |
-
self.joint_embed_shape,
|
539 |
-
],
|
540 |
-
dropout=0.1,
|
541 |
-
)
|
542 |
-
self.text_projection = nn.Sequential(
|
543 |
-
nn.Linear(768, self.joint_embed_shape),
|
544 |
-
mlp_act_layer,
|
545 |
-
nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
|
546 |
-
)
|
547 |
-
else:
|
548 |
-
logging.error(f"Model config for {text_cfg.model_type} not found")
|
549 |
-
raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
|
550 |
-
self.text_branch_type = text_cfg.model_type
|
551 |
-
# text branch parameters
|
552 |
-
|
553 |
-
# audio branch parameters
|
554 |
-
self.audio_transform = MLPLayers(
|
555 |
-
units=[
|
556 |
-
self.joint_embed_shape,
|
557 |
-
self.joint_embed_shape,
|
558 |
-
self.joint_embed_shape,
|
559 |
-
],
|
560 |
-
dropout=0.1,
|
561 |
-
)
|
562 |
-
|
563 |
-
# below here is text branch parameters
|
564 |
-
|
565 |
-
# ============================================================================================================
|
566 |
-
self.audio_projection = nn.Sequential(
|
567 |
-
nn.Linear(embed_dim, self.joint_embed_shape),
|
568 |
-
mlp_act_layer,
|
569 |
-
nn.Linear(self.joint_embed_shape, self.joint_embed_shape),
|
570 |
-
)
|
571 |
-
|
572 |
-
self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
573 |
-
self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
574 |
-
self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
|
575 |
-
|
576 |
-
self.init_text_branch_parameters()
|
577 |
-
|
578 |
-
def init_text_branch_parameters(self):
    """Initialize the parameters of the text branch.

    For the "transformer" branch, token/positional embeddings and every
    residual block's attention/MLP weights are drawn from scaled normal
    distributions (the CLIP initialization scheme). For the HuggingFace
    branches (bert/roberta/bart) the pretrained weights are left as-is.
    Both logit scales are (re)set to log(1/0.07) in all cases.
    """
    if self.text_branch_type == "transformer":
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        # Std-devs follow CLIP: scale down with transformer width and depth.
        proj_std = (self.text_branch.width**-0.5) * (
            (2 * self.text_branch.layers) ** -0.5
        )
        attn_std = self.text_branch.width**-0.5
        fc_std = (2 * self.text_branch.width) ** -0.5
        for block in self.text_branch.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
    # NOTE(review): ``width`` is computed below but never used; it was
    # presumably needed by the commented-out text_projection init at the
    # bottom of this method — confirm before removing.
    if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
        width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
    elif self.text_branch_type == "bart":
        width = self.text_branch.shared.weight.shape[-1]
    else:
        width = self.text_branch.width
    nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
    nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))

    # deprecated
    # if hasattr(self.visual, 'init_parameters'):
    # self.visual.init_parameters()

    # if self.text_projection is not None:
    # nn.init.normal_(self.text_projection, std=width**-0.5)
608 |
-
def build_attention_mask(self):
    """Create the causal (autoregressive) attention mask for the text
    transformer branch.

    Returns a (context_length, context_length) additive mask: -inf strictly
    above the diagonal, 0 on and below it, so each position may attend only
    to itself and earlier tokens.
    """
    size = self.context_length
    causal = torch.full((size, size), float("-inf"))
    causal.triu_(1)  # keep -inf only strictly above the diagonal
    return causal
616 |
-
def encode_audio(self, audio, device):
    """Run the audio branch on ``audio`` and return its raw output dict."""
    # Mixup is never applied at encode time (mixup_lambda stays None).
    outputs = self.audio_branch(audio, mixup_lambda=None, device=device)
    return outputs
|
620 |
-
|
621 |
-
# def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
|
622 |
-
# tmp = {}
|
623 |
-
# for k in x[0].keys():
|
624 |
-
# tmp[k] = []
|
625 |
-
# for i in range(len(x)):
|
626 |
-
# tmp[k].append(x[i][k][:77])
|
627 |
-
# for k in x[0].keys():
|
628 |
-
# tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
|
629 |
-
# return tmp
|
630 |
-
|
631 |
-
def encode_text(self, text, device):
    """Encode tokenized text into the joint embedding space.

    For the "transformer" branch ``text`` is a token-id tensor; for the
    HuggingFace branches it is a dict with ``input_ids`` /
    ``attention_mask`` (and ``token_type_ids`` for bert). Returns the
    projected text features (not normalized).
    """
    if self.text_branch_type == "transformer":
        text = text.to(device=device, non_blocking=True)
        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.text_branch(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
    elif self.text_branch_type == "bert":
        # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
        # text = BatchEncoding(text)
        # BERT pools via its [CLS]-based pooler head.
        x = self.text_branch(
            input_ids=text["input_ids"].to(device=device, non_blocking=True),
            attention_mask=text["attention_mask"].to(
                device=device, non_blocking=True
            ),
            token_type_ids=text["token_type_ids"].to(
                device=device, non_blocking=True
            ),
        )["pooler_output"]
        x = self.text_projection(x)
    elif self.text_branch_type == "roberta":
        # Same as bert, but roberta takes no token_type_ids.
        x = self.text_branch(
            input_ids=text["input_ids"].to(device=device, non_blocking=True),
            attention_mask=text["attention_mask"].to(
                device=device, non_blocking=True
            ),
        )["pooler_output"]
        x = self.text_projection(x)
    elif self.text_branch_type == "bart":
        # BART has no pooler; mean-pool the encoder's last hidden states.
        x = torch.mean(
            self.text_branch(
                input_ids=text["input_ids"].to(device=device, non_blocking=True),
                attention_mask=text["attention_mask"].to(
                    device=device, non_blocking=True
                ),
            )["encoder_last_hidden_state"],
            axis=1,
        )
        x = self.text_projection(x)
    else:
        logging.error(f"Model type {self.text_branch_type} not found")
        raise RuntimeError(f"Model type {self.text_branch_type} not found.")
    return x
681 |
-
|
682 |
-
def forward(self, audio, text, device=None):
    """Forward audio and text into the CLAP

    Parameters
    ----------
    audio: torch.Tensor (batch_size, audio_length)
        the time-domain audio input / the batch of mel_spec and longer list.
    text: torch.Tensor () // need to add
        the text token input

    Returns
    ----------
    When both inputs are None: the two exponentiated logit scales.
    When only one input is given: that modality's (unnormalized) features.
    Otherwise a 6-tuple: audio/text features (normalized), their MLP
    transforms, and the two exponentiated logit scales.
    """
    # Infer the device from whichever input is present.
    if device is None:
        if audio is not None:
            device = audio.device
        elif text is not None:
            device = text.device
    if audio is None and text is None:
        # a hack to get the logit scale
        return self.logit_scale_a.exp(), self.logit_scale_t.exp()
    elif audio is None:
        return self.encode_text(text, device=device)
    elif text is None:
        return self.audio_projection(
            self.encode_audio(audio, device=device)["embedding"]
        )
    audio_features = self.audio_projection(
        self.encode_audio(audio, device=device)["embedding"]
    )
    audio_features = F.normalize(audio_features, dim=-1)

    text_features = self.encode_text(text, device=device)
    # print("text_features", text_features)
    # print("text_features.shape", text_features.shape)
    # print("text_features.type", type(text_features))
    text_features = F.normalize(text_features, dim=-1)

    audio_features_mlp = self.audio_transform(audio_features)
    text_features_mlp = self.text_transform(text_features)
    # Four outputs: audio features (basic & MLP), text features (basic & MLP)
    return (
        audio_features,
        text_features,
        audio_features_mlp,
        text_features_mlp,
        self.logit_scale_a.exp(),
        self.logit_scale_t.exp(),
    )
|
728 |
-
|
729 |
-
def get_logit_scale(self):
    """Return the exponentiated audio and text logit scales."""
    scale_a = self.logit_scale_a.exp()
    scale_t = self.logit_scale_t.exp()
    return scale_a, scale_t
|
731 |
-
|
732 |
-
def get_text_embedding(self, data):
    """Get the L2-normalized text embedding from the model.

    Parameters
    ----------
    data: dict of torch.Tensor
        tokenizer output (moved to the model's device in place)

    Returns
    ----------
    text_embed: torch.Tensor
        a tensor of text_embeds (N, D)
    """
    device = next(self.parameters()).device
    for key in data:
        data[key] = data[key].to(device)
    embeddings = self.encode_text(data, device=device)
    return F.normalize(embeddings, dim=-1)
|
753 |
-
|
754 |
-
def get_audio_embedding(self, data):
    """Get the L2-normalized audio embedding from the model.

    Parameters
    ----------
    data: a list of dict
        the audio input dict list from 'get_audio_feature' method

    Returns
    ----------
    audio_embed: torch.Tensor
        a tensor of audio_embeds (N, D)
    """
    device = next(self.parameters()).device
    # Stack the per-sample dicts into one batched dict on the model device.
    batched = {
        key: torch.cat([item[key].unsqueeze(0) for item in data], dim=0).to(device)
        for key in data[0].keys()
    }
    embeddings = self.audio_projection(
        self.encode_audio(batched, device=device)["embedding"]
    )
    return F.normalize(embeddings, dim=-1)
|
782 |
-
|
783 |
-
def audio_infer(self, audio, hopsize=None, device=None, key="embedding"):
    """Forward one audio clip and produce its audio-branch outputs.

    Parameters
    ----------
    audio: torch.Tensor (audio_length,)
        the time-domain audio input; must be a single (unbatched) clip
    hopsize: int or None
        the overlap hopsize of the sliding window (HTSAT only); when None
        the whole clip is used as a single window
    device: torch.device or None
        device passed through to the audio branch
    key: str
        which entry of the audio-branch output dict to collect.
        Fix: the original body referenced an undefined name ``key``
        (NameError on every path); it is now a keyword parameter with a
        default matching the dict key used elsewhere in this class.

    Returns
    ----------
    output_dict: {
        key: [n, (embedding_shape)] if "HTSAT"
        or
        key: [(embedding_shape)] if "PANN"
    }
        the list of key values of the audio branch
    """
    assert not self.training, "the inference mode must be run at eval stage"
    output_dict = {}
    # PANN consumes the whole clip in one shot.
    if self.audio_cfg.model_type == "PANN":
        audio_input = audio.unsqueeze(dim=0)
        output_dict[key] = self.encode_audio(audio_input, device=device)[
            key
        ].squeeze(dim=0)
    elif self.audio_cfg.model_type == "HTSAT":
        # Tile short clips so they reach the model's expected clip length.
        audio_len = len(audio)
        k = self.audio_cfg.clip_samples // audio_len
        if k > 1:
            audio = audio.repeat(k)
            audio_len = len(audio)

        # Fix: the original ran ``hopsize = min(hopsize, audio_len)`` only
        # when hopsize was None, which raised TypeError. Default the hop to
        # the clip length and clamp a provided hop to it.
        hopsize = audio_len if hopsize is None else min(hopsize, audio_len)

        if audio_len > self.audio_cfg.clip_samples:
            # Slide a clip_samples-long window with the given hop, plus one
            # final window aligned to the end of the clip.
            audio_input = [
                audio[pos : pos + self.audio_cfg.clip_samples].clone()
                for pos in range(
                    0, audio_len - self.audio_cfg.clip_samples, hopsize
                )
            ]
            audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
            audio_input = torch.stack(audio_input)
            output_dict[key] = self.encode_audio(audio_input, device=device)[key]
        else:
            audio_input = audio.unsqueeze(dim=0)
            output_dict[key] = self.encode_audio(audio_input, device=device)[
                key
            ].squeeze(dim=0)

    return output_dict
|
840 |
-
|
841 |
-
|
842 |
-
def convert_weights_to_fp16(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""

    def _halve(module):
        # Linear / conv layers: cast weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # Multi-head attention keeps several raw parameter tensors.
        if isinstance(module, nn.MultiheadAttention):
            attn_attrs = [f"{s}_proj_weight" for s in ("in", "q", "k", "v")]
            attn_attrs += ["in_proj_bias", "bias_k", "bias_v"]
            for attr in attn_attrs:
                tensor = getattr(module, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # Plain-tensor projection attributes used by CLIP-style modules.
        for name in ("text_projection", "proj"):
            if hasattr(module, name):
                proj = getattr(module, name)
                if proj is not None:
                    proj.data = proj.data.half()

    model.apply(_halve)
|
869 |
-
|
870 |
-
|
871 |
-
# Ignore the state dict of the vision part
|
872 |
-
def build_model_from_openai_state_dict(
    state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = "None"
):
    """Build a CLAP model from an OpenAI-CLIP-style state dict.

    The visual branch weights are dropped; the single ``logit_scale`` is
    duplicated into the audio/text logit scales. Loading is non-strict so
    the audio branch keeps its fresh initialization.
    """
    embed_dim = model_cfg["embed_dim"]
    audio_cfg = model_cfg["audio_cfg"]
    text_cfg = model_cfg["text_cfg"]
    # NOTE(review): the five values below are derived from the state dict
    # but never used to build the model (the config drives construction).
    # They do double as key-presence checks, so they are kept — confirm
    # before removing.
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith(f"transformer.resblocks")
        )
    )

    audio_cfg = CLAPAudioCfp(**audio_cfg)
    text_cfg = CLAPTextCfg(**text_cfg)

    model = CLAP(
        embed_dim,
        audio_cfg=audio_cfg,
        text_cfg=text_cfg,
        quick_gelu=True,  # OpenAI models were trained with QuickGELU
        enable_fusion=enable_fusion,
        fusion_type=fusion_type,
    )
    # Duplicate the single CLIP logit scale into the two CLAP scales.
    state_dict["logit_scale_a"] = state_dict["logit_scale"]
    state_dict["logit_scale_t"] = state_dict["logit_scale"]
    pop_keys = list(state_dict.keys())[::]
    # pop the visual branch saved weights
    for key in pop_keys:
        if key.startswith("visual."):
            state_dict.pop(key, None)

    for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
        state_dict.pop(key, None)

    # not use fp16
    # convert_weights_to_fp16(model)
    model.load_state_dict(state_dict, strict=False)
    return model.eval()
|
917 |
-
|
918 |
-
|
919 |
-
def trace_model(model, batch_size=256, device=torch.device("cpu")):
    """JIT-trace a CLAP model's forward / encode_text / encode_image entry
    points using dummy inputs, and return the traced module."""
    model.eval()
    audio_length = model.audio_cfg.audio_length
    dummy_audio = torch.ones((batch_size, audio_length), device=device)
    dummy_text = torch.zeros(
        (batch_size, model.context_length), dtype=torch.int, device=device
    )
    traced = torch.jit.trace_module(
        model,
        inputs=dict(
            forward=(dummy_audio, dummy_text),
            encode_text=(dummy_text,),
            encode_image=(dummy_audio,),
        ),
    )
    traced.audio_cfg.audio_length = audio_length  # Question: what does this do?
    return traced
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/solver/lr_scheduler.py
DELETED
@@ -1,238 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import logging
|
3 |
-
import math
|
4 |
-
from bisect import bisect_right
|
5 |
-
from typing import List
|
6 |
-
import torch
|
7 |
-
from fvcore.common.param_scheduler import (
|
8 |
-
CompositeParamScheduler,
|
9 |
-
ConstantParamScheduler,
|
10 |
-
LinearParamScheduler,
|
11 |
-
ParamScheduler,
|
12 |
-
)
|
13 |
-
|
14 |
-
logger = logging.getLogger(__name__)
|
15 |
-
|
16 |
-
|
17 |
-
class WarmupParamScheduler(CompositeParamScheduler):
    """
    Add an initial warmup stage to another scheduler.
    """

    def __init__(
        self,
        scheduler: ParamScheduler,
        warmup_factor: float,
        warmup_length: float,
        warmup_method: str = "linear",
    ):
        """
        Args:
            scheduler: warmup will be added at the beginning of this scheduler
            warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001
            warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire
                training, e.g. 0.01
            warmup_method: one of "linear" or "constant"
        """
        # Warmup ramps from a scaled-down initial value up to the value the
        # wrapped scheduler would produce at the end of the warmup window.
        end_value = scheduler(warmup_length)
        start_value = warmup_factor * scheduler(0.0)
        if warmup_method == "constant":
            warmup_stage = ConstantParamScheduler(start_value)
        elif warmup_method == "linear":
            warmup_stage = LinearParamScheduler(start_value, end_value)
        else:
            raise ValueError("Unknown warmup method: {}".format(warmup_method))
        super().__init__(
            [warmup_stage, scheduler],
            interval_scaling=["rescaled", "fixed"],
            lengths=[warmup_length, 1 - warmup_length],
        )
|
50 |
-
|
51 |
-
|
52 |
-
class LRMultiplier(torch.optim.lr_scheduler._LRScheduler):
    """
    A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the
    learning rate of each param in the optimizer.
    Every step, the learning rate of each parameter becomes its initial value
    multiplied by the output of the given :class:`ParamScheduler`.

    The absolute learning rate value of each parameter can be different.
    This scheduler can be used as long as the relative scale among them do
    not change during training.

    Examples:
    ::
        LRMultiplier(
            opt,
            WarmupParamScheduler(
                MultiStepParamScheduler(
                    [1, 0.1, 0.01],
                    milestones=[60000, 80000],
                    num_updates=90000,
                ), 0.001, 100 / 90000
            ),
            max_iter=90000
        )
    """

    # NOTES: in the most general case, every LR can use its own scheduler.
    # Supporting this requires interaction with the optimizer when its parameter
    # group is initialized. For example, classyvision implements its own optimizer
    # that allows different schedulers for every parameter group.
    # To avoid this complexity, we use this class to support the most common cases
    # where the relative scale among all LRs stay unchanged during training. In this
    # case we only need a total of one scheduler that defines the relative LR multiplier.

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        multiplier: ParamScheduler,
        max_iter: int,
        last_iter: int = -1,
    ):
        """
        Args:
            optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.
                ``last_iter`` is the same as ``last_epoch``.
            multiplier: a fvcore ParamScheduler that defines the multiplier on
                every LR of the optimizer
            max_iter: the total number of training iterations
        """
        if not isinstance(multiplier, ParamScheduler):
            raise ValueError(
                "_LRMultiplier(multiplier=) must be an instance of fvcore "
                f"ParamScheduler. Got {multiplier} instead."
            )
        # Set our own attributes before the base __init__, which may call
        # get_lr() (and hence read them) during its setup.
        self._multiplier = multiplier
        self._max_iter = max_iter
        super().__init__(optimizer, last_epoch=last_iter)

    def state_dict(self):
        # fvcore schedulers are stateless. Only keep pytorch scheduler states
        return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch}

    def get_lr(self) -> List[float]:
        # The scheduler is queried with training progress in [0, 1].
        multiplier = self._multiplier(self.last_epoch / self._max_iter)
        return [base_lr * multiplier for base_lr in self.base_lrs]
|
117 |
-
|
118 |
-
|
119 |
-
"""
|
120 |
-
Content below is no longer needed!
|
121 |
-
"""
|
122 |
-
|
123 |
-
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
|
124 |
-
# only on epoch boundaries. We typically use iteration based schedules instead.
|
125 |
-
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
|
126 |
-
# "iteration" instead.
|
127 |
-
|
128 |
-
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
|
129 |
-
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
|
130 |
-
|
131 |
-
|
132 |
-
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Deprecated step-decay scheduler with linear/constant warmup.

    LR is ``base_lr * warmup_factor(iter) * gamma ** (#milestones passed)``.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        milestones: List[int],
        gamma: float = 0.1,
        warmup_factor: float = 0.001,
        warmup_iters: int = 1000,
        warmup_method: str = "linear",
        last_epoch: int = -1,
    ):
        logger.warning(
            "WarmupMultiStepLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!"
        )
        if not list(milestones) == sorted(milestones):
            # Fix: the original passed the format string and ``milestones``
            # as two separate ValueError arguments instead of formatting.
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        warmup_factor = _get_warmup_factor_at_iter(
            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
        )
        # bisect_right counts how many milestones have been passed.
        return [
            base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]

    def _compute_values(self) -> List[float]:
        # The new interface
        return self.get_lr()
|
169 |
-
|
170 |
-
|
171 |
-
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
    """Deprecated half-cosine decay scheduler with linear/constant warmup."""

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        max_iters: int,
        warmup_factor: float = 0.001,
        warmup_iters: int = 1000,
        warmup_method: str = "linear",
        last_epoch: int = -1,
    ):
        logger.warning(
            "WarmupCosineLR is deprecated! Use LRMultipilier with fvcore ParamScheduler instead!"
        )
        self.max_iters = max_iters
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        warmup_factor = _get_warmup_factor_at_iter(
            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
        )
        # Different definitions of half-cosine with warmup are possible. For
        # simplicity we multiply the standard half-cosine schedule by the warmup
        # factor. An alternative is to start the period of the cosine at warmup_iters
        # instead of at 0. In the case that warmup_iters << max_iters the two are
        # very close to each other.
        cosine_decay = 0.5 * (
            1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)
        )
        return [base_lr * warmup_factor * cosine_decay for base_lr in self.base_lrs]

    def _compute_values(self) -> List[float]:
        # The new interface delegates to get_lr().
        return self.get_lr()
|
210 |
-
|
211 |
-
|
212 |
-
def _get_warmup_factor_at_iter(
|
213 |
-
method: str, iter: int, warmup_iters: int, warmup_factor: float
|
214 |
-
) -> float:
|
215 |
-
"""
|
216 |
-
Return the learning rate warmup factor at a specific iteration.
|
217 |
-
See :paper:`ImageNet in 1h` for more details.
|
218 |
-
|
219 |
-
Args:
|
220 |
-
method (str): warmup method; either "constant" or "linear".
|
221 |
-
iter (int): iteration at which to calculate the warmup factor.
|
222 |
-
warmup_iters (int): the number of warmup iterations.
|
223 |
-
warmup_factor (float): the base warmup factor (the meaning changes according
|
224 |
-
to the method used).
|
225 |
-
|
226 |
-
Returns:
|
227 |
-
float: the effective warmup factor at the given iteration.
|
228 |
-
"""
|
229 |
-
if iter >= warmup_iters:
|
230 |
-
return 1.0
|
231 |
-
|
232 |
-
if method == "constant":
|
233 |
-
return warmup_factor
|
234 |
-
elif method == "linear":
|
235 |
-
alpha = iter / warmup_iters
|
236 |
-
return warmup_factor * (1 - alpha) + alpha
|
237 |
-
else:
|
238 |
-
raise ValueError("Unknown warmup method: {}".format(method))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/_static/css/custom.css
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * some extra css to make markdown look similar between github/sphinx
 */

/*
 * Below is for install.md:
 */
/* keep inline/code blocks preformatted and borderless, as on GitHub */
.rst-content code {
  white-space: pre;
  border: 0px;
}

/* give table headers a visible border matching the theme's table style */
.rst-content th {
  border: 1px solid #e1e4e5;
}

.rst-content th p {
  /* otherwise will be default 24px for regular paragraph */
  margin-bottom: 0px;
}

.rst-content .line-block {
  /* otherwise will be 24px */
  margin-bottom: 0px;
}

/* space out collapsible <details> sections within a doc section */
div.section > details {
  padding-bottom: 1em;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BREWDAcademy/Brewd-Diffusion/app.py
DELETED
@@ -1,391 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
|
3 |
-
from __future__ import annotations
|
4 |
-
|
5 |
-
import os
|
6 |
-
import random
|
7 |
-
|
8 |
-
import gradio as gr
|
9 |
-
import numpy as np
|
10 |
-
import PIL.Image
|
11 |
-
import torch
|
12 |
-
from diffusers import AutoencoderKL, StableDiffusionXLPipeline
|
13 |
-
import uuid
|
14 |
-
|
15 |
-
DESCRIPTION = '''# BREWD Stable Diffusion: SSD-1B
|
16 |
-
'''
|
17 |
-
if not torch.cuda.is_available():
|
18 |
-
DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
|
19 |
-
|
20 |
-
MAX_SEED = np.iinfo(np.int32).max
|
21 |
-
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
|
22 |
-
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
|
23 |
-
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
|
24 |
-
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
|
25 |
-
ENABLE_REFINER = os.getenv("ENABLE_REFINER", "0") == "1"
|
26 |
-
|
27 |
-
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
28 |
-
|
29 |
-
style_list = [
|
30 |
-
{
|
31 |
-
"name": "(No style)",
|
32 |
-
"prompt": "{prompt}",
|
33 |
-
"negative_prompt": "",
|
34 |
-
},
|
35 |
-
{
|
36 |
-
"name": "Cinematic",
|
37 |
-
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
|
38 |
-
"negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
|
39 |
-
},
|
40 |
-
{
|
41 |
-
"name": "Photographic",
|
42 |
-
"prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
|
43 |
-
"negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
|
44 |
-
},
|
45 |
-
{
|
46 |
-
"name": "Anime",
|
47 |
-
"prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
|
48 |
-
"negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
|
49 |
-
},
|
50 |
-
{
|
51 |
-
"name": "Manga",
|
52 |
-
"prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
|
53 |
-
"negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
|
54 |
-
},
|
55 |
-
{
|
56 |
-
"name": "Digital Art",
|
57 |
-
"prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
|
58 |
-
"negative_prompt": "photo, photorealistic, realism, ugly",
|
59 |
-
},
|
60 |
-
{
|
61 |
-
"name": "Pixel art",
|
62 |
-
"prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
|
63 |
-
"negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
|
64 |
-
},
|
65 |
-
{
|
66 |
-
"name": "Fantasy art",
|
67 |
-
"prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
|
68 |
-
"negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
|
69 |
-
},
|
70 |
-
{
|
71 |
-
"name": "Neonpunk",
|
72 |
-
"prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
|
73 |
-
"negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
|
74 |
-
},
|
75 |
-
{
|
76 |
-
"name": "3D Model",
|
77 |
-
"prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
|
78 |
-
"negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
|
79 |
-
},
|
80 |
-
]
|
81 |
-
|
82 |
-
styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
|
83 |
-
STYLE_NAMES = list(styles.keys())
|
84 |
-
DEFAULT_STYLE_NAME = "Cinematic"
|
85 |
-
|
86 |
-
|
87 |
-
def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
|
88 |
-
p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
|
89 |
-
if not negative:
|
90 |
-
negative = ""
|
91 |
-
return p.replace("{prompt}", positive), n + negative
|
92 |
-
|
93 |
-
|
94 |
-
if torch.cuda.is_available():
|
95 |
-
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
96 |
-
pipe = StableDiffusionXLPipeline.from_pretrained(
|
97 |
-
"segmind/SSD-1B",
|
98 |
-
vae=vae,
|
99 |
-
torch_dtype=torch.float16,
|
100 |
-
use_safetensors=True,
|
101 |
-
variant="fp16",
|
102 |
-
)
|
103 |
-
if ENABLE_REFINER:
|
104 |
-
refiner = DiffusionPipeline.from_pretrained(
|
105 |
-
"stabilityai/stable-diffusion-xl-refiner-1.0",
|
106 |
-
vae=vae,
|
107 |
-
torch_dtype=torch.float16,
|
108 |
-
use_safetensors=True,
|
109 |
-
variant="fp16",
|
110 |
-
)
|
111 |
-
|
112 |
-
if ENABLE_CPU_OFFLOAD:
|
113 |
-
pipe.enable_model_cpu_offload()
|
114 |
-
if ENABLE_REFINER:
|
115 |
-
refiner.enable_model_cpu_offload()
|
116 |
-
else:
|
117 |
-
pipe.to(device)
|
118 |
-
if ENABLE_REFINER:
|
119 |
-
refiner.to(device)
|
120 |
-
print("Loaded on Device!")
|
121 |
-
|
122 |
-
if USE_TORCH_COMPILE:
|
123 |
-
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
124 |
-
if ENABLE_REFINER:
|
125 |
-
refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
|
126 |
-
print("Model Compiled!")
|
127 |
-
|
128 |
-
def save_image(img):
|
129 |
-
unique_name = str(uuid.uuid4()) + '.png'
|
130 |
-
img.save(unique_name)
|
131 |
-
return unique_name
|
132 |
-
|
133 |
-
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
134 |
-
if randomize_seed:
|
135 |
-
seed = random.randint(0, MAX_SEED)
|
136 |
-
return seed
|
137 |
-
|
138 |
-
def generate(
|
139 |
-
prompt: str,
|
140 |
-
negative_prompt: str = "",
|
141 |
-
style: str = DEFAULT_STYLE_NAME,
|
142 |
-
prompt_2: str = "",
|
143 |
-
negative_prompt_2: str = "",
|
144 |
-
use_negative_prompt: bool = False,
|
145 |
-
use_prompt_2: bool = False,
|
146 |
-
use_negative_prompt_2: bool = False,
|
147 |
-
seed: int = 0,
|
148 |
-
width: int = 1024,
|
149 |
-
height: int = 1024,
|
150 |
-
guidance_scale_base: float = 5.0,
|
151 |
-
guidance_scale_refiner: float = 5.0,
|
152 |
-
num_inference_steps_base: int = 25,
|
153 |
-
num_inference_steps_refiner: int = 25,
|
154 |
-
apply_refiner: bool = False,
|
155 |
-
randomize_seed: bool = False,
|
156 |
-
progress = gr.Progress(track_tqdm=True)
|
157 |
-
):
|
158 |
-
seed = randomize_seed_fn(seed, randomize_seed)
|
159 |
-
generator = torch.Generator().manual_seed(seed)
|
160 |
-
|
161 |
-
if not use_negative_prompt:
|
162 |
-
negative_prompt = None # type: ignore
|
163 |
-
if not use_prompt_2:
|
164 |
-
prompt_2 = None # type: ignore
|
165 |
-
if not use_negative_prompt_2:
|
166 |
-
negative_prompt_2 = None # type: ignore
|
167 |
-
prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
|
168 |
-
if not apply_refiner:
|
169 |
-
image = pipe(
|
170 |
-
prompt=prompt,
|
171 |
-
negative_prompt=negative_prompt,
|
172 |
-
prompt_2=prompt_2,
|
173 |
-
negative_prompt_2=negative_prompt_2,
|
174 |
-
width=width,
|
175 |
-
height=height,
|
176 |
-
guidance_scale=guidance_scale_base,
|
177 |
-
num_inference_steps=num_inference_steps_base,
|
178 |
-
generator=generator,
|
179 |
-
output_type="pil",
|
180 |
-
).images[0]
|
181 |
-
else:
|
182 |
-
latents = pipe(
|
183 |
-
prompt=prompt,
|
184 |
-
negative_prompt=negative_prompt,
|
185 |
-
prompt_2=prompt_2,
|
186 |
-
negative_prompt_2=negative_prompt_2,
|
187 |
-
width=width,
|
188 |
-
height=height,
|
189 |
-
guidance_scale=guidance_scale_base,
|
190 |
-
num_inference_steps=num_inference_steps_base,
|
191 |
-
generator=generator,
|
192 |
-
output_type="latent",
|
193 |
-
).images
|
194 |
-
image = refiner(
|
195 |
-
prompt=prompt,
|
196 |
-
negative_prompt=negative_prompt,
|
197 |
-
prompt_2=prompt_2,
|
198 |
-
negative_prompt_2=negative_prompt_2,
|
199 |
-
guidance_scale=guidance_scale_refiner,
|
200 |
-
num_inference_steps=num_inference_steps_refiner,
|
201 |
-
image=latents,
|
202 |
-
generator=generator,
|
203 |
-
).images[0]
|
204 |
-
|
205 |
-
image_path = save_image(image)
|
206 |
-
print(image_path)
|
207 |
-
return [image_path], seed
|
208 |
-
|
209 |
-
examples = [
|
210 |
-
'3D digital art of a playful squirrel with oversized glasses reading a book, surrounded by autumn leaves, serene, natural background',
|
211 |
-
'A fluffy bunny wearing a flower crown, hopping through a vibrant meadow, with a soft, colorful, and peaceful scenery',
|
212 |
-
'Professional portrait photo of a whimsical owl wearing a detective hat, perched on a branch, investigating the forest mysteries, under the moonlight',
|
213 |
-
'A curious fox exploring a quaint, rustic village, with cobblestone streets and flower-laden cottages, under the soft glow of dawn',
|
214 |
-
'A serene lake reflecting the whimsical dance of butterflies, surrounded by blossoming flowers, as the sun casts a gentle, golden glow',
|
215 |
-
'Cinematic still of a gentle deer prancing through an enchanted forest, with fairy lights illuminating the path, creating a magical, peaceful ambiance'
|
216 |
-
]
|
217 |
-
|
218 |
-
|
219 |
-
with gr.Blocks(css="style.css") as demo:
|
220 |
-
gr.Markdown(DESCRIPTION)
|
221 |
-
gr.DuplicateButton(
|
222 |
-
value="Duplicate Space for private use",
|
223 |
-
elem_id="duplicate-button",
|
224 |
-
visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
|
225 |
-
)
|
226 |
-
with gr.Group():
|
227 |
-
with gr.Row():
|
228 |
-
prompt = gr.Text(
|
229 |
-
label="Prompt",
|
230 |
-
show_label=False,
|
231 |
-
max_lines=1,
|
232 |
-
placeholder="Enter your prompt",
|
233 |
-
container=False,
|
234 |
-
)
|
235 |
-
run_button = gr.Button("Run", scale=0)
|
236 |
-
result = gr.Gallery(label="Result", columns=1, show_label=False)
|
237 |
-
with gr.Accordion("Advanced options", open=False):
|
238 |
-
with gr.Row():
|
239 |
-
use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
|
240 |
-
use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
|
241 |
-
use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
|
242 |
-
style_selection = gr.Radio(
|
243 |
-
show_label=True, container=True, interactive=True,
|
244 |
-
choices=STYLE_NAMES,
|
245 |
-
value=DEFAULT_STYLE_NAME,
|
246 |
-
label='Image Style'
|
247 |
-
)
|
248 |
-
negative_prompt = gr.Text(
|
249 |
-
label="Negative prompt",
|
250 |
-
max_lines=1,
|
251 |
-
placeholder="Enter a negative prompt",
|
252 |
-
visible=False,
|
253 |
-
)
|
254 |
-
prompt_2 = gr.Text(
|
255 |
-
label="Prompt 2",
|
256 |
-
max_lines=1,
|
257 |
-
placeholder="Enter your prompt",
|
258 |
-
visible=False,
|
259 |
-
)
|
260 |
-
negative_prompt_2 = gr.Text(
|
261 |
-
label="Negative prompt 2",
|
262 |
-
max_lines=1,
|
263 |
-
placeholder="Enter a negative prompt",
|
264 |
-
visible=False,
|
265 |
-
)
|
266 |
-
seed = gr.Slider(
|
267 |
-
label="Seed",
|
268 |
-
minimum=0,
|
269 |
-
maximum=MAX_SEED,
|
270 |
-
step=1,
|
271 |
-
value=0,
|
272 |
-
)
|
273 |
-
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
|
274 |
-
with gr.Row(visible=False):
|
275 |
-
width = gr.Slider(
|
276 |
-
label="Width",
|
277 |
-
minimum=256,
|
278 |
-
maximum=MAX_IMAGE_SIZE,
|
279 |
-
step=32,
|
280 |
-
value=1024,
|
281 |
-
)
|
282 |
-
height = gr.Slider(
|
283 |
-
label="Height",
|
284 |
-
minimum=256,
|
285 |
-
maximum=MAX_IMAGE_SIZE,
|
286 |
-
step=32,
|
287 |
-
value=1024,
|
288 |
-
)
|
289 |
-
apply_refiner = gr.Checkbox(label="Apply refiner", value=False, visible=ENABLE_REFINER)
|
290 |
-
with gr.Row():
|
291 |
-
guidance_scale_base = gr.Slider(
|
292 |
-
label="Guidance scale for base",
|
293 |
-
minimum=1,
|
294 |
-
maximum=20,
|
295 |
-
step=0.1,
|
296 |
-
value=9.0,
|
297 |
-
)
|
298 |
-
num_inference_steps_base = gr.Slider(
|
299 |
-
label="Number of inference steps for base",
|
300 |
-
minimum=10,
|
301 |
-
maximum=100,
|
302 |
-
step=1,
|
303 |
-
value=25,
|
304 |
-
)
|
305 |
-
with gr.Row(visible=False) as refiner_params:
|
306 |
-
guidance_scale_refiner = gr.Slider(
|
307 |
-
label="Guidance scale for refiner",
|
308 |
-
minimum=1,
|
309 |
-
maximum=20,
|
310 |
-
step=0.1,
|
311 |
-
value=5.0,
|
312 |
-
)
|
313 |
-
num_inference_steps_refiner = gr.Slider(
|
314 |
-
label="Number of inference steps for refiner",
|
315 |
-
minimum=10,
|
316 |
-
maximum=100,
|
317 |
-
step=1,
|
318 |
-
value=25,
|
319 |
-
)
|
320 |
-
|
321 |
-
gr.Examples(
|
322 |
-
examples=examples,
|
323 |
-
inputs=prompt,
|
324 |
-
outputs=[result, seed],
|
325 |
-
fn=generate,
|
326 |
-
cache_examples=CACHE_EXAMPLES,
|
327 |
-
)
|
328 |
-
|
329 |
-
use_negative_prompt.change(
|
330 |
-
fn=lambda x: gr.update(visible=x),
|
331 |
-
inputs=use_negative_prompt,
|
332 |
-
outputs=negative_prompt,
|
333 |
-
queue=False,
|
334 |
-
api_name=False,
|
335 |
-
)
|
336 |
-
use_prompt_2.change(
|
337 |
-
fn=lambda x: gr.update(visible=x),
|
338 |
-
inputs=use_prompt_2,
|
339 |
-
outputs=prompt_2,
|
340 |
-
queue=False,
|
341 |
-
api_name=False,
|
342 |
-
)
|
343 |
-
use_negative_prompt_2.change(
|
344 |
-
fn=lambda x: gr.update(visible=x),
|
345 |
-
inputs=use_negative_prompt_2,
|
346 |
-
outputs=negative_prompt_2,
|
347 |
-
queue=False,
|
348 |
-
api_name=False,
|
349 |
-
)
|
350 |
-
apply_refiner.change(
|
351 |
-
fn=lambda x: gr.update(visible=x),
|
352 |
-
inputs=apply_refiner,
|
353 |
-
outputs=refiner_params,
|
354 |
-
queue=False,
|
355 |
-
api_name=False,
|
356 |
-
)
|
357 |
-
|
358 |
-
gr.on(
|
359 |
-
triggers=[
|
360 |
-
prompt.submit,
|
361 |
-
negative_prompt.submit,
|
362 |
-
prompt_2.submit,
|
363 |
-
negative_prompt_2.submit,
|
364 |
-
run_button.click,
|
365 |
-
],
|
366 |
-
fn=generate,
|
367 |
-
inputs=[
|
368 |
-
prompt,
|
369 |
-
negative_prompt,
|
370 |
-
style_selection,
|
371 |
-
prompt_2,
|
372 |
-
negative_prompt_2,
|
373 |
-
use_negative_prompt,
|
374 |
-
use_prompt_2,
|
375 |
-
use_negative_prompt_2,
|
376 |
-
seed,
|
377 |
-
width,
|
378 |
-
height,
|
379 |
-
guidance_scale_base,
|
380 |
-
guidance_scale_refiner,
|
381 |
-
num_inference_steps_base,
|
382 |
-
num_inference_steps_refiner,
|
383 |
-
apply_refiner,
|
384 |
-
randomize_seed
|
385 |
-
],
|
386 |
-
outputs=[result, seed],
|
387 |
-
api_name="run",
|
388 |
-
)
|
389 |
-
|
390 |
-
if __name__ == "__main__":
|
391 |
-
demo.queue(max_size=20).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BartPoint/VoiceChange_Beta/infer_pack/commons.py
DELETED
@@ -1,166 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
|
8 |
-
def init_weights(m, mean=0.0, std=0.01):
|
9 |
-
classname = m.__class__.__name__
|
10 |
-
if classname.find("Conv") != -1:
|
11 |
-
m.weight.data.normal_(mean, std)
|
12 |
-
|
13 |
-
|
14 |
-
def get_padding(kernel_size, dilation=1):
|
15 |
-
return int((kernel_size * dilation - dilation) / 2)
|
16 |
-
|
17 |
-
|
18 |
-
def convert_pad_shape(pad_shape):
|
19 |
-
l = pad_shape[::-1]
|
20 |
-
pad_shape = [item for sublist in l for item in sublist]
|
21 |
-
return pad_shape
|
22 |
-
|
23 |
-
|
24 |
-
def kl_divergence(m_p, logs_p, m_q, logs_q):
|
25 |
-
"""KL(P||Q)"""
|
26 |
-
kl = (logs_q - logs_p) - 0.5
|
27 |
-
kl += (
|
28 |
-
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
|
29 |
-
)
|
30 |
-
return kl
|
31 |
-
|
32 |
-
|
33 |
-
def rand_gumbel(shape):
|
34 |
-
"""Sample from the Gumbel distribution, protect from overflows."""
|
35 |
-
uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
|
36 |
-
return -torch.log(-torch.log(uniform_samples))
|
37 |
-
|
38 |
-
|
39 |
-
def rand_gumbel_like(x):
|
40 |
-
g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
|
41 |
-
return g
|
42 |
-
|
43 |
-
|
44 |
-
def slice_segments(x, ids_str, segment_size=4):
|
45 |
-
ret = torch.zeros_like(x[:, :, :segment_size])
|
46 |
-
for i in range(x.size(0)):
|
47 |
-
idx_str = ids_str[i]
|
48 |
-
idx_end = idx_str + segment_size
|
49 |
-
ret[i] = x[i, :, idx_str:idx_end]
|
50 |
-
return ret
|
51 |
-
|
52 |
-
|
53 |
-
def slice_segments2(x, ids_str, segment_size=4):
|
54 |
-
ret = torch.zeros_like(x[:, :segment_size])
|
55 |
-
for i in range(x.size(0)):
|
56 |
-
idx_str = ids_str[i]
|
57 |
-
idx_end = idx_str + segment_size
|
58 |
-
ret[i] = x[i, idx_str:idx_end]
|
59 |
-
return ret
|
60 |
-
|
61 |
-
|
62 |
-
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
63 |
-
b, d, t = x.size()
|
64 |
-
if x_lengths is None:
|
65 |
-
x_lengths = t
|
66 |
-
ids_str_max = x_lengths - segment_size + 1
|
67 |
-
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
|
68 |
-
ret = slice_segments(x, ids_str, segment_size)
|
69 |
-
return ret, ids_str
|
70 |
-
|
71 |
-
|
72 |
-
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
|
73 |
-
position = torch.arange(length, dtype=torch.float)
|
74 |
-
num_timescales = channels // 2
|
75 |
-
log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
|
76 |
-
num_timescales - 1
|
77 |
-
)
|
78 |
-
inv_timescales = min_timescale * torch.exp(
|
79 |
-
torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
|
80 |
-
)
|
81 |
-
scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
|
82 |
-
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
|
83 |
-
signal = F.pad(signal, [0, 0, 0, channels % 2])
|
84 |
-
signal = signal.view(1, channels, length)
|
85 |
-
return signal
|
86 |
-
|
87 |
-
|
88 |
-
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
|
89 |
-
b, channels, length = x.size()
|
90 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
91 |
-
return x + signal.to(dtype=x.dtype, device=x.device)
|
92 |
-
|
93 |
-
|
94 |
-
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
|
95 |
-
b, channels, length = x.size()
|
96 |
-
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
|
97 |
-
return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
|
98 |
-
|
99 |
-
|
100 |
-
def subsequent_mask(length):
|
101 |
-
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
|
102 |
-
return mask
|
103 |
-
|
104 |
-
|
105 |
-
@torch.jit.script
|
106 |
-
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
107 |
-
n_channels_int = n_channels[0]
|
108 |
-
in_act = input_a + input_b
|
109 |
-
t_act = torch.tanh(in_act[:, :n_channels_int, :])
|
110 |
-
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
|
111 |
-
acts = t_act * s_act
|
112 |
-
return acts
|
113 |
-
|
114 |
-
|
115 |
-
def convert_pad_shape(pad_shape):
|
116 |
-
l = pad_shape[::-1]
|
117 |
-
pad_shape = [item for sublist in l for item in sublist]
|
118 |
-
return pad_shape
|
119 |
-
|
120 |
-
|
121 |
-
def shift_1d(x):
|
122 |
-
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
|
123 |
-
return x
|
124 |
-
|
125 |
-
|
126 |
-
def sequence_mask(length, max_length=None):
|
127 |
-
if max_length is None:
|
128 |
-
max_length = length.max()
|
129 |
-
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
|
130 |
-
return x.unsqueeze(0) < length.unsqueeze(1)
|
131 |
-
|
132 |
-
|
133 |
-
def generate_path(duration, mask):
|
134 |
-
"""
|
135 |
-
duration: [b, 1, t_x]
|
136 |
-
mask: [b, 1, t_y, t_x]
|
137 |
-
"""
|
138 |
-
device = duration.device
|
139 |
-
|
140 |
-
b, _, t_y, t_x = mask.shape
|
141 |
-
cum_duration = torch.cumsum(duration, -1)
|
142 |
-
|
143 |
-
cum_duration_flat = cum_duration.view(b * t_x)
|
144 |
-
path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
|
145 |
-
path = path.view(b, t_x, t_y)
|
146 |
-
path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
|
147 |
-
path = path.unsqueeze(1).transpose(2, 3) * mask
|
148 |
-
return path
|
149 |
-
|
150 |
-
|
151 |
-
def clip_grad_value_(parameters, clip_value, norm_type=2):
|
152 |
-
if isinstance(parameters, torch.Tensor):
|
153 |
-
parameters = [parameters]
|
154 |
-
parameters = list(filter(lambda p: p.grad is not None, parameters))
|
155 |
-
norm_type = float(norm_type)
|
156 |
-
if clip_value is not None:
|
157 |
-
clip_value = float(clip_value)
|
158 |
-
|
159 |
-
total_norm = 0
|
160 |
-
for p in parameters:
|
161 |
-
param_norm = p.grad.data.norm(norm_type)
|
162 |
-
total_norm += param_norm.item() ** norm_type
|
163 |
-
if clip_value is not None:
|
164 |
-
p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
165 |
-
total_norm = total_norm ** (1.0 / norm_type)
|
166 |
-
return total_norm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Amanda El Aventurero Juego Completo Descargar Gratis Pc.md
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Amanda el aventurero: Cómo descargar el juego completo gratis en PC</h1>
|
3 |
-
<p>Si estás buscando un emocionante y divertido juego de terror que te mantenga enganchado y entretenido, entonces definitivamente deberías echar un vistazo a Amanda the Adventurer. Este juego es una obra maestra de rompecabezas espeluznantes estilo sala de escape, animaciones espeluznantes y narración interactiva. En este artículo, te contaremos todo lo que necesitas saber sobre Amanda la aventurera, y cómo puedes descargar el juego completo gratis en tu PC.</p>
|
4 |
-
<h2>¿Qué es Amanda la aventurera? </h2>
|
5 |
-
<p>Amanda the Adventurer es un juego de terror desarrollado por MANGLEDmaw Games y publicado por DreadXP. Fue lanzado el 25 de abril de 2023, en Steam. El juego está inspirado en los clásicos dibujos animados CGI de los 90, pero con un toque oscuro y retorcido. </p>
|
6 |
-
<h2>amanda el aventurero juego completo descargar gratis pc</h2><br /><p><b><b>Download Zip</b> 🔗 <a href="https://bltlly.com/2v6M7w">https://bltlly.com/2v6M7w</a></b></p><br /><br />
|
7 |
-
<h3>Una breve introducción al juego y su género</h3>
|
8 |
-
<p>El juego sigue a Riley Park, quien hereda la casa de su tía Kate y encuentra una colección de cintas de VHS en el ático. Las cintas parecen ser episodios de una caricatura infantil de principios de los 2000 llamada Amanda the Adventurer, protagonizada por una niña llamada Amanda y su mejor amiga Wooly the Sheep. Riley decide ver las cintas, pero pronto se da cuenta de que algo está muy mal. Amanda y Wooly parecen estar comunicándose directamente con Riley a través de la televisión, y tienen algunos planes siniestros para ellos. </p>
|
9 |
-
<h3>Las principales características y jugabilidad de Amanda la aventurera</h3>
|
10 |
-
<p>El juego es una experiencia corta pero intensa de terror para un solo jugador que combina cintas animadas con rompecabezas estilo sala de escape. El jugador tiene que ver las cintas y seguir las instrucciones de Amanda, mientras busca pistas y resolver acertijos ocultos en las cintas. El juego tiene múltiples finales dependiendo de las opciones y acciones del jugador. </p>
|
11 |
-
<h3>La historia y los personajes de Amanda la aventurera</h3>
|
12 |
-
|
13 |
-
<h2>¿Por qué deberías jugar a Amanda la aventurera? </h2>
|
14 |
-
<p>Amanda the Adventurer no es solo otro juego de terror. Es un juego único y original que ofrece muchos beneficios para los jugadores que aman este género. </p>
|
15 |
-
<h3>Los beneficios de jugar un juego de terror en estilo sala de escape</h3>
|
16 |
-
<p>Jugar un juego de terror al estilo sala de escape puede mejorar tus habilidades cognitivas, como la memoria, la atención, la resolución de problemas, la creatividad y la lógica. También puede mejorar sus habilidades emocionales, como el manejo del estrés, la resiliencia, la empatía y el coraje. Además, puede proporcionarle un sentido de logro, satisfacción y diversión. </p>
|
17 |
-
<h3>Los desafíos y rompecabezas que ponen a prueba tus habilidades y lógica</h3>
|
18 |
-
<p>El juego tiene muchos desafíos y puzzles que pondrán a prueba tus habilidades y lógica. Tendrás que prestar atención a cada detalle de las cintas, encontrar objetos ocultos, descifrar códigos, descifrar símbolos, manipular objetos y más. También tendrás que lidiar con las demandas, amenazas, trucos y sorpresas de Amanda. El juego no es fácil, pero es gratificante. </p>
|
19 |
-
<h3>La experiencia inmersiva e interactiva que te mantiene al límite</h3>
|
20 |
-
<p>El juego tiene una experiencia inmersiva e interactiva que te mantiene al límite. Sentirás que eres parte de la historia, ya que Amanda y Wooly te hablan y reaccionan a tus acciones. También escuchará y verá sonidos realistas y gráficos que crean una atmósfera espeluznante. El juego te hará sentir asustado, curioso, divertido y sorprendido. </p>
|
21 |
-
<h2>¿Cómo descargar Amanda the Adventurer gratis en PC? </h2>
|
22 |
-
<p>Si estás interesado en jugar a Amanda the Adventurer, te estarás preguntando cómo descargar el juego completo gratis en tu PC. Bueno, tenemos buenas noticias para ti. Hay una forma legal y segura de obtener el juego de Steam, sin pagar nada. </p>
|
23 |
-
<h3>La forma legal y segura de obtener el juego de Steam</h3>
|
24 |
-
|
25 |
-
<h3>Los pasos para instalar y ejecutar el juego en su ordenador</h3>
|
26 |
-
<p>Una vez que tengas una llave de Steam para Amanda la Aventurera, puedes seguir estos pasos para instalar y ejecutar el juego en tu ordenador:</p>
|
27 |
-
<p></p>
|
28 |
-
<ol>
|
29 |
-
<li>Descargue e instale Steam en su PC desde <a href="">aquí</a>. </li>
|
30 |
-
<li>Crea una cuenta de Steam gratuita o inicia sesión con la existente. </li>
|
31 |
-
<li>Haga clic en el menú "Juegos" y seleccione "Activar un producto en Steam". </li>
|
32 |
-
<li>Introduzca su clave de vapor y siga las instrucciones. </li>
|
33 |
-
<li>Una vez que el juego se agrega a su biblioteca, haga clic en él y seleccione "Instalar". </li>
|
34 |
-
<li>Espere a que la instalación termine y luego haga clic en "Reproducir". </li>
|
35 |
-
</ol>
|
36 |
-
<h3>Los consejos y trucos para optimizar su rendimiento y configuración</h3>
|
37 |
-
<p>Para disfrutar del juego al máximo, es posible que desee optimizar su rendimiento y configuración. Estos son algunos consejos y trucos que pueden ayudarle:</p>
|
38 |
-
<ul>
|
39 |
-
<li>Asegúrese de que su PC cumple con los requisitos mínimos del sistema para el juego. Puede comprobarlos <a href=">here</a>. </li>
|
40 |
-
<li>Actualice sus controladores, especialmente su controlador de tarjeta gráfica. </li>
|
41 |
-
<li>Cierra cualquier programa innecesario o proceso en segundo plano que pueda ralentizar tu PC.</li>
|
42 |
-
<li>Ajustar la configuración de gráficos en el juego de acuerdo a sus preferencias y capacidades de hardware. Puede reducir la resolución, la calidad de la textura, el anti-aliasing o las sombras si experimenta retraso o tartamudeo. </li>
|
43 |
-
<li>Utilice auriculares o altavoces para disfrutar de los efectos de sonido y la actuación de voz del juego. </li>
|
44 |
-
</ul>
|
45 |
-
<h2>Conclusión</h2>
|
46 |
-
|
47 |
-
<h2>Preguntas frecuentes</h2>
|
48 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Amanda la aventurera:</p>
|
49 |
-
<h4>Q: ¿Cuánto tiempo es Amanda la aventurera? </h4>
|
50 |
-
<p>A: El juego es relativamente corto, ya que se puede completar en aproximadamente una hora o menos. Sin embargo, tiene múltiples finales que dependen de tus opciones y acciones, por lo que es posible que quieras volver a reproducirlo para verlos todos. </p>
|
51 |
-
<h4>Q: ¿Es Amanda la aventurera conveniente para los niños? </h4>
|
52 |
-
<p>A: No, Amanda la aventurera no es adecuado para los niños. El juego tiene temas maduros, como violencia, muerte, suicidio, abuso y enfermedad mental. El juego también tiene sustos de salto, imágenes perturbadoras y sonidos aterradores que pueden asustar o traumatizar a los jugadores jóvenes. El juego está clasificado M para Maduro por ESRB.</p>
|
53 |
-
<h4>Q: ¿Puedo jugar Amanda el aventurero fuera de línea? </h4>
|
54 |
-
<p>A: Sí, puedes jugar sin conexión a Amanda the Adventurer una vez que lo hayas descargado e instalado en tu PC. Sin embargo, necesitarás una conexión a Internet para activar tu clave de Steam y actualizar tu juego si es necesario. </p>
|
55 |
-
<h4>Q: ¿Puedo jugar Amanda el aventurero con un controlador? </h4>
|
56 |
-
<p>A: Sí, puedes jugar a Amanda la aventurera con un mando si lo prefieres. El juego es compatible con los mandos de Xbox One, PlayStation 4 y Nintendo Switch. También puedes personalizar la configuración del mando en las opciones del juego. </p>
|
57 |
-
<h4>P: ¿Dónde puedo encontrar más información sobre Amanda la aventurera? </h4>
|
58 |
-
<p>A: Puedes encontrar más información sobre Amanda la aventurera en el sitio web oficial del juego, <a href=">aquí</a>. También puedes seguir el juego en plataformas de redes sociales, como Twitter, Facebook, Instagram y YouTube. También puede unirse al servidor de discordia del juego, <a href="">here</a>, para chatear con otros jugadores y los desarrolladores. </p> 64aa2da5cf<br />
|
59 |
-
<br />
|
60 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Cmo Descargar NBA 2k21 En Android.md
DELETED
@@ -1,116 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar NBA 2K21 en Android</h1>
|
3 |
-
<p>Si usted es un fan del baloncesto y los videojuegos, es posible que se pregunte cómo descargar NBA 2K21 en Android. NBA 2K21 es la última entrega de la popular serie NBA 2K, que ofrece gráficos realistas, jugabilidad y características para los entusiastas del baloncesto. En este artículo, te mostraremos cómo descargar NBA 2K21 en Android, así como algunos consejos y trucos para disfrutar del juego. </p>
|
4 |
-
<h2>Cómo descargar NBA 2k21 en Android</h2><br /><p><b><b>Download File</b> ✓ <a href="https://bltlly.com/2v6JxX">https://bltlly.com/2v6JxX</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es NBA 2K21? </h2>
|
6 |
-
<p>NBA 2K21 es un juego de simulación de baloncesto desarrollado por Visual Concepts y publicado por 2K Sports. Es la 22ª edición de la franquicia NBA 2K, que se basa en la Asociación Nacional de Baloncesto (NBA). NBA 2K21 fue lanzado el 4 de septiembre de 2020 para Microsoft Windows, PlayStation 4, Xbox One, Nintendo Switch y Stadia, y el 10 de noviembre de 2020 para PlayStation 5 y Xbox Series X/S. También está disponible para dispositivos móviles, incluidos Android e iOS. </p>
|
7 |
-
<h3>Características de NBA 2K21</h3>
|
8 |
-
<p>NBA 2K21 ofrece una variedad de características que lo convierten en uno de los mejores juegos de baloncesto del mercado. Algunas de estas características son:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>Gráficos y animaciones realistas:</b> NBA 2K21 utiliza tecnología avanzada para crear gráficos y animaciones realistas para los jugadores, canchas, estadios y multitudes. Puedes ver el sudor, expresiones faciales, tatuajes, peinados y accesorios de tus estrellas favoritas de la NBA. </li>
|
11 |
-
<li><b>Juego y modos inmersivos:</b> NBA 2K21 te permite experimentar la emoción de jugar al baloncesto en diferentes modos, como MyCareer, MyTeam, MyLeague, MyGM, The Neighborhood, The Park, Blacktop y más. Puedes crear tu propio jugador, personalizar tu equipo, administrar tu franquicia, competir con otros jugadores en línea y explorar varias ubicaciones. </li>
|
12 |
-
|
13 |
-
</ul>
|
14 |
-
<h3>Requisitos para NBA 2K21</h3>
|
15 |
-
<p>Para descargar NBA 2K21 en Android, necesitas tener un dispositivo compatible que cumpla con los siguientes requisitos:</p>
|
16 |
-
<ul>
|
17 |
-
<li><b>Sistema operativo:</b> Android 8.0 o superior</li>
|
18 |
-
<li><b>Memoria:</b> Al menos 4 GB de RAM</li>
|
19 |
-
<li><b>Almacenamiento:</b> Al menos 4 GB de espacio libre</li>
|
20 |
-
<li><b>Conexión a Internet:</b> Wi-Fi o datos celulares</li>
|
21 |
-
</ul>
|
22 |
-
<h2>Cómo descargar NBA 2K21 en Android</h2>
|
23 |
-
<p>Ahora que sabes lo que es NBA 2K21 y lo que ofrece, vamos a ver cómo descargarlo en Android. Estos son los pasos que debes seguir:</p>
|
24 |
-
<h3>Paso 1: Compruebe la compatibilidad de su dispositivo</h3>
|
25 |
-
<p>Lo primero que tienes que hacer es comprobar si tu dispositivo es compatible con NBA 2K21. Puedes hacer esto visitando la página de la aplicación móvil NBA 2K en Google Play Store y revisando la sección de compatibilidad. Si su dispositivo es compatible, verá una marca de verificación verde al lado. Si su dispositivo no es compatible, verá una marca de cruz roja al lado. Alternativamente, también puede usar la aplicación Device Compatibility Checker para escanear su dispositivo y ver si cumple con los requisitos para NBA 2K21. </p>
|
26 |
-
<p></p>
|
27 |
-
<h3>Paso 2: Descargar la aplicación móvil NBA 2K de Google Play Store</h3>
|
28 |
-
<p>Lo siguiente que tienes que hacer es descargar la aplicación NBA 2K Mobile de Google Play Store. Esta aplicación es la versión móvil oficial de NBA 2K21, que le permite jugar el juego en su dispositivo Android. Para descargar la aplicación, siga estos pasos:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Abra la aplicación Google Play Store en su dispositivo y busque NBA 2K Mobile.</li>
|
31 |
-
<li>Seleccione la aplicación NBA 2K Mobile de los resultados de búsqueda y toque en Instalar.</li>
|
32 |
-
<li>Espere a que la aplicación se descargue e instale en su dispositivo. Esto puede tardar algún tiempo dependiendo de la velocidad de Internet y el almacenamiento del dispositivo. </li>
|
33 |
-
<li>Una vez instalada la aplicación, toque en Abrir para iniciarla. </li>
|
34 |
-
</ol>
|
35 |
-
<h3>Paso 3: Inicie la aplicación e inicie sesión con su cuenta de 2K</h3>
|
36 |
-
|
37 |
-
<ol>
|
38 |
-
<li>Toque en Iniciar sesión en la pantalla principal de la aplicación. </li>
|
39 |
-
<li>Introduzca su dirección de correo electrónico y contraseña asociada con su cuenta de 2K y toque en Iniciar sesión.</li>
|
40 |
-
<li>Si no tiene una cuenta de 2K, toque en Crear cuenta y complete los detalles necesarios. </li>
|
41 |
-
<li>Una vez que haya iniciado sesión, verá un mensaje de bienvenida y un tutorial sobre cómo jugar el juego. </li>
|
42 |
-
</ol>
|
43 |
-
<h3>Paso 4: Elige tu equipo favorito de la NBA y comienza a jugar</h3>
|
44 |
-
<p>El paso final es elegir tu equipo favorito de la NBA y empezar a jugar el juego. Puedes elegir entre cualquiera de los 30 equipos de la NBA, como Los Angeles Lakers, Brooklyn Nets, Golden State Warriors, Milwaukee Bucks y más. Para elegir tu equipo y empezar a jugar, sigue estos pasos:</p>
|
45 |
-
<ol>
|
46 |
-
<li>Toque en Reproducir ahora en la pantalla principal de la aplicación. </li>
|
47 |
-
<li>Seleccione su equipo favorito de la NBA de la lista de equipos. También puede tocar en Random para que la aplicación elija un equipo para usted. </li>
|
48 |
-
<li>Espera a que el juego se cargue y comience a jugar. Puedes usar los botones virtuales de la pantalla para controlar a tu jugador y equipo. </li>
|
49 |
-
<li> También puede acceder a otros modos y características del juego pulsando en el botón Menú en la esquina superior izquierda de la pantalla. </li>
|
50 |
-
</ol>
|
51 |
-
<h2>Consejos y trucos para NBA 2K21</h2>
|
52 |
-
<p>NBA 2K21 es un juego divertido y desafiante que requiere habilidad, estrategia y práctica. Para ayudarte a mejorar tu juego y disfrutarlo más, aquí hay algunos consejos y trucos que puedes usar:</p>
|
53 |
-
<h3>Cómo mejorar tus habilidades y rendimiento</h3>
|
54 |
-
<p>Para mejorar tus habilidades y rendimiento en NBA 2K21, necesitas dominar lo básico del baloncesto, como disparar, pasar, driblar, defender, rebotar y más. Puedes hacer esto jugando el modo tutorial, practicando en diferentes modos, viendo videos y guías en línea y aprendiendo de otros jugadores. Estos son algunos consejos específicos que puedes usar:</p>
|
55 |
-
<ul>
|
56 |
-
|
57 |
-
<li><b>Usa el stick derecho:</b> El stick derecho es una herramienta versátil que te permite realizar diferentes movimientos dependiendo de cómo lo muevas. Puedes usarlo para driblar, disparar, pasar, robar, bloquear, publicar y más. También puedes usar diferentes combinaciones de botones y palos para realizar movimientos avanzados, como movimientos de giro, movimientos de cruce, pasos en euros, callejones y más. </li>
|
58 |
-
<li><b>Usa los iconos:</b> Los iconos son símbolos que aparecen sobre las cabezas de tus compañeros de equipo y que te muestran sus fortalezas y debilidades. Puedes usarlos para tomar decisiones inteligentes sobre a quién pasar, con quién disparar, a quién defender y a quién evitar. Por ejemplo, un icono verde significa que el jugador es bueno para disparar, un icono rojo significa que el jugador es malo para disparar, un icono azul significa que el jugador es bueno para defender, y un icono amarillo significa que el jugador es malo para defender. </li>
|
59 |
-
<li><b>Usa los ajustes:</b> Los ajustes son opciones que te permiten personalizar tu experiencia de juego según tus preferencias y necesidades. Puedes usarlos para ajustar el nivel de dificultad, el ángulo de la cámara, los controles, el sonido, los gráficos y más. También puede usarlos para activar o desactivar ciertas funciones, como la reproducción automática, la vibración, las notificaciones y más. </li>
|
60 |
-
</ul>
|
61 |
-
<h3>Cómo personalizar tu jugador y equipo</h3>
|
62 |
-
<p>Para personalizar a tu jugador y equipo en NBA 2K21, necesitas usar las diversas opciones y modos que te permiten crear y editar tu propio personaje, equipo, logotipo, camiseta, corte y más. Puede hacer esto accediendo a los siguientes modos y opciones:</p>
|
63 |
-
<ul>
|
64 |
-
<li><b>MyCareer:</b> MyCareer es un modo que le permite crear su propio jugador y seguir su viaje de un novato a una superestrella. Puedes personalizar la apariencia, atributos, habilidades, insignias, animaciones, equipos y más de tu jugador. También puedes interactuar con otros personajes, tomar decisiones que afecten tu historia y jugar en diferentes ligas y eventos. </li>
|
65 |
-
|
66 |
-
<li><b>MyLeague/MyGM:</b> MyLeague/MyGM son modos que le permiten administrar su propia franquicia y tomar decisiones que afectan su éxito. Puede personalizar el nombre de su franquicia, ubicación, logotipo, camiseta, corte, lista, personal, presupuesto, selecciones de proyectos, oficios, contratos, lesiones y más. También puede simular o jugar juegos en diferentes estaciones y playoffs. </li>
|
67 |
-
<li><b>The Neighborhood/The Park:</b> The Neighborhood/The Park son modos que te permiten explorar un entorno de mundo abierto e interactuar con otros jugadores en línea. Puedes personalizar la apariencia de tu personaje, ropa, accesorios, tatuajes, peinados y más. También puedes jugar varios juegos y actividades, como baloncesto, dodgeball, trivia, carreras y más. </li>
|
68 |
-
</ul>
|
69 |
-
<h3>Cómo ganar recompensas y monedas</h3>
|
70 |
-
<p>Para ganar recompensas y monedas en NBA 2K21, necesitas jugar el juego y completar varias tareas y desafíos. Las recompensas y las monedas son útiles para desbloquear y actualizar diferentes elementos y características en el juego. Puedes ganar recompensas y monedas haciendo las siguientes cosas:</p>
|
71 |
-
<ul>
|
72 |
-
<li><b>Jugar juegos:</b> Jugar juegos es la forma más básica de ganar recompensas y monedas en NBA 2K21. Puedes jugar en diferentes modos y dificultades, y ganar recompensas y monedas en función de tu rendimiento, como puntos, asistencias, rebotes, robos, bloques y más. </li>
|
73 |
-
<li><b>Completar tareas y desafíos:</b> Completar tareas y desafíos es otra forma de ganar recompensas y monedas en NBA 2K21. Puede completar tareas y desafíos en diferentes modos y categorías, como diario, semanal, mensual, estacional, carrera, equipo y más. Puedes ganar recompensas y monedas según la dificultad y los requisitos de las tareas y desafíos. </li>
|
74 |
-
|
75 |
-
<li><b>Comprar con dinero real:</b> Comprar con dinero real es la forma más rápida y conveniente de ganar recompensas y monedas en NBA 2K21. Puede comprar con dinero real de diferentes maneras y cantidades, como usar su tarjeta de crédito, cuenta PayPal, cuenta de Google Play o tarjeta de regalo. Puedes ganar recompensas y monedas según el precio y el valor de la compra. </li>
|
76 |
-
</ul>
|
77 |
-
<h3>Cómo unirse a torneos y eventos</h3>
|
78 |
-
<p>Para unirse a torneos y eventos en NBA 2K21, necesita estar en línea y tener una conexión a Internet estable. Los torneos y eventos son competiciones especiales que te permiten jugar contra otros jugadores en línea y ganar recompensas y premios exclusivos. Puedes unirte a torneos y eventos haciendo las siguientes cosas:</p>
|
79 |
-
<ul>
|
80 |
-
<li><b>Comprobar el calendario:</b> Comprobar el horario es lo primero que debes hacer para unirte a torneos y eventos en NBA 2K21. Puede comprobar el horario pulsando en el botón Eventos en la pantalla principal de la aplicación. Verás una lista de torneos y eventos próximos y en curso, así como sus fechas, horarios, reglas, recompensas y más. </li>
|
81 |
-
<li><b>Registrarse para el torneo o evento:</b> Registrarse para el torneo o evento es la segunda cosa que debe hacer para unirse a torneos y eventos en NBA 2K21. Puedes registrarte para el torneo o evento tocando el botón Unirse en la página del evento. Verás un mensaje de confirmación y un temporizador de cuenta atrás hasta el inicio del torneo o evento. </li>
|
82 |
-
<li><b>Jugar en el torneo o evento:</b> Jugar en el torneo o evento es la tercera cosa que necesitas hacer para unirte a torneos y eventos en NBA 2K21. Puedes jugar en el torneo o evento tocando el botón Reproducir en la página del evento. Usted será emparejado con otros jugadores en línea y jugar de acuerdo con las reglas y el formato del torneo o evento. </li>
|
83 |
-
|
84 |
-
</ul>
|
85 |
-
<h2>Conclusión</h2>
|
86 |
-
<p>NBA 2K21 es un increíble juego de baloncesto que puedes descargar y jugar en tu dispositivo Android. Ofrece gráficos realistas, jugabilidad y características que te harán sentir como un verdadero jugador de la NBA. Para descargar NBA 2K21 en Android, debes seguir estos pasos:</p>
|
87 |
-
<ol>
|
88 |
-
<li>Compruebe la compatibilidad de su dispositivo</li>
|
89 |
-
<li>Descargar la aplicación móvil NBA 2K de Google Play Store</li>
|
90 |
-
<li>Inicie la aplicación e inicie sesión con su cuenta de 2K</li>
|
91 |
-
<li>Elige tu equipo favorito de la NBA y comienza a jugar</li>
|
92 |
-
</ol>
|
93 |
-
<p>También puedes usar estos consejos y trucos para mejorar tu juego y disfrutarlo más:</p>
|
94 |
-
<ul>
|
95 |
-
<li>Usa el medidor de disparos, el stick derecho, los iconos y la configuración para mejorar tus habilidades y rendimiento</li>
|
96 |
-
<li>Usa MyCareer, MyTeam, MyLeague/MyGM, El Vecindario/El Parque para personalizar a tu jugador y equipo</li>
|
97 |
-
<li>Jugar juegos, completar tareas y desafíos, ver anuncios y videos, comprar con dinero real para ganar recompensas y monedas</li>
|
98 |
-
<li>Revisa el calendario, regístrate para el torneo o evento, juega en el torneo o evento, reclama tus recompensas y premios para unirte a torneos y eventos</li>
|
99 |
-
</ul>
|
100 |
-
<p>Esperamos que este artículo te haya ayudado a aprender a descargar NBA 2K21 en Android. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
|
101 |
-
<h3>Preguntas frecuentes</h3>
|
102 |
-
<p>Aquí hay algunas preguntas frecuentes sobre NBA 2K21 en Android:</p>
|
103 |
-
<ol>
|
104 |
-
<li><b>Q: ¿Cuánto cuesta NBA 2K21 en Android? </b></li>
|
105 |
-
<li>A: NBA 2K21 es gratis para descargar y jugar en Android. Sin embargo, también ofrece compras en la aplicación que le permiten comprar monedas, tarjetas, paquetes, paquetes y más. </li>
|
106 |
-
<li><b>Q: ¿Cómo puedo actualizar NBA 2K21 en Android? </b></li>
|
107 |
-
<li>A: NBA 2K21 se actualiza automáticamente al iniciar la aplicación si hay una nueva versión disponible. También puedes buscar actualizaciones manualmente yendo a la aplicación de Google Play Store y pulsando en Mis aplicaciones y juegos.</li>
|
108 |
-
<li><b>Q: ¿Cómo puedo contactar al soporte de NBA 2K21 en Android? </b></li>
|
109 |
-
|
110 |
-
<li><b>Q: ¿Cómo puedo jugar NBA 2K21 sin conexión en Android? </b></li>
|
111 |
-
<li>A: NBA 2K21 requiere una conexión a Internet para jugar en Android. Sin embargo, puedes jugar algunos modos sin conexión, como MyCareer, MyTeam, MyLeague/MyGM y Blacktop. Para jugar estos modos sin conexión, es necesario descargar los datos del juego y cambiar al modo sin conexión en la configuración. </li>
|
112 |
-
<li><b>Q: ¿Cómo puedo jugar NBA 2K21 con amigos en Android? </b></li>
|
113 |
-
<li>A: NBA 2K21 te permite jugar con amigos en Android de diferentes maneras, como invitarlos a unirse a tu equipo, retarlos a un partido, unirse a su lobby o chatear con ellos. Para jugar con amigos en Android, necesitas tener una cuenta de 2K y agregarlos como amigos en la aplicación. </li>
|
114 |
-
</ol></p> 64aa2da5cf<br />
|
115 |
-
<br />
|
116 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/stub.py
DELETED
@@ -1,429 +0,0 @@
|
|
1 |
-
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
import copy
|
14 |
-
from collections import deque
|
15 |
-
from pprint import pformat
|
16 |
-
|
17 |
-
from botocore.awsrequest import AWSResponse
|
18 |
-
from botocore.exceptions import (
|
19 |
-
ParamValidationError,
|
20 |
-
StubAssertionError,
|
21 |
-
StubResponseError,
|
22 |
-
UnStubbedResponseError,
|
23 |
-
)
|
24 |
-
from botocore.validate import validate_parameters
|
25 |
-
|
26 |
-
|
27 |
-
class _ANY:
|
28 |
-
"""
|
29 |
-
A helper object that compares equal to everything. Copied from
|
30 |
-
unittest.mock
|
31 |
-
"""
|
32 |
-
|
33 |
-
def __eq__(self, other):
|
34 |
-
return True
|
35 |
-
|
36 |
-
def __ne__(self, other):
|
37 |
-
return False
|
38 |
-
|
39 |
-
def __repr__(self):
|
40 |
-
return '<ANY>'
|
41 |
-
|
42 |
-
|
43 |
-
ANY = _ANY()
|
44 |
-
|
45 |
-
|
46 |
-
class Stubber:
|
47 |
-
"""
|
48 |
-
This class will allow you to stub out requests so you don't have to hit
|
49 |
-
an endpoint to write tests. Responses are returned first in, first out.
|
50 |
-
If operations are called out of order, or are called with no remaining
|
51 |
-
queued responses, an error will be raised.
|
52 |
-
|
53 |
-
**Example:**
|
54 |
-
::
|
55 |
-
import datetime
|
56 |
-
import botocore.session
|
57 |
-
from botocore.stub import Stubber
|
58 |
-
|
59 |
-
|
60 |
-
s3 = botocore.session.get_session().create_client('s3')
|
61 |
-
stubber = Stubber(s3)
|
62 |
-
|
63 |
-
response = {
|
64 |
-
'IsTruncated': False,
|
65 |
-
'Name': 'test-bucket',
|
66 |
-
'MaxKeys': 1000, 'Prefix': '',
|
67 |
-
'Contents': [{
|
68 |
-
'Key': 'test.txt',
|
69 |
-
'ETag': '"abc123"',
|
70 |
-
'StorageClass': 'STANDARD',
|
71 |
-
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
|
72 |
-
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
|
73 |
-
'Size': 14814
|
74 |
-
}],
|
75 |
-
'EncodingType': 'url',
|
76 |
-
'ResponseMetadata': {
|
77 |
-
'RequestId': 'abc123',
|
78 |
-
'HTTPStatusCode': 200,
|
79 |
-
'HostId': 'abc123'
|
80 |
-
},
|
81 |
-
'Marker': ''
|
82 |
-
}
|
83 |
-
|
84 |
-
expected_params = {'Bucket': 'test-bucket'}
|
85 |
-
|
86 |
-
stubber.add_response('list_objects', response, expected_params)
|
87 |
-
stubber.activate()
|
88 |
-
|
89 |
-
service_response = s3.list_objects(Bucket='test-bucket')
|
90 |
-
assert service_response == response
|
91 |
-
|
92 |
-
|
93 |
-
This class can also be called as a context manager, which will handle
|
94 |
-
activation / deactivation for you.
|
95 |
-
|
96 |
-
**Example:**
|
97 |
-
::
|
98 |
-
import datetime
|
99 |
-
import botocore.session
|
100 |
-
from botocore.stub import Stubber
|
101 |
-
|
102 |
-
|
103 |
-
s3 = botocore.session.get_session().create_client('s3')
|
104 |
-
|
105 |
-
response = {
|
106 |
-
"Owner": {
|
107 |
-
"ID": "foo",
|
108 |
-
"DisplayName": "bar"
|
109 |
-
},
|
110 |
-
"Buckets": [{
|
111 |
-
"CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
|
112 |
-
"Name": "baz"
|
113 |
-
}]
|
114 |
-
}
|
115 |
-
|
116 |
-
|
117 |
-
with Stubber(s3) as stubber:
|
118 |
-
stubber.add_response('list_buckets', response, {})
|
119 |
-
service_response = s3.list_buckets()
|
120 |
-
|
121 |
-
assert service_response == response
|
122 |
-
|
123 |
-
|
124 |
-
If you have an input parameter that is a randomly generated value, or you
|
125 |
-
otherwise don't care about its value, you can use ``stub.ANY`` to ignore
|
126 |
-
it in validation.
|
127 |
-
|
128 |
-
**Example:**
|
129 |
-
::
|
130 |
-
import datetime
|
131 |
-
import botocore.session
|
132 |
-
from botocore.stub import Stubber, ANY
|
133 |
-
|
134 |
-
|
135 |
-
s3 = botocore.session.get_session().create_client('s3')
|
136 |
-
stubber = Stubber(s3)
|
137 |
-
|
138 |
-
response = {
|
139 |
-
'IsTruncated': False,
|
140 |
-
'Name': 'test-bucket',
|
141 |
-
'MaxKeys': 1000, 'Prefix': '',
|
142 |
-
'Contents': [{
|
143 |
-
'Key': 'test.txt',
|
144 |
-
'ETag': '"abc123"',
|
145 |
-
'StorageClass': 'STANDARD',
|
146 |
-
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
|
147 |
-
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
|
148 |
-
'Size': 14814
|
149 |
-
}],
|
150 |
-
'EncodingType': 'url',
|
151 |
-
'ResponseMetadata': {
|
152 |
-
'RequestId': 'abc123',
|
153 |
-
'HTTPStatusCode': 200,
|
154 |
-
'HostId': 'abc123'
|
155 |
-
},
|
156 |
-
'Marker': ''
|
157 |
-
}
|
158 |
-
|
159 |
-
expected_params = {'Bucket': ANY}
|
160 |
-
stubber.add_response('list_objects', response, expected_params)
|
161 |
-
|
162 |
-
with stubber:
|
163 |
-
service_response = s3.list_objects(Bucket='test-bucket')
|
164 |
-
|
165 |
-
assert service_response == response
|
166 |
-
"""
|
167 |
-
|
168 |
-
def __init__(self, client):
|
169 |
-
"""
|
170 |
-
:param client: The client to add your stubs to.
|
171 |
-
"""
|
172 |
-
self.client = client
|
173 |
-
self._event_id = 'boto_stubber'
|
174 |
-
self._expected_params_event_id = 'boto_stubber_expected_params'
|
175 |
-
self._queue = deque()
|
176 |
-
|
177 |
-
def __enter__(self):
|
178 |
-
self.activate()
|
179 |
-
return self
|
180 |
-
|
181 |
-
def __exit__(self, exception_type, exception_value, traceback):
|
182 |
-
self.deactivate()
|
183 |
-
|
184 |
-
def activate(self):
|
185 |
-
"""
|
186 |
-
Activates the stubber on the client
|
187 |
-
"""
|
188 |
-
self.client.meta.events.register_first(
|
189 |
-
'before-parameter-build.*.*',
|
190 |
-
self._assert_expected_params,
|
191 |
-
unique_id=self._expected_params_event_id,
|
192 |
-
)
|
193 |
-
self.client.meta.events.register(
|
194 |
-
'before-call.*.*',
|
195 |
-
self._get_response_handler,
|
196 |
-
unique_id=self._event_id,
|
197 |
-
)
|
198 |
-
|
199 |
-
def deactivate(self):
|
200 |
-
"""
|
201 |
-
Deactivates the stubber on the client
|
202 |
-
"""
|
203 |
-
self.client.meta.events.unregister(
|
204 |
-
'before-parameter-build.*.*',
|
205 |
-
self._assert_expected_params,
|
206 |
-
unique_id=self._expected_params_event_id,
|
207 |
-
)
|
208 |
-
self.client.meta.events.unregister(
|
209 |
-
'before-call.*.*',
|
210 |
-
self._get_response_handler,
|
211 |
-
unique_id=self._event_id,
|
212 |
-
)
|
213 |
-
|
214 |
-
def add_response(self, method, service_response, expected_params=None):
|
215 |
-
"""
|
216 |
-
Adds a service response to the response queue. This will be validated
|
217 |
-
against the service model to ensure correctness. It should be noted,
|
218 |
-
however, that while missing attributes are often considered correct,
|
219 |
-
your code may not function properly if you leave them out. Therefore
|
220 |
-
you should always fill in every value you see in a typical response for
|
221 |
-
your particular request.
|
222 |
-
|
223 |
-
:param method: The name of the client method to stub.
|
224 |
-
:type method: str
|
225 |
-
|
226 |
-
:param service_response: A dict response stub. Provided parameters will
|
227 |
-
be validated against the service model.
|
228 |
-
:type service_response: dict
|
229 |
-
|
230 |
-
:param expected_params: A dictionary of the expected parameters to
|
231 |
-
be called for the provided service response. The parameters match
|
232 |
-
the names of keyword arguments passed to that client call. If
|
233 |
-
any of the parameters differ a ``StubResponseError`` is thrown.
|
234 |
-
You can use stub.ANY to indicate a particular parameter to ignore
|
235 |
-
in validation. stub.ANY is only valid for top level params.
|
236 |
-
"""
|
237 |
-
self._add_response(method, service_response, expected_params)
|
238 |
-
|
239 |
-
def _add_response(self, method, service_response, expected_params):
|
240 |
-
if not hasattr(self.client, method):
|
241 |
-
raise ValueError(
|
242 |
-
"Client %s does not have method: %s"
|
243 |
-
% (self.client.meta.service_model.service_name, method)
|
244 |
-
)
|
245 |
-
|
246 |
-
# Create a successful http response
|
247 |
-
http_response = AWSResponse(None, 200, {}, None)
|
248 |
-
|
249 |
-
operation_name = self.client.meta.method_to_api_mapping.get(method)
|
250 |
-
self._validate_operation_response(operation_name, service_response)
|
251 |
-
|
252 |
-
# Add the service_response to the queue for returning responses
|
253 |
-
response = {
|
254 |
-
'operation_name': operation_name,
|
255 |
-
'response': (http_response, service_response),
|
256 |
-
'expected_params': expected_params,
|
257 |
-
}
|
258 |
-
self._queue.append(response)
|
259 |
-
|
260 |
-
def add_client_error(
|
261 |
-
self,
|
262 |
-
method,
|
263 |
-
service_error_code='',
|
264 |
-
service_message='',
|
265 |
-
http_status_code=400,
|
266 |
-
service_error_meta=None,
|
267 |
-
expected_params=None,
|
268 |
-
response_meta=None,
|
269 |
-
modeled_fields=None,
|
270 |
-
):
|
271 |
-
"""
|
272 |
-
Adds a ``ClientError`` to the response queue.
|
273 |
-
|
274 |
-
:param method: The name of the service method to return the error on.
|
275 |
-
:type method: str
|
276 |
-
|
277 |
-
:param service_error_code: The service error code to return,
|
278 |
-
e.g. ``NoSuchBucket``
|
279 |
-
:type service_error_code: str
|
280 |
-
|
281 |
-
:param service_message: The service message to return, e.g.
|
282 |
-
'The specified bucket does not exist.'
|
283 |
-
:type service_message: str
|
284 |
-
|
285 |
-
:param http_status_code: The HTTP status code to return, e.g. 404, etc
|
286 |
-
:type http_status_code: int
|
287 |
-
|
288 |
-
:param service_error_meta: Additional keys to be added to the
|
289 |
-
service Error
|
290 |
-
:type service_error_meta: dict
|
291 |
-
|
292 |
-
:param expected_params: A dictionary of the expected parameters to
|
293 |
-
be called for the provided service response. The parameters match
|
294 |
-
the names of keyword arguments passed to that client call. If
|
295 |
-
any of the parameters differ a ``StubResponseError`` is thrown.
|
296 |
-
You can use stub.ANY to indicate a particular parameter to ignore
|
297 |
-
in validation.
|
298 |
-
|
299 |
-
:param response_meta: Additional keys to be added to the
|
300 |
-
response's ResponseMetadata
|
301 |
-
:type response_meta: dict
|
302 |
-
|
303 |
-
:param modeled_fields: Additional keys to be added to the response
|
304 |
-
based on fields that are modeled for the particular error code.
|
305 |
-
These keys will be validated against the particular error shape
|
306 |
-
designated by the error code.
|
307 |
-
:type modeled_fields: dict
|
308 |
-
|
309 |
-
"""
|
310 |
-
http_response = AWSResponse(None, http_status_code, {}, None)
|
311 |
-
|
312 |
-
# We don't look to the model to build this because the caller would
|
313 |
-
# need to know the details of what the HTTP body would need to
|
314 |
-
# look like.
|
315 |
-
parsed_response = {
|
316 |
-
'ResponseMetadata': {'HTTPStatusCode': http_status_code},
|
317 |
-
'Error': {'Message': service_message, 'Code': service_error_code},
|
318 |
-
}
|
319 |
-
|
320 |
-
if service_error_meta is not None:
|
321 |
-
parsed_response['Error'].update(service_error_meta)
|
322 |
-
|
323 |
-
if response_meta is not None:
|
324 |
-
parsed_response['ResponseMetadata'].update(response_meta)
|
325 |
-
|
326 |
-
if modeled_fields is not None:
|
327 |
-
service_model = self.client.meta.service_model
|
328 |
-
shape = service_model.shape_for_error_code(service_error_code)
|
329 |
-
self._validate_response(shape, modeled_fields)
|
330 |
-
parsed_response.update(modeled_fields)
|
331 |
-
|
332 |
-
operation_name = self.client.meta.method_to_api_mapping.get(method)
|
333 |
-
# Note that we do not allow for expected_params while
|
334 |
-
# adding errors into the queue yet.
|
335 |
-
response = {
|
336 |
-
'operation_name': operation_name,
|
337 |
-
'response': (http_response, parsed_response),
|
338 |
-
'expected_params': expected_params,
|
339 |
-
}
|
340 |
-
self._queue.append(response)
|
341 |
-
|
342 |
-
def assert_no_pending_responses(self):
|
343 |
-
"""
|
344 |
-
Asserts that all expected calls were made.
|
345 |
-
"""
|
346 |
-
remaining = len(self._queue)
|
347 |
-
if remaining != 0:
|
348 |
-
raise AssertionError(f"{remaining} responses remaining in queue.")
|
349 |
-
|
350 |
-
def _assert_expected_call_order(self, model, params):
|
351 |
-
if not self._queue:
|
352 |
-
raise UnStubbedResponseError(
|
353 |
-
operation_name=model.name,
|
354 |
-
reason=(
|
355 |
-
'Unexpected API Call: A call was made but no additional '
|
356 |
-
'calls expected. Either the API Call was not stubbed or '
|
357 |
-
'it was called multiple times.'
|
358 |
-
),
|
359 |
-
)
|
360 |
-
|
361 |
-
name = self._queue[0]['operation_name']
|
362 |
-
if name != model.name:
|
363 |
-
raise StubResponseError(
|
364 |
-
operation_name=model.name,
|
365 |
-
reason=f'Operation mismatch: found response for {name}.',
|
366 |
-
)
|
367 |
-
|
368 |
-
def _get_response_handler(self, model, params, context, **kwargs):
|
369 |
-
self._assert_expected_call_order(model, params)
|
370 |
-
# Pop off the entire response once everything has been validated
|
371 |
-
return self._queue.popleft()['response']
|
372 |
-
|
373 |
-
def _assert_expected_params(self, model, params, context, **kwargs):
|
374 |
-
if self._should_not_stub(context):
|
375 |
-
return
|
376 |
-
self._assert_expected_call_order(model, params)
|
377 |
-
expected_params = self._queue[0]['expected_params']
|
378 |
-
if expected_params is None:
|
379 |
-
return
|
380 |
-
|
381 |
-
# Validate the parameters are equal
|
382 |
-
for param, value in expected_params.items():
|
383 |
-
if param not in params or expected_params[param] != params[param]:
|
384 |
-
raise StubAssertionError(
|
385 |
-
operation_name=model.name,
|
386 |
-
reason='Expected parameters:\n%s,\nbut received:\n%s'
|
387 |
-
% (pformat(expected_params), pformat(params)),
|
388 |
-
)
|
389 |
-
|
390 |
-
# Ensure there are no extra params hanging around
|
391 |
-
if sorted(expected_params.keys()) != sorted(params.keys()):
|
392 |
-
raise StubAssertionError(
|
393 |
-
operation_name=model.name,
|
394 |
-
reason='Expected parameters:\n%s,\nbut received:\n%s'
|
395 |
-
% (pformat(expected_params), pformat(params)),
|
396 |
-
)
|
397 |
-
|
398 |
-
def _should_not_stub(self, context):
|
399 |
-
# Do not include presign requests when processing stubbed client calls
|
400 |
-
# as a presign request will never have an HTTP request sent over the
|
401 |
-
# wire for it and therefore not receive a response back.
|
402 |
-
if context and context.get('is_presign_request'):
|
403 |
-
return True
|
404 |
-
|
405 |
-
def _validate_operation_response(self, operation_name, service_response):
|
406 |
-
service_model = self.client.meta.service_model
|
407 |
-
operation_model = service_model.operation_model(operation_name)
|
408 |
-
output_shape = operation_model.output_shape
|
409 |
-
|
410 |
-
# Remove ResponseMetadata so that the validator doesn't attempt to
|
411 |
-
# perform validation on it.
|
412 |
-
response = service_response
|
413 |
-
if 'ResponseMetadata' in response:
|
414 |
-
response = copy.copy(service_response)
|
415 |
-
del response['ResponseMetadata']
|
416 |
-
|
417 |
-
self._validate_response(output_shape, response)
|
418 |
-
|
419 |
-
def _validate_response(self, shape, response):
|
420 |
-
if shape is not None:
|
421 |
-
validate_parameters(response, shape)
|
422 |
-
elif response:
|
423 |
-
# If the output shape is None, that means the response should be
|
424 |
-
# empty apart from ResponseMetadata
|
425 |
-
raise ParamValidationError(
|
426 |
-
report=(
|
427 |
-
"Service response should only contain ResponseMetadata."
|
428 |
-
)
|
429 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Billyosoro/ESRGAN/tests/test_dataset.py
DELETED
@@ -1,151 +0,0 @@
|
|
1 |
-
import pytest
|
2 |
-
import yaml
|
3 |
-
|
4 |
-
from realesrgan.data.realesrgan_dataset import RealESRGANDataset
|
5 |
-
from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset
|
6 |
-
|
7 |
-
|
8 |
-
def test_realesrgan_dataset():
|
9 |
-
|
10 |
-
with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f:
|
11 |
-
opt = yaml.load(f, Loader=yaml.FullLoader)
|
12 |
-
|
13 |
-
dataset = RealESRGANDataset(opt)
|
14 |
-
assert dataset.io_backend_opt['type'] == 'disk' # io backend
|
15 |
-
assert len(dataset) == 2 # whether to read correct meta info
|
16 |
-
assert dataset.kernel_list == [
|
17 |
-
'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
|
18 |
-
] # correct initialization the degradation configurations
|
19 |
-
assert dataset.betag_range2 == [0.5, 4]
|
20 |
-
|
21 |
-
# test __getitem__
|
22 |
-
result = dataset.__getitem__(0)
|
23 |
-
# check returned keys
|
24 |
-
expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
|
25 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
26 |
-
# check shape and contents
|
27 |
-
assert result['gt'].shape == (3, 400, 400)
|
28 |
-
assert result['kernel1'].shape == (21, 21)
|
29 |
-
assert result['kernel2'].shape == (21, 21)
|
30 |
-
assert result['sinc_kernel'].shape == (21, 21)
|
31 |
-
assert result['gt_path'] == 'tests/data/gt/baboon.png'
|
32 |
-
|
33 |
-
# ------------------ test lmdb backend -------------------- #
|
34 |
-
opt['dataroot_gt'] = 'tests/data/gt.lmdb'
|
35 |
-
opt['io_backend']['type'] = 'lmdb'
|
36 |
-
|
37 |
-
dataset = RealESRGANDataset(opt)
|
38 |
-
assert dataset.io_backend_opt['type'] == 'lmdb' # io backend
|
39 |
-
assert len(dataset.paths) == 2 # whether to read correct meta info
|
40 |
-
assert dataset.kernel_list == [
|
41 |
-
'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
|
42 |
-
] # correct initialization the degradation configurations
|
43 |
-
assert dataset.betag_range2 == [0.5, 4]
|
44 |
-
|
45 |
-
# test __getitem__
|
46 |
-
result = dataset.__getitem__(1)
|
47 |
-
# check returned keys
|
48 |
-
expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
|
49 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
50 |
-
# check shape and contents
|
51 |
-
assert result['gt'].shape == (3, 400, 400)
|
52 |
-
assert result['kernel1'].shape == (21, 21)
|
53 |
-
assert result['kernel2'].shape == (21, 21)
|
54 |
-
assert result['sinc_kernel'].shape == (21, 21)
|
55 |
-
assert result['gt_path'] == 'comic'
|
56 |
-
|
57 |
-
# ------------------ test with sinc_prob = 0 -------------------- #
|
58 |
-
opt['dataroot_gt'] = 'tests/data/gt.lmdb'
|
59 |
-
opt['io_backend']['type'] = 'lmdb'
|
60 |
-
opt['sinc_prob'] = 0
|
61 |
-
opt['sinc_prob2'] = 0
|
62 |
-
opt['final_sinc_prob'] = 0
|
63 |
-
dataset = RealESRGANDataset(opt)
|
64 |
-
result = dataset.__getitem__(0)
|
65 |
-
# check returned keys
|
66 |
-
expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
|
67 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
68 |
-
# check shape and contents
|
69 |
-
assert result['gt'].shape == (3, 400, 400)
|
70 |
-
assert result['kernel1'].shape == (21, 21)
|
71 |
-
assert result['kernel2'].shape == (21, 21)
|
72 |
-
assert result['sinc_kernel'].shape == (21, 21)
|
73 |
-
assert result['gt_path'] == 'baboon'
|
74 |
-
|
75 |
-
# ------------------ lmdb backend should have paths ends with lmdb -------------------- #
|
76 |
-
with pytest.raises(ValueError):
|
77 |
-
opt['dataroot_gt'] = 'tests/data/gt'
|
78 |
-
opt['io_backend']['type'] = 'lmdb'
|
79 |
-
dataset = RealESRGANDataset(opt)
|
80 |
-
|
81 |
-
|
82 |
-
def test_realesrgan_paired_dataset():
|
83 |
-
|
84 |
-
with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f:
|
85 |
-
opt = yaml.load(f, Loader=yaml.FullLoader)
|
86 |
-
|
87 |
-
dataset = RealESRGANPairedDataset(opt)
|
88 |
-
assert dataset.io_backend_opt['type'] == 'disk' # io backend
|
89 |
-
assert len(dataset) == 2 # whether to read correct meta info
|
90 |
-
|
91 |
-
# test __getitem__
|
92 |
-
result = dataset.__getitem__(0)
|
93 |
-
# check returned keys
|
94 |
-
expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
|
95 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
96 |
-
# check shape and contents
|
97 |
-
assert result['gt'].shape == (3, 128, 128)
|
98 |
-
assert result['lq'].shape == (3, 32, 32)
|
99 |
-
assert result['gt_path'] == 'tests/data/gt/baboon.png'
|
100 |
-
assert result['lq_path'] == 'tests/data/lq/baboon.png'
|
101 |
-
|
102 |
-
# ------------------ test lmdb backend -------------------- #
|
103 |
-
opt['dataroot_gt'] = 'tests/data/gt.lmdb'
|
104 |
-
opt['dataroot_lq'] = 'tests/data/lq.lmdb'
|
105 |
-
opt['io_backend']['type'] = 'lmdb'
|
106 |
-
|
107 |
-
dataset = RealESRGANPairedDataset(opt)
|
108 |
-
assert dataset.io_backend_opt['type'] == 'lmdb' # io backend
|
109 |
-
assert len(dataset) == 2 # whether to read correct meta info
|
110 |
-
|
111 |
-
# test __getitem__
|
112 |
-
result = dataset.__getitem__(1)
|
113 |
-
# check returned keys
|
114 |
-
expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
|
115 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
116 |
-
# check shape and contents
|
117 |
-
assert result['gt'].shape == (3, 128, 128)
|
118 |
-
assert result['lq'].shape == (3, 32, 32)
|
119 |
-
assert result['gt_path'] == 'comic'
|
120 |
-
assert result['lq_path'] == 'comic'
|
121 |
-
|
122 |
-
# ------------------ test paired_paths_from_folder -------------------- #
|
123 |
-
opt['dataroot_gt'] = 'tests/data/gt'
|
124 |
-
opt['dataroot_lq'] = 'tests/data/lq'
|
125 |
-
opt['io_backend'] = dict(type='disk')
|
126 |
-
opt['meta_info'] = None
|
127 |
-
|
128 |
-
dataset = RealESRGANPairedDataset(opt)
|
129 |
-
assert dataset.io_backend_opt['type'] == 'disk' # io backend
|
130 |
-
assert len(dataset) == 2 # whether to read correct meta info
|
131 |
-
|
132 |
-
# test __getitem__
|
133 |
-
result = dataset.__getitem__(0)
|
134 |
-
# check returned keys
|
135 |
-
expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
|
136 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
137 |
-
# check shape and contents
|
138 |
-
assert result['gt'].shape == (3, 128, 128)
|
139 |
-
assert result['lq'].shape == (3, 32, 32)
|
140 |
-
|
141 |
-
# ------------------ test normalization -------------------- #
|
142 |
-
dataset.mean = [0.5, 0.5, 0.5]
|
143 |
-
dataset.std = [0.5, 0.5, 0.5]
|
144 |
-
# test __getitem__
|
145 |
-
result = dataset.__getitem__(0)
|
146 |
-
# check returned keys
|
147 |
-
expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
|
148 |
-
assert set(expected_keys).issubset(set(result.keys()))
|
149 |
-
# check shape and contents
|
150 |
-
assert result['gt'].shape == (3, 128, 128)
|
151 |
-
assert result['lq'].shape == (3, 32, 32)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVH-vn1210/make_hair/minigpt4/conversation/conversation.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import time
|
3 |
-
from PIL import Image
|
4 |
-
|
5 |
-
import torch
|
6 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
|
7 |
-
from transformers import StoppingCriteria, StoppingCriteriaList
|
8 |
-
|
9 |
-
import dataclasses
|
10 |
-
from enum import auto, Enum
|
11 |
-
from typing import List, Tuple, Any
|
12 |
-
|
13 |
-
from minigpt4.common.registry import registry
|
14 |
-
|
15 |
-
|
16 |
-
class SeparatorStyle(Enum):
|
17 |
-
"""Different separator style."""
|
18 |
-
SINGLE = auto()
|
19 |
-
TWO = auto()
|
20 |
-
|
21 |
-
|
22 |
-
@dataclasses.dataclass
|
23 |
-
class Conversation:
|
24 |
-
"""A class that keeps all conversation history."""
|
25 |
-
system: str
|
26 |
-
roles: List[str]
|
27 |
-
messages: List[List[str]]
|
28 |
-
offset: int
|
29 |
-
# system_img: List[Image.Image] = []
|
30 |
-
sep_style: SeparatorStyle = SeparatorStyle.SINGLE
|
31 |
-
sep: str = "###"
|
32 |
-
sep2: str = None
|
33 |
-
|
34 |
-
skip_next: bool = False
|
35 |
-
conv_id: Any = None
|
36 |
-
|
37 |
-
def get_prompt(self):
|
38 |
-
if self.sep_style == SeparatorStyle.SINGLE:
|
39 |
-
ret = self.system + self.sep
|
40 |
-
for role, message in self.messages:
|
41 |
-
if message:
|
42 |
-
ret += role + ": " + message + self.sep
|
43 |
-
else:
|
44 |
-
ret += role + ":"
|
45 |
-
return ret
|
46 |
-
elif self.sep_style == SeparatorStyle.TWO:
|
47 |
-
seps = [self.sep, self.sep2]
|
48 |
-
ret = self.system + seps[0]
|
49 |
-
for i, (role, message) in enumerate(self.messages):
|
50 |
-
if message:
|
51 |
-
ret += role + ": " + message + seps[i % 2]
|
52 |
-
else:
|
53 |
-
ret += role + ":"
|
54 |
-
return ret
|
55 |
-
else:
|
56 |
-
raise ValueError(f"Invalid style: {self.sep_style}")
|
57 |
-
|
58 |
-
def append_message(self, role, message):
|
59 |
-
self.messages.append([role, message])
|
60 |
-
|
61 |
-
def to_gradio_chatbot(self):
|
62 |
-
ret = []
|
63 |
-
for i, (role, msg) in enumerate(self.messages[self.offset:]):
|
64 |
-
if i % 2 == 0:
|
65 |
-
ret.append([msg, None])
|
66 |
-
else:
|
67 |
-
ret[-1][-1] = msg
|
68 |
-
return ret
|
69 |
-
|
70 |
-
def copy(self):
|
71 |
-
return Conversation(
|
72 |
-
system=self.system,
|
73 |
-
# system_img=self.system_img,
|
74 |
-
roles=self.roles,
|
75 |
-
messages=[[x, y] for x, y in self.messages],
|
76 |
-
offset=self.offset,
|
77 |
-
sep_style=self.sep_style,
|
78 |
-
sep=self.sep,
|
79 |
-
sep2=self.sep2,
|
80 |
-
conv_id=self.conv_id)
|
81 |
-
|
82 |
-
def dict(self):
|
83 |
-
return {
|
84 |
-
"system": self.system,
|
85 |
-
# "system_img": self.system_img,
|
86 |
-
"roles": self.roles,
|
87 |
-
"messages": self.messages,
|
88 |
-
"offset": self.offset,
|
89 |
-
"sep": self.sep,
|
90 |
-
"sep2": self.sep2,
|
91 |
-
"conv_id": self.conv_id,
|
92 |
-
}
|
93 |
-
|
94 |
-
|
95 |
-
class StoppingCriteriaSub(StoppingCriteria):
|
96 |
-
|
97 |
-
def __init__(self, stops=[], encounters=1):
|
98 |
-
super().__init__()
|
99 |
-
self.stops = stops
|
100 |
-
|
101 |
-
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
|
102 |
-
for stop in self.stops:
|
103 |
-
if torch.all((stop == input_ids[0][-len(stop):])).item():
|
104 |
-
return True
|
105 |
-
|
106 |
-
return False
|
107 |
-
|
108 |
-
|
109 |
-
CONV_VISION = Conversation(
|
110 |
-
system="Give the following image: <Img>ImageContent</Img>. "
|
111 |
-
"You will be able to see the image once I provide it to you. Please answer my questions.",
|
112 |
-
roles=("Human", "Assistant"),
|
113 |
-
messages=[],
|
114 |
-
offset=2,
|
115 |
-
sep_style=SeparatorStyle.SINGLE,
|
116 |
-
sep="###",
|
117 |
-
)
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
class Chat:
|
122 |
-
def __init__(self, model, vis_processor, device='cuda:0'):
|
123 |
-
self.device = device
|
124 |
-
self.model = model
|
125 |
-
self.vis_processor = vis_processor
|
126 |
-
stop_words_ids = [torch.tensor([835]).to(self.device),
|
127 |
-
torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways.
|
128 |
-
self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
|
129 |
-
|
130 |
-
def ask(self, text, conv):
|
131 |
-
if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[0] \
|
132 |
-
and conv.messages[-1][1][-6:] == '</Img>': # last message is image.
|
133 |
-
conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text])
|
134 |
-
else:
|
135 |
-
conv.append_message(conv.roles[0], text)
|
136 |
-
|
137 |
-
def answer(self, conv, img_list, max_new_tokens=200, num_beams=5, min_length=1, top_p=0.9,
|
138 |
-
repetition_penalty=1.0, length_penalty=1, temperature=1, max_length=2000):
|
139 |
-
conv.append_message(conv.roles[1], None)
|
140 |
-
embs = self.get_context_emb(conv, img_list)
|
141 |
-
|
142 |
-
# current_max_len = embs.shape[1] + max_new_tokens + 100
|
143 |
-
# begin_idx = max(0, current_max_len - max_length)
|
144 |
-
# embs = embs[:, begin_idx:]
|
145 |
-
outputs = self.model.llama_model.generate(
|
146 |
-
inputs_embeds=embs,
|
147 |
-
max_new_tokens=max_new_tokens,
|
148 |
-
stopping_criteria=self.stopping_criteria,
|
149 |
-
num_beams=num_beams,
|
150 |
-
min_length=min_length,
|
151 |
-
top_p=top_p,
|
152 |
-
repetition_penalty=repetition_penalty,
|
153 |
-
length_penalty=length_penalty,
|
154 |
-
temperature=temperature,
|
155 |
-
)
|
156 |
-
output_token = outputs[0]
|
157 |
-
if output_token[0] == 0:
|
158 |
-
output_token = output_token[1:]
|
159 |
-
output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False)
|
160 |
-
output_text = output_text.split('###')[0] # remove the stop sign '###'
|
161 |
-
output_text = output_text.split('Assistant:')[-1].strip()
|
162 |
-
conv.messages[-1][1] = output_text
|
163 |
-
return output_text, output_token.cpu().numpy()
|
164 |
-
|
165 |
-
def upload_img(self, image, conv, img_list):
|
166 |
-
if isinstance(image, str): # is a image path
|
167 |
-
raw_image = Image.open(image).convert('RGB')
|
168 |
-
image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
|
169 |
-
elif isinstance(image, Image.Image):
|
170 |
-
raw_image = image
|
171 |
-
image = self.vis_processor(raw_image).unsqueeze(0).to(self.device)
|
172 |
-
elif isinstance(image, torch.Tensor):
|
173 |
-
if len(image.shape) == 3:
|
174 |
-
image = image.unsqueeze(0)
|
175 |
-
image = image.to(self.device)
|
176 |
-
|
177 |
-
image_emb, _ = self.model.encode_img(image)
|
178 |
-
img_list.append(image_emb)
|
179 |
-
conv.append_message(conv.roles[0], "<Img><ImageHere></Img>")
|
180 |
-
msg = "Received."
|
181 |
-
# self.conv.append_message(self.conv.roles[1], msg)
|
182 |
-
return msg
|
183 |
-
|
184 |
-
def get_context_emb(self, conv, img_list):
|
185 |
-
prompt = conv.get_prompt()
|
186 |
-
prompt_segs = prompt.split('<ImageHere>')
|
187 |
-
assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images."
|
188 |
-
seg_tokens = [
|
189 |
-
self.model.llama_tokenizer(
|
190 |
-
seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids
|
191 |
-
# only add bos to the first seg
|
192 |
-
for i, seg in enumerate(prompt_segs)
|
193 |
-
]
|
194 |
-
seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens]
|
195 |
-
mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]
|
196 |
-
mixed_embs = torch.cat(mixed_embs, dim=1)
|
197 |
-
return mixed_embs
|
198 |
-
|
199 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVH-vn1210/make_hair/minigpt4/models/__init__.py
DELETED
@@ -1,200 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Copyright (c) 2022, salesforce.com, inc.
|
3 |
-
All rights reserved.
|
4 |
-
SPDX-License-Identifier: BSD-3-Clause
|
5 |
-
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
6 |
-
"""
|
7 |
-
|
8 |
-
import logging
|
9 |
-
import torch
|
10 |
-
from omegaconf import OmegaConf
|
11 |
-
|
12 |
-
from minigpt4.common.registry import registry
|
13 |
-
from minigpt4.models.base_model import BaseModel
|
14 |
-
from minigpt4.models.blip2 import Blip2Base
|
15 |
-
from minigpt4.models.mini_gpt4 import MiniGPT4
|
16 |
-
from minigpt4.processors.base_processor import BaseProcessor
|
17 |
-
|
18 |
-
|
19 |
-
__all__ = [
|
20 |
-
"load_model",
|
21 |
-
"BaseModel",
|
22 |
-
"Blip2Base",
|
23 |
-
"MiniGPT4",
|
24 |
-
]
|
25 |
-
|
26 |
-
|
27 |
-
def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None):
|
28 |
-
"""
|
29 |
-
Load supported models.
|
30 |
-
|
31 |
-
To list all available models and types in registry:
|
32 |
-
>>> from minigpt4.models import model_zoo
|
33 |
-
>>> print(model_zoo)
|
34 |
-
|
35 |
-
Args:
|
36 |
-
name (str): name of the model.
|
37 |
-
model_type (str): type of the model.
|
38 |
-
is_eval (bool): whether the model is in eval mode. Default: False.
|
39 |
-
device (str): device to use. Default: "cpu".
|
40 |
-
checkpoint (str): path or to checkpoint. Default: None.
|
41 |
-
Note that expecting the checkpoint to have the same keys in state_dict as the model.
|
42 |
-
|
43 |
-
Returns:
|
44 |
-
model (torch.nn.Module): model.
|
45 |
-
"""
|
46 |
-
|
47 |
-
model = registry.get_model_class(name).from_pretrained(model_type=model_type)
|
48 |
-
|
49 |
-
if checkpoint is not None:
|
50 |
-
model.load_checkpoint(checkpoint)
|
51 |
-
|
52 |
-
if is_eval:
|
53 |
-
model.eval()
|
54 |
-
|
55 |
-
if device == "cpu":
|
56 |
-
model = model.float()
|
57 |
-
|
58 |
-
return model.to(device)
|
59 |
-
|
60 |
-
|
61 |
-
def load_preprocess(config):
|
62 |
-
"""
|
63 |
-
Load preprocessor configs and construct preprocessors.
|
64 |
-
|
65 |
-
If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing.
|
66 |
-
|
67 |
-
Args:
|
68 |
-
config (dict): preprocessor configs.
|
69 |
-
|
70 |
-
Returns:
|
71 |
-
vis_processors (dict): preprocessors for visual inputs.
|
72 |
-
txt_processors (dict): preprocessors for text inputs.
|
73 |
-
|
74 |
-
Key is "train" or "eval" for processors used in training and evaluation respectively.
|
75 |
-
"""
|
76 |
-
|
77 |
-
def _build_proc_from_cfg(cfg):
|
78 |
-
return (
|
79 |
-
registry.get_processor_class(cfg.name).from_config(cfg)
|
80 |
-
if cfg is not None
|
81 |
-
else BaseProcessor()
|
82 |
-
)
|
83 |
-
|
84 |
-
vis_processors = dict()
|
85 |
-
txt_processors = dict()
|
86 |
-
|
87 |
-
vis_proc_cfg = config.get("vis_processor")
|
88 |
-
txt_proc_cfg = config.get("text_processor")
|
89 |
-
|
90 |
-
if vis_proc_cfg is not None:
|
91 |
-
vis_train_cfg = vis_proc_cfg.get("train")
|
92 |
-
vis_eval_cfg = vis_proc_cfg.get("eval")
|
93 |
-
else:
|
94 |
-
vis_train_cfg = None
|
95 |
-
vis_eval_cfg = None
|
96 |
-
|
97 |
-
vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg)
|
98 |
-
vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg)
|
99 |
-
|
100 |
-
if txt_proc_cfg is not None:
|
101 |
-
txt_train_cfg = txt_proc_cfg.get("train")
|
102 |
-
txt_eval_cfg = txt_proc_cfg.get("eval")
|
103 |
-
else:
|
104 |
-
txt_train_cfg = None
|
105 |
-
txt_eval_cfg = None
|
106 |
-
|
107 |
-
txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg)
|
108 |
-
txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg)
|
109 |
-
|
110 |
-
return vis_processors, txt_processors
|
111 |
-
|
112 |
-
|
113 |
-
def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"):
|
114 |
-
"""
|
115 |
-
Load model and its related preprocessors.
|
116 |
-
|
117 |
-
List all available models and types in registry:
|
118 |
-
>>> from minigpt4.models import model_zoo
|
119 |
-
>>> print(model_zoo)
|
120 |
-
|
121 |
-
Args:
|
122 |
-
name (str): name of the model.
|
123 |
-
model_type (str): type of the model.
|
124 |
-
is_eval (bool): whether the model is in eval mode. Default: False.
|
125 |
-
device (str): device to use. Default: "cpu".
|
126 |
-
|
127 |
-
Returns:
|
128 |
-
model (torch.nn.Module): model.
|
129 |
-
vis_processors (dict): preprocessors for visual inputs.
|
130 |
-
txt_processors (dict): preprocessors for text inputs.
|
131 |
-
"""
|
132 |
-
model_cls = registry.get_model_class(name)
|
133 |
-
|
134 |
-
# load model
|
135 |
-
model = model_cls.from_pretrained(model_type=model_type)
|
136 |
-
|
137 |
-
if is_eval:
|
138 |
-
model.eval()
|
139 |
-
|
140 |
-
# load preprocess
|
141 |
-
cfg = OmegaConf.load(model_cls.default_config_path(model_type))
|
142 |
-
if cfg is not None:
|
143 |
-
preprocess_cfg = cfg.preprocess
|
144 |
-
|
145 |
-
vis_processors, txt_processors = load_preprocess(preprocess_cfg)
|
146 |
-
else:
|
147 |
-
vis_processors, txt_processors = None, None
|
148 |
-
logging.info(
|
149 |
-
f"""No default preprocess for model {name} ({model_type}).
|
150 |
-
This can happen if the model is not finetuned on downstream datasets,
|
151 |
-
or it is not intended for direct use without finetuning.
|
152 |
-
"""
|
153 |
-
)
|
154 |
-
|
155 |
-
if device == "cpu" or device == torch.device("cpu"):
|
156 |
-
model = model.float()
|
157 |
-
|
158 |
-
return model.to(device), vis_processors, txt_processors
|
159 |
-
|
160 |
-
|
161 |
-
class ModelZoo:
|
162 |
-
"""
|
163 |
-
A utility class to create string representation of available model architectures and types.
|
164 |
-
|
165 |
-
>>> from minigpt4.models import model_zoo
|
166 |
-
>>> # list all available models
|
167 |
-
>>> print(model_zoo)
|
168 |
-
>>> # show total number of models
|
169 |
-
>>> print(len(model_zoo))
|
170 |
-
"""
|
171 |
-
|
172 |
-
def __init__(self) -> None:
|
173 |
-
self.model_zoo = {
|
174 |
-
k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys())
|
175 |
-
for k, v in registry.mapping["model_name_mapping"].items()
|
176 |
-
}
|
177 |
-
|
178 |
-
def __str__(self) -> str:
|
179 |
-
return (
|
180 |
-
"=" * 50
|
181 |
-
+ "\n"
|
182 |
-
+ f"{'Architectures':<30} {'Types'}\n"
|
183 |
-
+ "=" * 50
|
184 |
-
+ "\n"
|
185 |
-
+ "\n".join(
|
186 |
-
[
|
187 |
-
f"{name:<30} {', '.join(types)}"
|
188 |
-
for name, types in self.model_zoo.items()
|
189 |
-
]
|
190 |
-
)
|
191 |
-
)
|
192 |
-
|
193 |
-
def __iter__(self):
|
194 |
-
return iter(self.model_zoo.items())
|
195 |
-
|
196 |
-
def __len__(self):
|
197 |
-
return sum([len(v) for v in self.model_zoo.values()])
|
198 |
-
|
199 |
-
|
200 |
-
model_zoo = ModelZoo()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/layers/__init__.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
from .swap_align2nat import SwapAlign2Nat, swap_align2nat
|
3 |
-
|
4 |
-
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/count.h
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system has no special version of this algorithm
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/models/dense_heads/corner_head.py
DELETED
@@ -1,1074 +0,0 @@
|
|
1 |
-
from logging import warning
|
2 |
-
from math import ceil, log
|
3 |
-
|
4 |
-
import torch
|
5 |
-
import torch.nn as nn
|
6 |
-
import torch.nn.functional as F
|
7 |
-
from mmcv.cnn import ConvModule, bias_init_with_prob
|
8 |
-
from mmcv.ops import CornerPool, batched_nms
|
9 |
-
|
10 |
-
from mmdet.core import multi_apply
|
11 |
-
from ..builder import HEADS, build_loss
|
12 |
-
from ..utils import gaussian_radius, gen_gaussian_target
|
13 |
-
from .base_dense_head import BaseDenseHead
|
14 |
-
|
15 |
-
|
16 |
-
class BiCornerPool(nn.Module):
|
17 |
-
"""Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
|
18 |
-
|
19 |
-
Args:
|
20 |
-
in_channels (int): Input channels of module.
|
21 |
-
out_channels (int): Output channels of module.
|
22 |
-
feat_channels (int): Feature channels of module.
|
23 |
-
directions (list[str]): Directions of two CornerPools.
|
24 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
25 |
-
"""
|
26 |
-
|
27 |
-
def __init__(self,
|
28 |
-
in_channels,
|
29 |
-
directions,
|
30 |
-
feat_channels=128,
|
31 |
-
out_channels=128,
|
32 |
-
norm_cfg=dict(type='BN', requires_grad=True)):
|
33 |
-
super(BiCornerPool, self).__init__()
|
34 |
-
self.direction1_conv = ConvModule(
|
35 |
-
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
|
36 |
-
self.direction2_conv = ConvModule(
|
37 |
-
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
|
38 |
-
|
39 |
-
self.aftpool_conv = ConvModule(
|
40 |
-
feat_channels,
|
41 |
-
out_channels,
|
42 |
-
3,
|
43 |
-
padding=1,
|
44 |
-
norm_cfg=norm_cfg,
|
45 |
-
act_cfg=None)
|
46 |
-
|
47 |
-
self.conv1 = ConvModule(
|
48 |
-
in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
|
49 |
-
self.conv2 = ConvModule(
|
50 |
-
in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
|
51 |
-
|
52 |
-
self.direction1_pool = CornerPool(directions[0])
|
53 |
-
self.direction2_pool = CornerPool(directions[1])
|
54 |
-
self.relu = nn.ReLU(inplace=True)
|
55 |
-
|
56 |
-
def forward(self, x):
|
57 |
-
"""Forward features from the upstream network.
|
58 |
-
|
59 |
-
Args:
|
60 |
-
x (tensor): Input feature of BiCornerPool.
|
61 |
-
|
62 |
-
Returns:
|
63 |
-
conv2 (tensor): Output feature of BiCornerPool.
|
64 |
-
"""
|
65 |
-
direction1_conv = self.direction1_conv(x)
|
66 |
-
direction2_conv = self.direction2_conv(x)
|
67 |
-
direction1_feat = self.direction1_pool(direction1_conv)
|
68 |
-
direction2_feat = self.direction2_pool(direction2_conv)
|
69 |
-
aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
|
70 |
-
conv1 = self.conv1(x)
|
71 |
-
relu = self.relu(aftpool_conv + conv1)
|
72 |
-
conv2 = self.conv2(relu)
|
73 |
-
return conv2
|
74 |
-
|
75 |
-
|
76 |
-
@HEADS.register_module()
|
77 |
-
class CornerHead(BaseDenseHead):
|
78 |
-
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
|
79 |
-
|
80 |
-
Code is modified from the `official github repo
|
81 |
-
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
|
82 |
-
kp.py#L73>`_ .
|
83 |
-
|
84 |
-
More details can be found in the `paper
|
85 |
-
<https://arxiv.org/abs/1808.01244>`_ .
|
86 |
-
|
87 |
-
Args:
|
88 |
-
num_classes (int): Number of categories excluding the background
|
89 |
-
category.
|
90 |
-
in_channels (int): Number of channels in the input feature map.
|
91 |
-
num_feat_levels (int): Levels of feature from the previous module. 2
|
92 |
-
for HourglassNet-104 and 1 for HourglassNet-52. Because
|
93 |
-
HourglassNet-104 outputs the final feature and intermediate
|
94 |
-
supervision feature and HourglassNet-52 only outputs the final
|
95 |
-
feature. Default: 2.
|
96 |
-
corner_emb_channels (int): Channel of embedding vector. Default: 1.
|
97 |
-
train_cfg (dict | None): Training config. Useless in CornerHead,
|
98 |
-
but we keep this variable for SingleStageDetector. Default: None.
|
99 |
-
test_cfg (dict | None): Testing config of CornerHead. Default: None.
|
100 |
-
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
|
101 |
-
GaussianFocalLoss.
|
102 |
-
loss_embedding (dict | None): Config of corner embedding loss. Default:
|
103 |
-
AssociativeEmbeddingLoss.
|
104 |
-
loss_offset (dict | None): Config of corner offset loss. Default:
|
105 |
-
SmoothL1Loss.
|
106 |
-
"""
|
107 |
-
|
108 |
-
def __init__(self,
             num_classes,
             in_channels,
             num_feat_levels=2,
             corner_emb_channels=1,
             train_cfg=None,
             test_cfg=None,
             loss_heatmap=dict(
                 type='GaussianFocalLoss',
                 alpha=2.0,
                 gamma=4.0,
                 loss_weight=1),
             loss_embedding=dict(
                 type='AssociativeEmbeddingLoss',
                 pull_weight=0.25,
                 push_weight=0.25),
             loss_offset=dict(
                 type='SmoothL1Loss', beta=1.0, loss_weight=1)):
    """Initialize the CornerHead.

    See the class docstring for the meaning of each argument.  The dict
    defaults are loss *configs* that are only read by ``build_loss`` and
    never mutated, so the shared-mutable-default pitfall does not apply
    here (this is the surrounding codebase's config convention).
    """
    super(CornerHead, self).__init__()
    self.num_classes = num_classes
    self.in_channels = in_channels
    self.corner_emb_channels = corner_emb_channels
    # The embedding branch is built/used only when its channel count is
    # positive; a subclass can disable it by passing 0.
    self.with_corner_emb = self.corner_emb_channels > 0
    # Corner offsets are always 2-channel (x, y sub-pixel offsets).
    self.corner_offset_channels = 2
    self.num_feat_levels = num_feat_levels
    # Any of the three losses may be disabled by passing None.
    self.loss_heatmap = build_loss(
        loss_heatmap) if loss_heatmap is not None else None
    self.loss_embedding = build_loss(
        loss_embedding) if loss_embedding is not None else None
    self.loss_offset = build_loss(
        loss_offset) if loss_offset is not None else None
    self.train_cfg = train_cfg
    self.test_cfg = test_cfg

    self._init_layers()
|
143 |
-
|
144 |
-
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
    """Build one prediction branch: a 3x3 feature conv followed by a
    1x1 prediction conv (no norm/activation on the prediction layer)."""
    feature_conv = ConvModule(in_channels, feat_channels, 3, padding=1)
    prediction_conv = ConvModule(
        feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)
    return nn.Sequential(feature_conv, prediction_conv)
|
150 |
-
|
151 |
-
def _init_corner_kpt_layers(self):
    """Initialize corner keypoint layers.

    Including corner heatmap branch and corner offset branch. Each branch
    has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
    """
    self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
    self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
    self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()

    # One set of branches per feature level (e.g. 2 for HourglassNet-104).
    for _ in range(self.num_feat_levels):
        # Corner pooling layers keep the channel count unchanged.
        self.tl_pool.append(
            BiCornerPool(
                self.in_channels, ['top', 'left'],
                out_channels=self.in_channels))
        self.br_pool.append(
            BiCornerPool(
                self.in_channels, ['bottom', 'right'],
                out_channels=self.in_channels))

        # Heatmap branches predict one channel per class.
        self.tl_heat.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))
        self.br_heat.append(
            self._make_layers(
                out_channels=self.num_classes,
                in_channels=self.in_channels))

        # Offset branches predict the 2-channel sub-pixel offset.
        self.tl_off.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))
        self.br_off.append(
            self._make_layers(
                out_channels=self.corner_offset_channels,
                in_channels=self.in_channels))
|
188 |
-
|
189 |
-
def _init_corner_emb_layers(self):
    """Initialize corner embedding layers.

    Only include corner embedding branch with two parts: prefix `tl_` for
    top-left and `br_` for bottom-right.
    """
    self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()

    # One embedding head per feature level, mirroring the keypoint heads.
    for _ in range(self.num_feat_levels):
        self.tl_emb.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
        self.br_emb.append(
            self._make_layers(
                out_channels=self.corner_emb_channels,
                in_channels=self.in_channels))
|
206 |
-
|
207 |
-
def _init_layers(self):
    """Initialize layers for CornerHead.

    Including two parts: corner keypoint layers and corner embedding layers
    """
    # Keypoint branches (pool/heatmap/offset) are always built; the
    # embedding branch exists only when corner_emb_channels > 0.
    self._init_corner_kpt_layers()
    if not self.with_corner_emb:
        return
    self._init_corner_emb_layers()
|
215 |
-
|
216 |
-
def init_weights(self):
    """Initialize weights of the head.

    Heatmap prediction convs get a bias that makes the initial sigmoid
    output ~0.1 (standard focal-loss style prior); all final convs are
    re-initialized with nn.Conv2d's default scheme.
    """
    bias_init = bias_init_with_prob(0.1)
    for i in range(self.num_feat_levels):
        # The initialization of parameters are different between nn.Conv2d
        # and ConvModule. Our experiments show that using the original
        # initialization of nn.Conv2d increases the final mAP by about 0.2%
        # `[-1]` is the 1x1 prediction conv produced by _make_layers.
        self.tl_heat[i][-1].conv.reset_parameters()
        self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.br_heat[i][-1].conv.reset_parameters()
        self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
        self.tl_off[i][-1].conv.reset_parameters()
        self.br_off[i][-1].conv.reset_parameters()
        if self.with_corner_emb:
            self.tl_emb[i][-1].conv.reset_parameters()
            self.br_emb[i][-1].conv.reset_parameters()
|
232 |
-
|
233 |
-
def forward(self, feats):
    """Forward features from the upstream network.

    Args:
        feats (tuple[Tensor]): Features from the upstream network, each is
            a 4D-tensor.

    Returns:
        tuple: Usually a tuple of corner heatmaps, offset heatmaps and
        embedding heatmaps.
            - tl_heats (list[Tensor]): Top-left corner heatmaps for all
              levels, each is a 4D-tensor, the channels number is
              num_classes.
            - br_heats (list[Tensor]): Bottom-right corner heatmaps for all
              levels, each is a 4D-tensor, the channels number is
              num_classes.
            - tl_embs (list[Tensor] | list[None]): Top-left embedding
              heatmaps for all levels, each is a 4D-tensor or None.
              If not None, the channels number is corner_emb_channels.
            - br_embs (list[Tensor] | list[None]): Bottom-right embedding
              heatmaps for all levels, each is a 4D-tensor or None.
              If not None, the channels number is corner_emb_channels.
            - tl_offs (list[Tensor]): Top-left offset heatmaps for all
              levels, each is a 4D-tensor. The channels number is
              corner_offset_channels.
            - br_offs (list[Tensor]): Bottom-right offset heatmaps for all
              levels, each is a 4D-tensor. The channels number is
              corner_offset_channels.
    """
    # multi_apply runs forward_single per level and transposes the
    # per-level result lists into per-output lists.
    lvl_ind = list(range(self.num_feat_levels))
    return multi_apply(self.forward_single, feats, lvl_ind)
|
264 |
-
|
265 |
-
def forward_single(self, x, lvl_ind, return_pool=False):
    """Forward feature of a single level.

    Args:
        x (Tensor): Feature of a single level.
        lvl_ind (int): Level index of current feature.
        return_pool (bool): Return corner pool feature or not.

    Returns:
        tuple[Tensor]: A tuple of CornerHead's output for current feature
        level. Containing the following Tensors:

            - tl_heat (Tensor): Predicted top-left corner heatmap.
            - br_heat (Tensor): Predicted bottom-right corner heatmap.
            - tl_emb (Tensor | None): Predicted top-left embedding heatmap.
              None for `self.with_corner_emb == False`.
            - br_emb (Tensor | None): Predicted bottom-right embedding
              heatmap. None for `self.with_corner_emb == False`.
            - tl_off (Tensor): Predicted top-left offset heatmap.
            - br_off (Tensor): Predicted bottom-right offset heatmap.
            - tl_pool (Tensor): Top-left corner pool feature. Not must
              have.
            - br_pool (Tensor): Bottom-right corner pool feature. Not must
              have.
    """
    # All prediction branches (heatmap/embedding/offset) consume the
    # corner-pooled feature, not the raw input feature.
    tl_pool = self.tl_pool[lvl_ind](x)
    tl_heat = self.tl_heat[lvl_ind](tl_pool)
    br_pool = self.br_pool[lvl_ind](x)
    br_heat = self.br_heat[lvl_ind](br_pool)

    tl_emb, br_emb = None, None
    if self.with_corner_emb:
        tl_emb = self.tl_emb[lvl_ind](tl_pool)
        br_emb = self.br_emb[lvl_ind](br_pool)

    tl_off = self.tl_off[lvl_ind](tl_pool)
    br_off = self.br_off[lvl_ind](br_pool)

    result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
    if return_pool:
        result_list.append(tl_pool)
        result_list.append(br_pool)

    return result_list
|
309 |
-
|
310 |
-
def get_targets(self,
                gt_bboxes,
                gt_labels,
                feat_shape,
                img_shape,
                with_corner_emb=False,
                with_guiding_shift=False,
                with_centripetal_shift=False):
    """Generate corner targets.

    Including corner heatmap, corner offset.

    Optional: corner embedding, corner guiding shift, centripetal shift.

    For CornerNet, we generate corner heatmap, corner offset and corner
    embedding from this function.

    For CentripetalNet, we generate corner heatmap, corner offset, guiding
    shift and centripetal shift from this function.

    Args:
        gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
            has shape (num_gt, 4).
        gt_labels (list[Tensor]): Ground truth labels of each box, each has
            shape (num_gt,).
        feat_shape (list[int]): Shape of output feature,
            [batch, channel, height, width].
        img_shape (list[int]): Shape of input image,
            [height, width, channel].
        with_corner_emb (bool): Generate corner embedding target or not.
            Default: False.
        with_guiding_shift (bool): Generate guiding shift target or not.
            Default: False.
        with_centripetal_shift (bool): Generate centripetal shift target or
            not. Default: False.

    Returns:
        dict: Ground truth of corner heatmap, corner offset, corner
        embedding, guiding shift and centripetal shift. Containing the
        following keys:

            - topleft_heatmap (Tensor): Ground truth top-left corner
              heatmap.
            - bottomright_heatmap (Tensor): Ground truth bottom-right
              corner heatmap.
            - topleft_offset (Tensor): Ground truth top-left corner offset.
            - bottomright_offset (Tensor): Ground truth bottom-right corner
              offset.
            - corner_embedding (list[list[list[int]]]): Ground truth corner
              embedding. Not must have.
            - topleft_guiding_shift (Tensor): Ground truth top-left corner
              guiding shift. Not must have.
            - bottomright_guiding_shift (Tensor): Ground truth bottom-right
              corner guiding shift. Not must have.
            - topleft_centripetal_shift (Tensor): Ground truth top-left
              corner centripetal shift. Not must have.
            - bottomright_centripetal_shift (Tensor): Ground truth
              bottom-right corner centripetal shift. Not must have.
    """
    batch_size, _, height, width = feat_shape
    img_h, img_w = img_shape[:2]

    # Feature-map / image scale factors used to map image coords onto
    # the (downsampled) target tensors.
    width_ratio = float(width / img_w)
    height_ratio = float(height / img_h)

    gt_tl_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_br_heatmap = gt_bboxes[-1].new_zeros(
        [batch_size, self.num_classes, height, width])
    gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
    gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])

    if with_corner_emb:
        match = []

    # Guiding shift is a kind of offset, from center to corner
    if with_guiding_shift:
        gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
    # Centripetal shift is also a kind of offset, from center to corner
    # and normalized by log.
    if with_centripetal_shift:
        gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])
        gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
            [batch_size, 2, height, width])

    for batch_id in range(batch_size):
        # Ground truth of corner embedding per image is a list of coord set
        corner_match = []
        for box_id in range(len(gt_labels[batch_id])):
            left, top, right, bottom = gt_bboxes[batch_id][box_id]
            center_x = (left + right) / 2.0
            center_y = (top + bottom) / 2.0
            label = gt_labels[batch_id][box_id]

            # Use coords in the feature level to generate ground truth
            scale_left = left * width_ratio
            scale_right = right * width_ratio
            scale_top = top * height_ratio
            scale_bottom = bottom * height_ratio
            scale_center_x = center_x * width_ratio
            scale_center_y = center_y * height_ratio

            # Int coords on feature map/ground truth tensor
            # (clamped so the corner never falls outside the map).
            left_idx = int(min(scale_left, width - 1))
            right_idx = int(min(scale_right, width - 1))
            top_idx = int(min(scale_top, height - 1))
            bottom_idx = int(min(scale_bottom, height - 1))

            # Generate gaussian heatmap
            scale_box_width = ceil(scale_right - scale_left)
            scale_box_height = ceil(scale_bottom - scale_top)
            radius = gaussian_radius((scale_box_height, scale_box_width),
                                     min_overlap=0.3)
            radius = max(0, int(radius))
            gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
                gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
                radius)
            gt_br_heatmap[batch_id, label] = gen_gaussian_target(
                gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
                radius)

            # Generate corner offset: the sub-pixel residual lost by the
            # int() discretization above.
            left_offset = scale_left - left_idx
            top_offset = scale_top - top_idx
            right_offset = scale_right - right_idx
            bottom_offset = scale_bottom - bottom_idx
            gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
            gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
            gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
            gt_br_offset[batch_id, 1, bottom_idx,
                         right_idx] = bottom_offset

            # Generate corner embedding
            if with_corner_emb:
                corner_match.append([[top_idx, left_idx],
                                     [bottom_idx, right_idx]])
            # Generate guiding shift
            if with_guiding_shift:
                gt_tl_guiding_shift[batch_id, 0, top_idx,
                                    left_idx] = scale_center_x - left_idx
                gt_tl_guiding_shift[batch_id, 1, top_idx,
                                    left_idx] = scale_center_y - top_idx
                gt_br_guiding_shift[batch_id, 0, bottom_idx,
                                    right_idx] = right_idx - scale_center_x
                gt_br_guiding_shift[
                    batch_id, 1, bottom_idx,
                    right_idx] = bottom_idx - scale_center_y
            # Generate centripetal shift
            if with_centripetal_shift:
                gt_tl_centripetal_shift[batch_id, 0, top_idx,
                                        left_idx] = log(scale_center_x -
                                                        scale_left)
                gt_tl_centripetal_shift[batch_id, 1, top_idx,
                                        left_idx] = log(scale_center_y -
                                                        scale_top)
                gt_br_centripetal_shift[batch_id, 0, bottom_idx,
                                        right_idx] = log(scale_right -
                                                         scale_center_x)
                gt_br_centripetal_shift[batch_id, 1, bottom_idx,
                                        right_idx] = log(scale_bottom -
                                                         scale_center_y)

        if with_corner_emb:
            match.append(corner_match)

    target_result = dict(
        topleft_heatmap=gt_tl_heatmap,
        topleft_offset=gt_tl_offset,
        bottomright_heatmap=gt_br_heatmap,
        bottomright_offset=gt_br_offset)

    # Optional targets are only present when their flag is set; consumers
    # must therefore not assume these keys exist unconditionally.
    if with_corner_emb:
        target_result.update(corner_embedding=match)
    if with_guiding_shift:
        target_result.update(
            topleft_guiding_shift=gt_tl_guiding_shift,
            bottomright_guiding_shift=gt_br_guiding_shift)
    if with_centripetal_shift:
        target_result.update(
            topleft_centripetal_shift=gt_tl_centripetal_shift,
            bottomright_centripetal_shift=gt_br_centripetal_shift)

    return target_result
|
497 |
-
|
498 |
-
def loss(self,
         tl_heats,
         br_heats,
         tl_embs,
         br_embs,
         tl_offs,
         br_offs,
         gt_bboxes,
         gt_labels,
         img_metas,
         gt_bboxes_ignore=None):
    """Compute losses of the head.

    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
            shape (num_gts, 4) in [left, top, right, bottom] format.
        gt_labels (list[Tensor]): Class indices corresponding to each box.
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
            boxes can be ignored when computing the loss.
            NOTE(review): currently unused by this implementation.

    Returns:
        dict[str, Tensor]: A dictionary of loss components. Containing the
        following losses:

            - det_loss (list[Tensor]): Corner keypoint losses of all
              feature levels.
            - pull_loss (list[Tensor]): Part one of AssociativeEmbedding
              losses of all feature levels.
            - push_loss (list[Tensor]): Part two of AssociativeEmbedding
              losses of all feature levels.
            - off_loss (list[Tensor]): Corner offset losses of all feature
              levels.
    """
    # Targets are built once from the last (highest-quality) level's
    # shape and shared across all levels.
    targets = self.get_targets(
        gt_bboxes,
        gt_labels,
        tl_heats[-1].shape,
        img_metas[0]['pad_shape'],
        with_corner_emb=self.with_corner_emb)
    mlvl_targets = [targets for _ in range(self.num_feat_levels)]
    det_losses, pull_losses, push_losses, off_losses = multi_apply(
        self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
        br_offs, mlvl_targets)
    loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
    if self.with_corner_emb:
        loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
    return loss_dict
|
559 |
-
|
560 |
-
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
                targets):
    """Compute losses for single level.

    Args:
        tl_hmp (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_hmp (Tensor): Bottom-right corner heatmap for current level with
            shape (N, num_classes, H, W).
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        targets (dict): Corner target generated by `get_targets`.

    Returns:
        tuple[torch.Tensor]: Losses of the head's different branches
        containing the following losses:

            - det_loss (Tensor): Corner keypoint loss.
            - pull_loss (Tensor): Part one of AssociativeEmbedding loss.
            - push_loss (Tensor): Part two of AssociativeEmbedding loss.
            - off_loss (Tensor): Corner offset loss.
    """
    gt_tl_hmp = targets['topleft_heatmap']
    gt_br_hmp = targets['bottomright_heatmap']
    gt_tl_off = targets['topleft_offset']
    gt_br_off = targets['bottomright_offset']
    # Fix: get_targets only adds 'corner_embedding' when the embedding
    # branch is enabled; indexing unconditionally raised KeyError for
    # heads configured with corner_emb_channels == 0.
    gt_embedding = targets.get('corner_embedding', None)

    # Detection loss: focal loss on sigmoid heatmaps, averaged over the
    # number of exact-1 (real corner) positions, floored at 1.
    tl_det_loss = self.loss_heatmap(
        tl_hmp.sigmoid(),
        gt_tl_hmp,
        avg_factor=max(1,
                       gt_tl_hmp.eq(1).sum()))
    br_det_loss = self.loss_heatmap(
        br_hmp.sigmoid(),
        gt_br_hmp,
        avg_factor=max(1,
                       gt_br_hmp.eq(1).sum()))
    det_loss = (tl_det_loss + br_det_loss) / 2.0

    # AssociativeEmbedding loss
    if self.with_corner_emb and self.loss_embedding is not None:
        pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
                                                   gt_embedding)
    else:
        pull_loss, push_loss = None, None

    # Offset loss
    # We only compute the offset loss at the real corner position.
    # The value of real corner would be 1 in heatmap ground truth.
    # The mask is computed in class agnostic mode and its shape is
    # batch * 1 * width * height.
    tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_tl_hmp)
    br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
        gt_br_hmp)
    tl_off_loss = self.loss_offset(
        tl_off,
        gt_tl_off,
        tl_off_mask,
        avg_factor=max(1, tl_off_mask.sum()))
    br_off_loss = self.loss_offset(
        br_off,
        gt_br_off,
        br_off_mask,
        avg_factor=max(1, br_off_mask.sum()))

    off_loss = (tl_off_loss + br_off_loss) / 2.0

    return det_loss, pull_loss, push_loss, off_loss
|
637 |
-
|
638 |
-
def get_bboxes(self,
               tl_heats,
               br_heats,
               tl_embs,
               br_embs,
               tl_offs,
               br_offs,
               img_metas,
               rescale=False,
               with_nms=True):
    """Transform network output for a batch into bbox predictions.

    Args:
        tl_heats (list[Tensor]): Top-left corner heatmaps for each level
            with shape (N, num_classes, H, W).
        br_heats (list[Tensor]): Bottom-right corner heatmaps for each
            level with shape (N, num_classes, H, W).
        tl_embs (list[Tensor]): Top-left corner embeddings for each level
            with shape (N, corner_emb_channels, H, W).
        br_embs (list[Tensor]): Bottom-right corner embeddings for each
            level with shape (N, corner_emb_channels, H, W).
        tl_offs (list[Tensor]): Top-left corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        br_offs (list[Tensor]): Bottom-right corner offsets for each level
            with shape (N, corner_offset_channels, H, W).
        img_metas (list[dict]): Meta information of each image, e.g.,
            image size, scaling factor, etc.
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.

    Returns:
        list[tuple[Tensor, Tensor]]: Per-image (detections, labels) pairs
        as produced by ``_get_bboxes_single``.
    """
    assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
    result_list = []
    for img_id in range(len(img_metas)):
        # Decode only the last (final supervision) feature level, one
        # image at a time; slicing keeps the batch dim of size 1.
        result_list.append(
            self._get_bboxes_single(
                tl_heats[-1][img_id:img_id + 1, :],
                br_heats[-1][img_id:img_id + 1, :],
                tl_offs[-1][img_id:img_id + 1, :],
                br_offs[-1][img_id:img_id + 1, :],
                img_metas[img_id],
                tl_emb=tl_embs[-1][img_id:img_id + 1, :],
                br_emb=br_embs[-1][img_id:img_id + 1, :],
                rescale=rescale,
                with_nms=with_nms))

    return result_list
|
686 |
-
|
687 |
-
def _get_bboxes_single(self,
                       tl_heat,
                       br_heat,
                       tl_off,
                       br_off,
                       img_meta,
                       tl_emb=None,
                       br_emb=None,
                       tl_centripetal_shift=None,
                       br_centripetal_shift=None,
                       rescale=False,
                       with_nms=True):
    """Transform outputs for a single batch item into bbox predictions.

    Args:
        tl_heat (Tensor): Top-left corner heatmap for current level with
            shape (N, num_classes, H, W).
        br_heat (Tensor): Bottom-right corner heatmap for current level
            with shape (N, num_classes, H, W).
        tl_off (Tensor): Top-left corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        br_off (Tensor): Bottom-right corner offset for current level with
            shape (N, corner_offset_channels, H, W).
        img_meta (dict): Meta information of current image, e.g.,
            image size, scaling factor, etc.
        tl_emb (Tensor): Top-left corner embedding for current level with
            shape (N, corner_emb_channels, H, W).
        br_emb (Tensor): Bottom-right corner embedding for current level
            with shape (N, corner_emb_channels, H, W).
        tl_centripetal_shift: Top-left corner's centripetal shift for
            current level with shape (N, 2, H, W).
        br_centripetal_shift: Bottom-right corner's centripetal shift for
            current level with shape (N, 2, H, W).
        rescale (bool): If True, return boxes in original image space.
            Default: False.
        with_nms (bool): If True, do nms before return boxes.
            Default: True.

    Returns:
        tuple[Tensor, Tensor]: ``(detections, labels)`` where detections
        has shape (n, 5) with the score in the last column.
    """
    if isinstance(img_meta, (list, tuple)):
        img_meta = img_meta[0]

    batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
        tl_heat=tl_heat.sigmoid(),
        br_heat=br_heat.sigmoid(),
        tl_off=tl_off,
        br_off=br_off,
        tl_emb=tl_emb,
        br_emb=br_emb,
        tl_centripetal_shift=tl_centripetal_shift,
        br_centripetal_shift=br_centripetal_shift,
        img_meta=img_meta,
        k=self.test_cfg.corner_topk,
        kernel=self.test_cfg.local_maximum_kernel,
        distance_threshold=self.test_cfg.distance_threshold)

    if rescale:
        batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])

    bboxes = batch_bboxes.view([-1, 4])
    scores = batch_scores.view([-1, 1])
    clses = batch_clses.view([-1, 1])

    # Sort all candidates by score, highest first.
    idx = scores.argsort(dim=0, descending=True)
    bboxes = bboxes[idx].view([-1, 4])
    scores = scores[idx].view(-1)
    clses = clses[idx].view(-1)

    detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
    # decode_heatmap marks invalid pairings with score -1; the -0.1
    # threshold keeps all genuinely scored candidates and drops those.
    keepinds = (detections[:, -1] > -0.1)
    detections = detections[keepinds]
    labels = clses[keepinds]

    if with_nms:
        detections, labels = self._bboxes_nms(detections, labels,
                                              self.test_cfg)

    return detections, labels
|
764 |
-
|
765 |
-
def _bboxes_nms(self, bboxes, labels, cfg):
|
766 |
-
if labels.numel() == 0:
|
767 |
-
return bboxes, labels
|
768 |
-
|
769 |
-
if 'nms_cfg' in cfg:
|
770 |
-
warning.warn('nms_cfg in test_cfg will be deprecated. '
|
771 |
-
'Please rename it as nms')
|
772 |
-
if 'nms' not in cfg:
|
773 |
-
cfg.nms = cfg.nms_cfg
|
774 |
-
|
775 |
-
out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
|
776 |
-
cfg.nms)
|
777 |
-
out_labels = labels[keep]
|
778 |
-
|
779 |
-
if len(out_bboxes) > 0:
|
780 |
-
idx = torch.argsort(out_bboxes[:, -1], descending=True)
|
781 |
-
idx = idx[:cfg.max_per_img]
|
782 |
-
out_bboxes = out_bboxes[idx]
|
783 |
-
out_labels = out_labels[idx]
|
784 |
-
|
785 |
-
return out_bboxes, out_labels
|
786 |
-
|
787 |
-
def _gather_feat(self, feat, ind, mask=None):
|
788 |
-
"""Gather feature according to index.
|
789 |
-
|
790 |
-
Args:
|
791 |
-
feat (Tensor): Target feature map.
|
792 |
-
ind (Tensor): Target coord index.
|
793 |
-
mask (Tensor | None): Mask of featuremap. Default: None.
|
794 |
-
|
795 |
-
Returns:
|
796 |
-
feat (Tensor): Gathered feature.
|
797 |
-
"""
|
798 |
-
dim = feat.size(2)
|
799 |
-
ind = ind.unsqueeze(2).repeat(1, 1, dim)
|
800 |
-
feat = feat.gather(1, ind)
|
801 |
-
if mask is not None:
|
802 |
-
mask = mask.unsqueeze(2).expand_as(feat)
|
803 |
-
feat = feat[mask]
|
804 |
-
feat = feat.view(-1, dim)
|
805 |
-
return feat
|
806 |
-
|
807 |
-
def _local_maximum(self, heat, kernel=3):
|
808 |
-
"""Extract local maximum pixel with given kernel.
|
809 |
-
|
810 |
-
Args:
|
811 |
-
heat (Tensor): Target heatmap.
|
812 |
-
kernel (int): Kernel size of max pooling. Default: 3.
|
813 |
-
|
814 |
-
Returns:
|
815 |
-
heat (Tensor): A heatmap where local maximum pixels maintain its
|
816 |
-
own value and other positions are 0.
|
817 |
-
"""
|
818 |
-
pad = (kernel - 1) // 2
|
819 |
-
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
|
820 |
-
keep = (hmax == heat).float()
|
821 |
-
return heat * keep
|
822 |
-
|
823 |
-
def _transpose_and_gather_feat(self, feat, ind):
    """Transpose and gather feature according to index.

    Args:
        feat (Tensor): Target feature map with shape (batch, C, H, W).
        ind (Tensor): Target coord index into the flattened H*W plane.

    Returns:
        Tensor: Transposed and gathered feature.
    """
    batch, channels = feat.size(0), feat.size(1)
    # Move channels last so each spatial location becomes one row, then
    # delegate the row selection to _gather_feat.
    channel_last = feat.permute(0, 2, 3, 1).contiguous()
    flattened = channel_last.view(batch, -1, channels)
    return self._gather_feat(flattened, ind)
|
837 |
-
|
838 |
-
def _topk(self, scores, k=20):
|
839 |
-
"""Get top k positions from heatmap.
|
840 |
-
|
841 |
-
Args:
|
842 |
-
scores (Tensor): Target heatmap with shape
|
843 |
-
[batch, num_classes, height, width].
|
844 |
-
k (int): Target number. Default: 20.
|
845 |
-
|
846 |
-
Returns:
|
847 |
-
tuple[torch.Tensor]: Scores, indexes, categories and coords of
|
848 |
-
topk keypoint. Containing following Tensors:
|
849 |
-
|
850 |
-
- topk_scores (Tensor): Max scores of each topk keypoint.
|
851 |
-
- topk_inds (Tensor): Indexes of each topk keypoint.
|
852 |
-
- topk_clses (Tensor): Categories of each topk keypoint.
|
853 |
-
- topk_ys (Tensor): Y-coord of each topk keypoint.
|
854 |
-
- topk_xs (Tensor): X-coord of each topk keypoint.
|
855 |
-
"""
|
856 |
-
batch, _, height, width = scores.size()
|
857 |
-
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
|
858 |
-
topk_clses = topk_inds // (height * width)
|
859 |
-
topk_inds = topk_inds % (height * width)
|
860 |
-
topk_ys = topk_inds // width
|
861 |
-
topk_xs = (topk_inds % width).int().float()
|
862 |
-
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
|
863 |
-
|
864 |
-
def decode_heatmap(self,
|
865 |
-
tl_heat,
|
866 |
-
br_heat,
|
867 |
-
tl_off,
|
868 |
-
br_off,
|
869 |
-
tl_emb=None,
|
870 |
-
br_emb=None,
|
871 |
-
tl_centripetal_shift=None,
|
872 |
-
br_centripetal_shift=None,
|
873 |
-
img_meta=None,
|
874 |
-
k=100,
|
875 |
-
kernel=3,
|
876 |
-
distance_threshold=0.5,
|
877 |
-
num_dets=1000):
|
878 |
-
"""Transform outputs for a single batch item into raw bbox predictions.
|
879 |
-
|
880 |
-
Args:
|
881 |
-
tl_heat (Tensor): Top-left corner heatmap for current level with
|
882 |
-
shape (N, num_classes, H, W).
|
883 |
-
br_heat (Tensor): Bottom-right corner heatmap for current level
|
884 |
-
with shape (N, num_classes, H, W).
|
885 |
-
tl_off (Tensor): Top-left corner offset for current level with
|
886 |
-
shape (N, corner_offset_channels, H, W).
|
887 |
-
br_off (Tensor): Bottom-right corner offset for current level with
|
888 |
-
shape (N, corner_offset_channels, H, W).
|
889 |
-
tl_emb (Tensor | None): Top-left corner embedding for current
|
890 |
-
level with shape (N, corner_emb_channels, H, W).
|
891 |
-
br_emb (Tensor | None): Bottom-right corner embedding for current
|
892 |
-
level with shape (N, corner_emb_channels, H, W).
|
893 |
-
tl_centripetal_shift (Tensor | None): Top-left centripetal shift
|
894 |
-
for current level with shape (N, 2, H, W).
|
895 |
-
br_centripetal_shift (Tensor | None): Bottom-right centripetal
|
896 |
-
shift for current level with shape (N, 2, H, W).
|
897 |
-
img_meta (dict): Meta information of current image, e.g.,
|
898 |
-
image size, scaling factor, etc.
|
899 |
-
k (int): Get top k corner keypoints from heatmap.
|
900 |
-
kernel (int): Max pooling kernel for extract local maximum pixels.
|
901 |
-
distance_threshold (float): Distance threshold. Top-left and
|
902 |
-
bottom-right corner keypoints with feature distance less than
|
903 |
-
the threshold will be regarded as keypoints from same object.
|
904 |
-
num_dets (int): Num of raw boxes before doing nms.
|
905 |
-
|
906 |
-
Returns:
|
907 |
-
tuple[torch.Tensor]: Decoded output of CornerHead, containing the
|
908 |
-
following Tensors:
|
909 |
-
|
910 |
-
- bboxes (Tensor): Coords of each box.
|
911 |
-
- scores (Tensor): Scores of each box.
|
912 |
-
- clses (Tensor): Categories of each box.
|
913 |
-
"""
|
914 |
-
with_embedding = tl_emb is not None and br_emb is not None
|
915 |
-
with_centripetal_shift = (
|
916 |
-
tl_centripetal_shift is not None
|
917 |
-
and br_centripetal_shift is not None)
|
918 |
-
assert with_embedding + with_centripetal_shift == 1
|
919 |
-
batch, _, height, width = tl_heat.size()
|
920 |
-
inp_h, inp_w, _ = img_meta['pad_shape']
|
921 |
-
|
922 |
-
# perform nms on heatmaps
|
923 |
-
tl_heat = self._local_maximum(tl_heat, kernel=kernel)
|
924 |
-
br_heat = self._local_maximum(br_heat, kernel=kernel)
|
925 |
-
|
926 |
-
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
|
927 |
-
br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)
|
928 |
-
|
929 |
-
# We use repeat instead of expand here because expand is a
|
930 |
-
# shallow-copy function. Thus it could cause unexpected testing result
|
931 |
-
# sometimes. Using expand will decrease about 10% mAP during testing
|
932 |
-
# compared to repeat.
|
933 |
-
tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
|
934 |
-
tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
|
935 |
-
br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
|
936 |
-
br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
|
937 |
-
|
938 |
-
tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
|
939 |
-
tl_off = tl_off.view(batch, k, 1, 2)
|
940 |
-
br_off = self._transpose_and_gather_feat(br_off, br_inds)
|
941 |
-
br_off = br_off.view(batch, 1, k, 2)
|
942 |
-
|
943 |
-
tl_xs = tl_xs + tl_off[..., 0]
|
944 |
-
tl_ys = tl_ys + tl_off[..., 1]
|
945 |
-
br_xs = br_xs + br_off[..., 0]
|
946 |
-
br_ys = br_ys + br_off[..., 1]
|
947 |
-
|
948 |
-
if with_centripetal_shift:
|
949 |
-
tl_centripetal_shift = self._transpose_and_gather_feat(
|
950 |
-
tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
|
951 |
-
br_centripetal_shift = self._transpose_and_gather_feat(
|
952 |
-
br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
|
953 |
-
|
954 |
-
tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
|
955 |
-
tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
|
956 |
-
br_ctxs = br_xs - br_centripetal_shift[..., 0]
|
957 |
-
br_ctys = br_ys - br_centripetal_shift[..., 1]
|
958 |
-
|
959 |
-
# all possible boxes based on top k corners (ignoring class)
|
960 |
-
tl_xs *= (inp_w / width)
|
961 |
-
tl_ys *= (inp_h / height)
|
962 |
-
br_xs *= (inp_w / width)
|
963 |
-
br_ys *= (inp_h / height)
|
964 |
-
|
965 |
-
if with_centripetal_shift:
|
966 |
-
tl_ctxs *= (inp_w / width)
|
967 |
-
tl_ctys *= (inp_h / height)
|
968 |
-
br_ctxs *= (inp_w / width)
|
969 |
-
br_ctys *= (inp_h / height)
|
970 |
-
|
971 |
-
x_off = img_meta['border'][2]
|
972 |
-
y_off = img_meta['border'][0]
|
973 |
-
|
974 |
-
tl_xs -= x_off
|
975 |
-
tl_ys -= y_off
|
976 |
-
br_xs -= x_off
|
977 |
-
br_ys -= y_off
|
978 |
-
|
979 |
-
tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
|
980 |
-
tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
|
981 |
-
br_xs *= br_xs.gt(0.0).type_as(br_xs)
|
982 |
-
br_ys *= br_ys.gt(0.0).type_as(br_ys)
|
983 |
-
|
984 |
-
bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
|
985 |
-
area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
|
986 |
-
|
987 |
-
if with_centripetal_shift:
|
988 |
-
tl_ctxs -= x_off
|
989 |
-
tl_ctys -= y_off
|
990 |
-
br_ctxs -= x_off
|
991 |
-
br_ctys -= y_off
|
992 |
-
|
993 |
-
tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
|
994 |
-
tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
|
995 |
-
br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
|
996 |
-
br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
|
997 |
-
|
998 |
-
ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
|
999 |
-
dim=3)
|
1000 |
-
area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
|
1001 |
-
|
1002 |
-
rcentral = torch.zeros_like(ct_bboxes)
|
1003 |
-
# magic nums from paper section 4.1
|
1004 |
-
mu = torch.ones_like(area_bboxes) / 2.4
|
1005 |
-
mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu
|
1006 |
-
|
1007 |
-
bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
|
1008 |
-
bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
|
1009 |
-
rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
|
1010 |
-
bboxes[..., 0]) / 2
|
1011 |
-
rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
|
1012 |
-
bboxes[..., 1]) / 2
|
1013 |
-
rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
|
1014 |
-
bboxes[..., 0]) / 2
|
1015 |
-
rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
|
1016 |
-
bboxes[..., 1]) / 2
|
1017 |
-
area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
|
1018 |
-
(rcentral[..., 3] - rcentral[..., 1])).abs()
|
1019 |
-
dists = area_ct_bboxes / area_rcentral
|
1020 |
-
|
1021 |
-
tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
|
1022 |
-
ct_bboxes[..., 0] >= rcentral[..., 2])
|
1023 |
-
tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
|
1024 |
-
ct_bboxes[..., 1] >= rcentral[..., 3])
|
1025 |
-
br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
|
1026 |
-
ct_bboxes[..., 2] >= rcentral[..., 2])
|
1027 |
-
br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
|
1028 |
-
ct_bboxes[..., 3] >= rcentral[..., 3])
|
1029 |
-
|
1030 |
-
if with_embedding:
|
1031 |
-
tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
|
1032 |
-
tl_emb = tl_emb.view(batch, k, 1)
|
1033 |
-
br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
|
1034 |
-
br_emb = br_emb.view(batch, 1, k)
|
1035 |
-
dists = torch.abs(tl_emb - br_emb)
|
1036 |
-
|
1037 |
-
tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
|
1038 |
-
br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
|
1039 |
-
|
1040 |
-
scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
|
1041 |
-
|
1042 |
-
# tl and br should have same class
|
1043 |
-
tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
|
1044 |
-
br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
|
1045 |
-
cls_inds = (tl_clses != br_clses)
|
1046 |
-
|
1047 |
-
# reject boxes based on distances
|
1048 |
-
dist_inds = dists > distance_threshold
|
1049 |
-
|
1050 |
-
# reject boxes based on widths and heights
|
1051 |
-
width_inds = (br_xs <= tl_xs)
|
1052 |
-
height_inds = (br_ys <= tl_ys)
|
1053 |
-
|
1054 |
-
scores[cls_inds] = -1
|
1055 |
-
scores[width_inds] = -1
|
1056 |
-
scores[height_inds] = -1
|
1057 |
-
scores[dist_inds] = -1
|
1058 |
-
if with_centripetal_shift:
|
1059 |
-
scores[tl_ctx_inds] = -1
|
1060 |
-
scores[tl_cty_inds] = -1
|
1061 |
-
scores[br_ctx_inds] = -1
|
1062 |
-
scores[br_cty_inds] = -1
|
1063 |
-
|
1064 |
-
scores = scores.view(batch, -1)
|
1065 |
-
scores, inds = torch.topk(scores, num_dets)
|
1066 |
-
scores = scores.unsqueeze(2)
|
1067 |
-
|
1068 |
-
bboxes = bboxes.view(batch, -1, 4)
|
1069 |
-
bboxes = self._gather_feat(bboxes, inds)
|
1070 |
-
|
1071 |
-
clses = tl_clses.contiguous().view(batch, -1, 1)
|
1072 |
-
clses = self._gather_feat(clses, inds).float()
|
1073 |
-
|
1074 |
-
return bboxes, scores, clses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/drawings-to-human/Makefile
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
install-node:
|
2 |
-
./install-node.sh
|
3 |
-
build-client:
|
4 |
-
cd frontend && npm install && npm run build && rm -rf ../static && cp -r build/ ../static/
|
5 |
-
build-dev:
|
6 |
-
cd frontend && npm install && NODE_ENV=development npm run build && rm -rf ../static 2>&1 && cp -rv build/ ../static/
|
7 |
-
run-dev:
|
8 |
-
FLASK_DEBUG=development python app.py
|
9 |
-
run-prod:
|
10 |
-
python app.py
|
11 |
-
build-all: run-prod
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/drawings-to-human/frontend/src/types.ts
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
export interface Color {
|
2 |
-
color: number[];
|
3 |
-
label: string;
|
4 |
-
}
|
5 |
-
export type RGB = `rgb(${number},${number},${number})`;
|
6 |
-
export interface Brush {
|
7 |
-
color: RGB;
|
8 |
-
size: number;
|
9 |
-
label: string;
|
10 |
-
}
|
11 |
-
|
12 |
-
export interface Params {
|
13 |
-
texture: string;
|
14 |
-
seed: bigint;
|
15 |
-
steps: number;
|
16 |
-
}
|
17 |
-
|
18 |
-
export interface FormElements extends HTMLCollection {
|
19 |
-
seed: HTMLInputElement;
|
20 |
-
steps: HTMLInputElement;
|
21 |
-
texture0: HTMLInputElement;
|
22 |
-
texture1: HTMLInputElement;
|
23 |
-
texture2: HTMLInputElement;
|
24 |
-
}
|
25 |
-
interface Point {
|
26 |
-
x: number;
|
27 |
-
y: number;
|
28 |
-
}
|
29 |
-
interface pxPoint {
|
30 |
-
from: Point;
|
31 |
-
to: Point;
|
32 |
-
}
|
33 |
-
export interface DrawingLayer {
|
34 |
-
brush: Brush;
|
35 |
-
points: pxPoint[];
|
36 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/utils/training_utils.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import random
|
2 |
-
import shutil
|
3 |
-
import time
|
4 |
-
import torch
|
5 |
-
# from torch.utils.tensorboard import SummaryWriter
|
6 |
-
|
7 |
-
from utils.visualization import *
|
8 |
-
from loguru import logger
|
9 |
-
|
10 |
-
# def get_tensorboard_logger_from_args(tensorboard_dir, reset_version=False):
|
11 |
-
# if reset_version:
|
12 |
-
# shutil.rmtree(os.path.join(tensorboard_dir))
|
13 |
-
# return SummaryWriter(log_dir=tensorboard_dir)
|
14 |
-
|
15 |
-
|
16 |
-
def get_optimizer_from_args(model, lr, weight_decay, **kwargs) -> torch.optim.Optimizer:
|
17 |
-
return torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr,
|
18 |
-
weight_decay=weight_decay)
|
19 |
-
|
20 |
-
|
21 |
-
def get_lr_schedule(optimizer):
|
22 |
-
return torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
|
23 |
-
|
24 |
-
|
25 |
-
def setup_seed(seed):
|
26 |
-
torch.manual_seed(seed)
|
27 |
-
torch.cuda.manual_seed_all(seed)
|
28 |
-
np.random.seed(seed)
|
29 |
-
random.seed(seed)
|
30 |
-
torch.backends.cudnn.deterministic = True
|
31 |
-
|
32 |
-
|
33 |
-
def get_dir_from_args(root_dir, class_name, **kwargs):
|
34 |
-
|
35 |
-
exp_name = f"{kwargs['dataset']}-k-{kwargs['k_shot']}"
|
36 |
-
|
37 |
-
csv_dir = os.path.join(root_dir, 'csv')
|
38 |
-
csv_path = os.path.join(csv_dir, f"{exp_name}-indx-{kwargs['experiment_indx']}.csv")
|
39 |
-
|
40 |
-
model_dir = os.path.join(root_dir, exp_name, 'models')
|
41 |
-
img_dir = os.path.join(root_dir, exp_name, 'imgs')
|
42 |
-
|
43 |
-
logger_dir = os.path.join(root_dir, exp_name, 'logger', class_name)
|
44 |
-
|
45 |
-
log_file_name = os.path.join(logger_dir,
|
46 |
-
f'log_{time.strftime("%Y-%m-%d-%H-%I-%S", time.localtime(time.time()))}.log')
|
47 |
-
|
48 |
-
model_name = f'{class_name}'
|
49 |
-
|
50 |
-
os.makedirs(model_dir, exist_ok=True)
|
51 |
-
os.makedirs(img_dir, exist_ok=True)
|
52 |
-
os.makedirs(logger_dir, exist_ok=True)
|
53 |
-
os.makedirs(csv_dir, exist_ok=True)
|
54 |
-
|
55 |
-
logger.start(log_file_name)
|
56 |
-
|
57 |
-
logger.info(f"===> Root dir for this experiment: {logger_dir}")
|
58 |
-
|
59 |
-
return model_dir, img_dir, logger_dir, model_name, csv_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CarlDennis/HYTTS/text/sanskrit.py
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from indic_transliteration import sanscript
|
3 |
-
|
4 |
-
|
5 |
-
# List of (iast, ipa) pairs:
|
6 |
-
_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
|
7 |
-
('a', 'ə'),
|
8 |
-
('ā', 'aː'),
|
9 |
-
('ī', 'iː'),
|
10 |
-
('ū', 'uː'),
|
11 |
-
('ṛ', 'ɹ`'),
|
12 |
-
('ṝ', 'ɹ`ː'),
|
13 |
-
('ḷ', 'l`'),
|
14 |
-
('ḹ', 'l`ː'),
|
15 |
-
('e', 'eː'),
|
16 |
-
('o', 'oː'),
|
17 |
-
('k', 'k⁼'),
|
18 |
-
('k⁼h', 'kʰ'),
|
19 |
-
('g', 'g⁼'),
|
20 |
-
('g⁼h', 'gʰ'),
|
21 |
-
('ṅ', 'ŋ'),
|
22 |
-
('c', 'ʧ⁼'),
|
23 |
-
('ʧ⁼h', 'ʧʰ'),
|
24 |
-
('j', 'ʥ⁼'),
|
25 |
-
('ʥ⁼h', 'ʥʰ'),
|
26 |
-
('ñ', 'n^'),
|
27 |
-
('ṭ', 't`⁼'),
|
28 |
-
('t`⁼h', 't`ʰ'),
|
29 |
-
('ḍ', 'd`⁼'),
|
30 |
-
('d`⁼h', 'd`ʰ'),
|
31 |
-
('ṇ', 'n`'),
|
32 |
-
('t', 't⁼'),
|
33 |
-
('t⁼h', 'tʰ'),
|
34 |
-
('d', 'd⁼'),
|
35 |
-
('d⁼h', 'dʰ'),
|
36 |
-
('p', 'p⁼'),
|
37 |
-
('p⁼h', 'pʰ'),
|
38 |
-
('b', 'b⁼'),
|
39 |
-
('b⁼h', 'bʰ'),
|
40 |
-
('y', 'j'),
|
41 |
-
('ś', 'ʃ'),
|
42 |
-
('ṣ', 's`'),
|
43 |
-
('r', 'ɾ'),
|
44 |
-
('l̤', 'l`'),
|
45 |
-
('h', 'ɦ'),
|
46 |
-
("'", ''),
|
47 |
-
('~', '^'),
|
48 |
-
('ṃ', '^')
|
49 |
-
]]
|
50 |
-
|
51 |
-
|
52 |
-
def devanagari_to_ipa(text):
|
53 |
-
text = text.replace('ॐ', 'ओम्')
|
54 |
-
text = re.sub(r'\s*।\s*$', '.', text)
|
55 |
-
text = re.sub(r'\s*।\s*', ', ', text)
|
56 |
-
text = re.sub(r'\s*॥', '.', text)
|
57 |
-
text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
|
58 |
-
for regex, replacement in _iast_to_ipa:
|
59 |
-
text = re.sub(regex, replacement, text)
|
60 |
-
text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
|
61 |
-
[:-1]+'h'+x.group(1)+'*', text)
|
62 |
-
return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChristopherMarais/Andrew_Alpha/Ambrosia.py
DELETED
@@ -1,296 +0,0 @@
|
|
1 |
-
# CLASS:
|
2 |
-
# pre_process_image
|
3 |
-
# METHODS:
|
4 |
-
# __init__
|
5 |
-
# INPUT:
|
6 |
-
# image_dir = (str) a full path to an image with multiple beetles and possibly a size reference circle
|
7 |
-
# manual_thresh_buffer (float) {optional} this is a manual way to control the binarizxing threshold.
|
8 |
-
# use this when beetles are broken up into multiple images\
|
9 |
-
# inputs should range from -1 to 1. higehr vlaues include lighter colors into the blobs and lower values reduce blob size
|
10 |
-
# OUTPUT(ATTRIBUTES):
|
11 |
-
# image_dir = (str) the same directory as is given as an input to the iamge that is being processed
|
12 |
-
# image = (np.array) the original compound image
|
13 |
-
# grey_image = (np.array) the original compound image in greyscale
|
14 |
-
# bw_image = (np.array) the original image in binary black and white
|
15 |
-
# inv_bw_image = (np.array) the original image inverted black and white binary
|
16 |
-
# clear_inv_bw_image = (np.array) the inverted black and white binary original image with all components touching the border removed
|
17 |
-
# segment
|
18 |
-
# INPUT:
|
19 |
-
# cluster_num = (int) {default=2} the number of clusters used for kmeans to pick only the cluster with alrgest blobs
|
20 |
-
# image_edge_buffer = (int) {default=50} number of pixels to add to box borders
|
21 |
-
# OUTPUT(ATTRIBUTES):
|
22 |
-
# cluster_num = (int) the same as the input
|
23 |
-
# image_edge_buffer = (int) the same as the input
|
24 |
-
# labeled_image = (np.array) the original compound image that is labelled
|
25 |
-
# max_kmeans_label = (int) the label of the cluster with the largest object/blob
|
26 |
-
# image_selected_df = (pd.DataFrame) a dataframe with columns describing each segmented image:
|
27 |
-
# 'centroid' = centre of the image
|
28 |
-
# 'bbox-0' = border 0
|
29 |
-
# 'bbox-1' = border 1
|
30 |
-
# 'bbox-2' = border 2
|
31 |
-
# 'bbox-3' = border 3
|
32 |
-
# 'orientation' = angle of image segment
|
33 |
-
# 'axis_major_length'
|
34 |
-
# 'axis_minor_length'
|
35 |
-
# 'area'
|
36 |
-
# 'area_filled'
|
37 |
-
# image_properties_df = (pd.DataFrame) similar to the image_selected_df, but inlcudes all the artefacts that are picked up
|
38 |
-
# col_image_lst = (list) a list with all the segmented images in color
|
39 |
-
# inv_bw_image_lst = (list) a list with all the segmented images in inverted binary black and white
|
40 |
-
# image_segment_count = (int) number of segmented images extracted from the compound image
|
41 |
-
# detect_outlier
|
42 |
-
# INPUT:
|
43 |
-
# None
|
44 |
-
# OUTPUT(ATTRIBUTES):
|
45 |
-
# image_array = (np.array) an array of the list of color segemented images (number of images, (R,G,B))
|
46 |
-
# r_ar_lst = (list) a list of arrays with flattened images red values
|
47 |
-
# g_ar_lst = (list) a list of arrays with flattened images green values
|
48 |
-
# b_ar_lst = (list) a list of arrays with flattened images blue values
|
49 |
-
# all_ar_lst = (list) a list of arrays with flattened images all red, green, and blue values
|
50 |
-
# px_dens_dist = (np.array) frequency distribution at 0-255 of all the values for each pixel
|
51 |
-
# corr_coef = (np.array) a square array of length equal to the number of segmented images showing the spearman correlation bewteen images
|
52 |
-
# corr_pval = (np.array) the pvalues associatedwith each correlation
|
53 |
-
# corr_coef_sum = (np.array) the sum of the correlations across each iamge compared to all others
|
54 |
-
# outlier_idx = (int) the index of the image with the lowest spearman correlation sum
|
55 |
-
# outlier_val = (float) the lowest sum correlation value
|
56 |
-
# outlier_col_image = (np.array) the color image of what is detected as the outlier
|
57 |
-
# outlier_inv_bw_image = (np.array) the inverted black on white image of the outlier segmented image
|
58 |
-
# outlier_bw_image = (np.array) the white on black image of the outlier segmented image
|
59 |
-
# image_selected_df = (pd.DataFrame) an updated dataframe that contains the circle identification data
|
60 |
-
# estimate_size
|
61 |
-
# INPUT:
|
62 |
-
# known_radius = (int) {default=1} the radius of the reference circle (shoudl be approximately the same size as the specimens to work best)
|
63 |
-
# canny_sigma = (int) {default=5} this describes how strict the cleaning border is for identifying the circle to place over the reference circle
|
64 |
-
# outlier_idx = (int) {default should be self.outlier_idx} change this when the circle is falsely detected
|
65 |
-
# OUTPUT(ATTRIBUTES):
|
66 |
-
# outlier_bw_image = (np.array) an updated version of the outlier iamge with a clean circle clear of artifacts
|
67 |
-
# outlier_idx = (int) same as the input
|
68 |
-
# clean_inv_bw_image_lst = (list) a list of cleaned white on black images no blobs touching hte border
|
69 |
-
# image_selected_df = (pd.DataFrame) an update to the dataframe of metadata containing pixel counts and relative area in mm^2 of all segmented images
|
70 |
-
# *black and white is white on black
|
71 |
-
|
72 |
-
# import requirements
|
73 |
-
import os
|
74 |
-
os.environ["OMP_NUM_THREADS"] = '1' #use this line on windows machines to avoid memory leaks
|
75 |
-
import numpy as np
|
76 |
-
import pandas as pd
|
77 |
-
from math import ceil
|
78 |
-
from skimage import io
|
79 |
-
from skimage.filters import threshold_otsu
|
80 |
-
from skimage.color import rgb2gray
|
81 |
-
from skimage.segmentation import clear_border
|
82 |
-
from skimage.measure import label, regionprops_table
|
83 |
-
from skimage.transform import hough_circle, hough_circle_peaks
|
84 |
-
from skimage.feature import canny
|
85 |
-
from skimage.draw import disk
|
86 |
-
from sklearn.cluster import KMeans
|
87 |
-
from scipy.stats import spearmanr
|
88 |
-
|
89 |
-
class pre_process_image:
|
90 |
-
# initialize image to be segmented from path
|
91 |
-
def __init__(self, image=None, image_dir=None, manual_thresh_buffer=0):
|
92 |
-
if image_dir is not None:
|
93 |
-
self.image_dir = image_dir.replace('\\','/') # full directory path to image
|
94 |
-
self.image = io.imread(image_dir) # read image from directory
|
95 |
-
elif image is not None:
|
96 |
-
self.image = image
|
97 |
-
else:
|
98 |
-
print("No image given to function")
|
99 |
-
self.grey_image = rgb2gray(self.image) #convert image to greyscale
|
100 |
-
self.bw_image = self.grey_image > threshold_otsu(self.grey_image) + manual_thresh_buffer # binarize image to be black & white
|
101 |
-
self.inv_bw_image = np.invert(self.bw_image) # invert black and white image
|
102 |
-
self.clear_inv_bw_image = clear_border(self.inv_bw_image) # remove anything touching image border
|
103 |
-
|
104 |
-
# segment the image into smaller images
|
105 |
-
def segment(self, cluster_num=2, image_edge_buffer=50):
|
106 |
-
self.cluster_num = cluster_num
|
107 |
-
self.image_edge_buffer = image_edge_buffer
|
108 |
-
self.labeled_image = label(self.clear_inv_bw_image) #label image
|
109 |
-
image_properties_df = pd.DataFrame( # get the properties of each image used to segment blobs in image
|
110 |
-
regionprops_table(
|
111 |
-
self.labeled_image,
|
112 |
-
properties=('centroid',
|
113 |
-
'bbox',
|
114 |
-
'orientation',
|
115 |
-
'axis_major_length',
|
116 |
-
'axis_minor_length',
|
117 |
-
'area',
|
118 |
-
'area_filled')
|
119 |
-
)
|
120 |
-
)
|
121 |
-
# cluster boxes of blobs by size
|
122 |
-
kmean_result = KMeans(n_clusters=cluster_num, n_init='auto').fit(
|
123 |
-
np.array(
|
124 |
-
image_properties_df[['axis_major_length', 'axis_minor_length']]
|
125 |
-
)
|
126 |
-
)
|
127 |
-
image_properties_df['kmeans_label'] = kmean_result.labels_
|
128 |
-
# keep only the largest cluster (ball bearing needs to be a similar size as the beetles)
|
129 |
-
self.max_kmeans_label = int(image_properties_df.kmeans_label[image_properties_df['area'] == image_properties_df['area'].max()])
|
130 |
-
image_selected_df = image_properties_df[image_properties_df['kmeans_label']==self.max_kmeans_label]
|
131 |
-
self.image_properties_df = image_properties_df
|
132 |
-
# enlarge the boxes around blobs with buffer
|
133 |
-
coord_df = image_selected_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']].copy()
|
134 |
-
coord_df = coord_df.reset_index(drop = True)
|
135 |
-
image_selected_df = image_selected_df.reset_index(drop = True)
|
136 |
-
coord_df.loc[:,['bbox-0','bbox-1']] = coord_df.loc[:,['bbox-0','bbox-1']]-self.image_edge_buffer
|
137 |
-
coord_df.loc[:,['bbox-2','bbox-3']] = coord_df.loc[:,['bbox-2','bbox-3']]+self.image_edge_buffer
|
138 |
-
image_selected_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']] = coord_df.loc[:,['bbox-0','bbox-1','bbox-2','bbox-3']]
|
139 |
-
# limit boundaries to the initial image size without this the iamge size bugs out when the boundaries are negative and it removes the image
|
140 |
-
mask = image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']]>=0
|
141 |
-
image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']] = image_selected_df[['bbox-0','bbox-1','bbox-2','bbox-3']].where(mask, other=0)
|
142 |
-
self.image_selected_df = image_selected_df
|
143 |
-
# crop blobs from image based on box sizes and add to list
|
144 |
-
col_image_lst = []
|
145 |
-
inv_bw_image_lst = []
|
146 |
-
for i in range(len(image_selected_df)):
|
147 |
-
coord_i = image_selected_df.iloc[i]
|
148 |
-
# color images
|
149 |
-
crop_img = self.image[int(coord_i['bbox-0']):int(coord_i['bbox-2']), int(coord_i['bbox-1']):int(coord_i['bbox-3'])]
|
150 |
-
col_image_lst.append(crop_img)
|
151 |
-
# inverted black and white images
|
152 |
-
crop_bw_img = self.inv_bw_image[int(coord_i['bbox-0']):int(coord_i['bbox-2']), int(coord_i['bbox-1']):int(coord_i['bbox-3'])]
|
153 |
-
inv_bw_image_lst.append(crop_bw_img)
|
154 |
-
|
155 |
-
#clear all images that are empty
|
156 |
-
# col_image_lst = [x for x in col_image_lst if x.shape[0] != 0]
|
157 |
-
# inv_bw_image_lst = [x for x in inv_bw_image_lst if x.shape[0] != 0]
|
158 |
-
|
159 |
-
self.col_image_lst = col_image_lst
|
160 |
-
self.inv_bw_image_lst = inv_bw_image_lst
|
161 |
-
self.image_segment_count = len(col_image_lst)
|
162 |
-
|
163 |
-
def detect_outlier(self):
|
164 |
-
# convert list to numpy array
|
165 |
-
self.image_array = np.copy(np.array(self.col_image_lst, dtype='object'))
|
166 |
-
# initialize lists to store data in
|
167 |
-
r_ar_lst = []
|
168 |
-
g_ar_lst = []
|
169 |
-
b_ar_lst = []
|
170 |
-
all_ar_lst = []
|
171 |
-
for l in range(self.image_segment_count):
|
172 |
-
# flatten arrays
|
173 |
-
img_var = self.image_array[l]
|
174 |
-
r_ar = img_var[:,:,0].flatten() # red
|
175 |
-
g_ar = img_var[:,:,1].flatten() # green
|
176 |
-
b_ar = img_var[:,:,2].flatten() # blue
|
177 |
-
all_ar = img_var.flatten() # all
|
178 |
-
# collect data in lists
|
179 |
-
r_ar_lst.append(r_ar)
|
180 |
-
g_ar_lst.append(g_ar)
|
181 |
-
b_ar_lst.append(b_ar)
|
182 |
-
all_ar_lst.append(all_ar)
|
183 |
-
self.r_ar_lst = r_ar_lst
|
184 |
-
self.g_ar_lst = g_ar_lst
|
185 |
-
self.b_ar_lst = b_ar_lst
|
186 |
-
self.all_ar_lst = all_ar_lst
|
187 |
-
# get frequency of values at each rgb value(0-255)
|
188 |
-
values_array = all_ar_lst # use all, but can use any color
|
189 |
-
temp_dist_ar = np.zeros(shape=(255, self.image_segment_count))
|
190 |
-
for i in range(self.image_segment_count):
|
191 |
-
unique, counts = np.unique(values_array[i], return_counts=True)
|
192 |
-
temp_dict = dict(zip(unique, counts))
|
193 |
-
for j in temp_dict.keys():
|
194 |
-
temp_dist_ar[j-1][i] = temp_dict[j]
|
195 |
-
self.px_dens_dist = temp_dist_ar
|
196 |
-
# calculate the spearman correlation of distributions between images
|
197 |
-
# use spearman because it is a non-parametric measures
|
198 |
-
# use the sum of the correlation coefficients to identify the outlier image
|
199 |
-
corr_ar = np.array(spearmanr(temp_dist_ar, axis=0))
|
200 |
-
corr_coef_ar = corr_ar[0,:,:]
|
201 |
-
corr_pval_ar = corr_ar[1,:,:]
|
202 |
-
corr_sum_ar = corr_coef_ar.sum(axis=0)
|
203 |
-
self.corr_coef = corr_coef_ar
|
204 |
-
self.corr_pval = corr_pval_ar
|
205 |
-
self.corr_coef_sum = corr_sum_ar
|
206 |
-
self.outlier_idx = corr_sum_ar.argmin()
|
207 |
-
self.outlier_val = corr_sum_ar.min()
|
208 |
-
self.outlier_col_image = self.col_image_lst[self.outlier_idx]
|
209 |
-
self.outlier_inv_bw_image = self.inv_bw_image_lst[self.outlier_idx]
|
210 |
-
self.outlier_bw_image = np.invert(self.outlier_inv_bw_image)
|
211 |
-
# update metadata dataframe
|
212 |
-
self.image_selected_df['circle_class'] = 'non_circle'
|
213 |
-
self.image_selected_df.loc[self.outlier_idx, 'circle_class'] = 'circle'
|
214 |
-
|
215 |
-
def estimate_size(self, outlier_idx, known_radius=1, canny_sigma=5):
|
216 |
-
for i in range(len(self.corr_coef_sum)):
|
217 |
-
# add appropriate data to dataframe when circle not detected at all
|
218 |
-
if i == (len(self.corr_coef_sum)-1):
|
219 |
-
self.outlier_idx = None
|
220 |
-
self.outlier_val = None
|
221 |
-
self.outlier_col_image = None
|
222 |
-
self.outlier_inv_bw_image = None
|
223 |
-
self.outlier_bw_image = None
|
224 |
-
# update metadata dataframe
|
225 |
-
self.image_selected_df['circle_class'] = 'non_circle'
|
226 |
-
self.image_selected_df['real_area'] = 0
|
227 |
-
clean_inv_bw_image_lst = []
|
228 |
-
for inv_bw_image in self.inv_bw_image_lst:
|
229 |
-
# bw_image = np.invert(inv_bw_image)
|
230 |
-
clean_inv_bw_image = clear_border(inv_bw_image)
|
231 |
-
clean_inv_bw_image_lst.append(clean_inv_bw_image)
|
232 |
-
px_count_lst = []
|
233 |
-
for bw_img in clean_inv_bw_image_lst:
|
234 |
-
unique_px_count = np.unique(bw_img, return_counts=True)
|
235 |
-
px_dict = dict(zip(list(unique_px_count[0]), list(unique_px_count[1])))
|
236 |
-
if len(px_dict) == 1:
|
237 |
-
px_count = 0
|
238 |
-
else:
|
239 |
-
px_count = px_dict[True]
|
240 |
-
px_count_lst.append(px_count)
|
241 |
-
self.image_selected_df['pixel_count'] = px_count_lst
|
242 |
-
print("Circle could not be found: "+str(self.image_dir))
|
243 |
-
else:
|
244 |
-
try:
|
245 |
-
self.outlier_idx = np.argsort(self.corr_coef_sum)[i]
|
246 |
-
self.outlier_val = np.sort(self.corr_coef_sum)[i]
|
247 |
-
self.outlier_col_image = self.col_image_lst[self.outlier_idx]
|
248 |
-
self.outlier_inv_bw_image = self.inv_bw_image_lst[self.outlier_idx]
|
249 |
-
self.outlier_bw_image = np.invert(self.outlier_inv_bw_image)
|
250 |
-
# update metadata dataframe
|
251 |
-
self.image_selected_df['circle_class'] = 'non_circle'
|
252 |
-
self.image_selected_df.loc[self.outlier_idx, 'circle_class'] = 'circle'
|
253 |
-
outlier_inv_bw_image = np.invert(self.outlier_bw_image)
|
254 |
-
# remove the border touching blobs of all b&w images
|
255 |
-
clean_inv_bw_image_lst = []
|
256 |
-
for inv_bw_image in self.inv_bw_image_lst:
|
257 |
-
# bw_image = np.invert(inv_bw_image)
|
258 |
-
clean_inv_bw_image = clear_border(inv_bw_image)
|
259 |
-
clean_inv_bw_image_lst.append(clean_inv_bw_image)
|
260 |
-
# default is the image detected with detect_outlier
|
261 |
-
# change outlier_bw_image if this is not the ball bearing
|
262 |
-
edges = canny(self.outlier_bw_image, sigma=canny_sigma)
|
263 |
-
# Detect radius
|
264 |
-
max_r = int((max(outlier_inv_bw_image.shape)/2) + (self.image_edge_buffer/2)) # max radius
|
265 |
-
min_r = int((max_r-self.image_edge_buffer) - (self.image_edge_buffer/2)) # min radius
|
266 |
-
hough_radii = np.arange(min_r, max_r, 10)
|
267 |
-
hough_res = hough_circle(edges, hough_radii)
|
268 |
-
# Select the most prominent circle
|
269 |
-
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=1)
|
270 |
-
circy, circx = disk((cy[0], cx[0]), radii[0])
|
271 |
-
# change the outlier image to fill in the circle
|
272 |
-
outlier_inv_bw_image[circy, circx] = True # this index error occurs when the outlier object circle does not fit into the image
|
273 |
-
|
274 |
-
self.outlier_inv_bw_image = clear_border(outlier_inv_bw_image)
|
275 |
-
clean_inv_bw_image_lst[self.outlier_idx] = self.outlier_inv_bw_image
|
276 |
-
self.clean_inv_bw_image_lst = clean_inv_bw_image_lst
|
277 |
-
# get the area of the ball bearing based on the known radius
|
278 |
-
circle_area = np.pi*(known_radius**2)
|
279 |
-
px_count_lst = []
|
280 |
-
for bw_img in clean_inv_bw_image_lst:
|
281 |
-
px_count = np.unique(bw_img, return_counts=True)[1][1] # this index error occurs when the outlier object touches the edge of the image (forces recalculation of outlier)
|
282 |
-
px_count_lst.append(px_count)
|
283 |
-
self.image_selected_df['pixel_count'] = px_count_lst
|
284 |
-
circle_px_count = px_count_lst[self.outlier_idx]
|
285 |
-
area_ar = (np.array(px_count_lst)/circle_px_count)*circle_area
|
286 |
-
self.image_selected_df['real_area'] = area_ar
|
287 |
-
|
288 |
-
break
|
289 |
-
|
290 |
-
except IndexError:
|
291 |
-
print('Updating circle classification for image: '+ str(self.image_dir))
|
292 |
-
|
293 |
-
else:
|
294 |
-
print("No circle was found to estimate beetle size")
|
295 |
-
|
296 |
-
# add a section at line 219 that labels all area as 0 and all circle_class as non_circle when the least outlying object is considered.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/QQsign/Dockerfile
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
FROM openjdk:17-slim
|
2 |
-
|
3 |
-
# 设置时区
|
4 |
-
ENV TZ Asia/Shanghai
|
5 |
-
|
6 |
-
# 设置工作目录
|
7 |
-
WORKDIR /app
|
8 |
-
|
9 |
-
# 复制文件到工作目录
|
10 |
-
COPY bin /app/bin
|
11 |
-
COPY lib /app/lib
|
12 |
-
COPY txlib /app/txlib
|
13 |
-
|
14 |
-
# 设置命令
|
15 |
-
RUN chmod -R 777 /tmp
|
16 |
-
RUN chmod -R 777 /app
|
17 |
-
RUN sed 's/"key": ".*"/"key": "'"$KEY_VALUE"'"/' txlib/$TXLIB_VERSION/config.json > /app/txlib/$TXLIB_VERSION/config.json
|
18 |
-
|
19 |
-
# 运行
|
20 |
-
CMD bash bin/unidbg-fetch-qsign --basePath=txlib/$TXLIB_VERSION
|
21 |
-
|
22 |
-
# 暴露端口
|
23 |
-
EXPOSE 7860
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CofAI/urlcut/style.css
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
body {
|
2 |
-
padding: 2rem;
|
3 |
-
font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
|
4 |
-
}
|
5 |
-
|
6 |
-
h1 {
|
7 |
-
font-size: 16px;
|
8 |
-
margin-top: 0;
|
9 |
-
}
|
10 |
-
|
11 |
-
p {
|
12 |
-
color: rgb(107, 114, 128);
|
13 |
-
font-size: 15px;
|
14 |
-
margin-bottom: 10px;
|
15 |
-
margin-top: 5px;
|
16 |
-
}
|
17 |
-
|
18 |
-
.card {
|
19 |
-
max-width: 620px;
|
20 |
-
margin: 0 auto;
|
21 |
-
padding: 16px;
|
22 |
-
border: 1px solid lightgray;
|
23 |
-
border-radius: 16px;
|
24 |
-
}
|
25 |
-
|
26 |
-
.card p:last-child {
|
27 |
-
margin-bottom: 0;
|
28 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/app.py
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import openai
|
3 |
-
from dotenv import load_dotenv
|
4 |
-
import os
|
5 |
-
|
6 |
-
# Load the environment variables from the .env file
|
7 |
-
load_dotenv()
|
8 |
-
|
9 |
-
# Get the API key from the environment variable
|
10 |
-
openai.api_key = os.getenv("OPENAI_API_KEY")
|
11 |
-
|
12 |
-
|
13 |
-
def predict(input):
|
14 |
-
response = openai.ChatCompletion.create(
|
15 |
-
model="gpt-4", messages=[{"role": "user", "content": input}]
|
16 |
-
)
|
17 |
-
return response.choices[0].message.content
|
18 |
-
|
19 |
-
|
20 |
-
demo = gr.Interface(fn=predict, inputs="text", outputs="text", title="GGAI LAB DEMO")
|
21 |
-
|
22 |
-
if __name__ == "__main__":
|
23 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|