Commit 77fa959
1 Parent(s): 5058543

Update parquet files (step 112 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/101-5/gpt4free/.github/ISSUE_TEMPLATE/feature_request.md +0 -20
- spaces/101-5/gpt4free/g4f/Provider/Providers/GetGpt.py +0 -57
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arrival (English) dual audio hindi download Watch the sci-fi mystery thriller in HD.md +0 -113
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Celemony Melodyne Studio 3 - Full Crack Serial Key [TOP].md +0 -77
- spaces/1gistliPinn/ChatGPT4/Examples/Anonymox Premium Serial Key What Is It and How to Use It Safely.md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Coloring Game - Expansion Pack No. 1 Free Download [torrent Full] The Ultimate Guide to This Amazing Puzzle Game Expansion.md +0 -12
- spaces/1gistliPinn/ChatGPT4/Examples/Flobo Hard Disk Repair Full Crack 11.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Sonnox-Oxford-64-Bit-Mac-Crack.md +0 -104
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us 3D A New Way to Play the Popular Game on PC and Mac.md +0 -136
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath APK The Best Android Game for Strategy and Hero Cultivation.md +0 -115
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alice in Borderland Season 1 in Hindi 480p 720p 1080p HD Netflix Series.md +0 -95
- spaces/1phancelerku/anime-remove-background/Azrbaycanda dron istehsalnn inkiaf yeni texnologiyalar v perspektivlr.md +0 -22
- spaces/1phancelerku/anime-remove-background/Download the Ultimate Domino Online APK and Challenge Yourself in Six Variants.md +0 -136
- spaces/1phancelerku/anime-remove-background/Downloading Audio Clips and Voice Recordings From Facebook Messenger.md +0 -130
- spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_mega.py +0 -193
- spaces/2023Liu2023/bingo/src/components/theme-toggle.tsx +0 -31
- spaces/7eu7d7/anime-ai-detect-fucker/attack.py +0 -113
- spaces/A00001/bingothoo/Dockerfile +0 -36
- spaces/AI-Hobbyist/Hoyo-RVC/infer_uvr5.py +0 -363
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual_dis.py +0 -137
- spaces/AIKey/TestStatic/README.md +0 -10
- spaces/ASJMO/freegpt/g4f/__init__.py +0 -39
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/__init__.py +0 -0
- spaces/AchyuthGamer/OpenGPT/server/backend.py +0 -188
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Factory.js +0 -16
- spaces/AkitoP/umamusume_bert_vits2/text/english.py +0 -214
- spaces/Alex89912/ai-code-v1/README.md +0 -12
- spaces/AlexZou/Deploy_Restoration/Dehazing.py +0 -45
- spaces/Alfasign/remove-background-on-image/README.md +0 -13
- spaces/Amitesh007/elevenlabs-stt/app.py +0 -59
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py +0 -1185
- spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py +0 -42
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py +0 -10
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py +0 -39
- spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py +0 -15
- spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image_interactive.py +0 -102
- spaces/ArtyomKhyan/Detection/utils/utils.py +0 -1200
- spaces/Audio-AGI/AudioSep/models/CLAP/training/__init__.py +0 -0
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/deployment.md +0 -137
- spaces/Benson/text-generation/Examples/Apk Xplore File Manager.md +0 -83
- spaces/Benson/text-generation/Examples/Cmo Descargar La Tarjeta Aadhar En Lnea.md +0 -51
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/bazaar.py +0 -112
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py +0 -608
- spaces/Borda90/Titanic_Esp/README.md +0 -13
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/__init__.py +0 -10
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/nms.py +0 -146
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/inner_product.h +0 -59
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/inner_product.h +0 -23
- spaces/CVPR/MonoScene/monoscene/app.py +0 -138
spaces/101-5/gpt4free/.github/ISSUE_TEMPLATE/feature_request.md
DELETED
@@ -1,20 +0,0 @@
```markdown
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
```
spaces/101-5/gpt4free/g4f/Provider/Providers/GetGpt.py
DELETED
@@ -1,57 +0,0 @@
```python
import os
import json
import uuid
import requests
from Crypto.Cipher import AES
from ...typing import sha256, Dict, get_type_hints

url = 'https://chat.getgpt.world/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    def encrypt(e):
        t = os.urandom(8).hex().encode('utf-8')
        n = os.urandom(8).hex().encode('utf-8')
        r = e.encode('utf-8')
        cipher = AES.new(t, AES.MODE_CBC, n)
        ciphertext = cipher.encrypt(pad_data(r))
        return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')

    def pad_data(data: bytes) -> bytes:
        block_size = AES.block_size
        padding_size = block_size - len(data) % block_size
        padding = bytes([padding_size] * padding_size)
        return data + padding

    headers = {
        'Content-Type': 'application/json',
        'Referer': 'https://chat.getgpt.world/',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
    }

    data = json.dumps({
        'messages': messages,
        'frequency_penalty': kwargs.get('frequency_penalty', 0),
        'max_tokens': kwargs.get('max_tokens', 4000),
        'model': 'gpt-3.5-turbo',
        'presence_penalty': kwargs.get('presence_penalty', 0),
        'temperature': kwargs.get('temperature', 1),
        'top_p': kwargs.get('top_p', 1),
        'stream': True,
        'uuid': str(uuid.uuid4())
    })

    res = requests.post('https://chat.getgpt.world/api/chat/stream',
                        headers=headers, json={'signature': encrypt(data)}, stream=True)

    for line in res.iter_lines():
        if b'content' in line:
            line_json = json.loads(line.decode('utf-8').split('data: ')[1])
            yield (line_json['choices'][0]['delta']['content'])


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
```
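Note on the deleted provider above: before posting, it serializes the request to JSON, applies PKCS#7-style padding, encrypts with AES-CBC under a freshly generated 16-byte key and IV, and sends the result as a single `signature` field with the key and IV appended in the clear. Below is a minimal standalone sketch of that packing scheme (hypothetical payload; same pycryptodome `Crypto.Cipher.AES` API the file imports), offered as an illustration rather than a drop-in client:

```python
import os
import json
from Crypto.Cipher import AES  # pycryptodome, the same dependency as GetGpt.py


def pad(data: bytes) -> bytes:
    # PKCS#7-style padding, mirroring the provider's pad_data()
    size = AES.block_size - len(data) % AES.block_size
    return data + bytes([size] * size)


def unpad(data: bytes) -> bytes:
    # The last byte records how many padding bytes were appended
    return data[:-data[-1]]


# 8 random bytes, hex-encoded = 16 ASCII bytes, a valid AES-128 key/IV
key = os.urandom(8).hex().encode('utf-8')
iv = os.urandom(8).hex().encode('utf-8')

# Hypothetical payload standing in for the provider's full request body
payload = json.dumps({'messages': [], 'model': 'gpt-3.5-turbo'}).encode('utf-8')
ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(payload))

# The provider concatenates ciphertext (hex) + key + IV into one string
signature = ciphertext.hex() + key.decode('utf-8') + iv.decode('utf-8')

# Round-trip check: the same key/IV recover the original payload
assert unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)) == payload
```

Because the key and IV travel inside the `signature` string itself, the scheme is request obfuscation rather than encryption in any real security sense: anyone who captures the string can decrypt it.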
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arrival (English) dual audio hindi download Watch the sci-fi mystery thriller in HD.md
DELETED
@@ -1,113 +0,0 @@
```html

<h1>Arrival (English) Dual Audio Hindi Download: How to Watch the Award-Winning Sci-Fi Film Online</h1>
<p>If you are a fan of science fiction films, you might have heard of <strong>Arrival</strong>, a 2016 film directed by Denis Villeneuve and starring Amy Adams, Jeremy Renner, and Forest Whitaker. The film was based on a short story by Ted Chiang and received critical acclaim and numerous awards, including an Oscar for Best Sound Editing. But what if you want to watch this film in dual audio, with both English and Hindi languages? In this article, we will tell you what Arrival is about, why it is worth watching, and how to download it in dual audio.</p>
<h2>Arrival (English) dual audio hindi download</h2><br /><p><b><b>Download</b> 🌟 <a href="https://byltly.com/2uKvj3">https://byltly.com/2uKvj3</a></b></p><br /><br />
<h2>Introduction</h2>
<h3>What is Arrival about?</h3>
<p>Arrival is a sci-fi drama film that tells the story of Louise Banks (Amy Adams), a linguist who is recruited by the US Army to communicate with alien lifeforms that have arrived on Earth in 12 mysterious spacecraft. Along with physicist Ian Donnelly (Jeremy Renner), she tries to decipher their language and understand their purpose before tensions escalate into a global war. As she learns more about the aliens, who are called heptapods, she also experiences flashbacks of her daughter Hannah, who died of an incurable disease.</p>
<h3>Why is Arrival worth watching?</h3>
<p>Arrival is not your typical alien invasion film. It is a thoughtful and intelligent exploration of communication, time, memory, and free will. It challenges the viewers to think about how we perceive reality and how we make choices that affect our lives. It also showcases the power of language and how it shapes our worldview. The film has a captivating plot that keeps you engaged and surprised until the end. It also has excellent performances from the cast, especially Amy Adams, who delivers a nuanced and emotional portrayal of Louise.</p>
<h3>How to download Arrival in dual audio (English and Hindi)?</h3>
<p>If you want to watch Arrival in dual audio, you have several options available online. One of them is to use Google Drive, where you can find a link to download the film in 1080p quality with both English and Hindi audio tracks. Another option is to use FilmyGod.UK, a website that offers high-quality Hindi dubbed movies for free. You can download Arrival in 480p, 720p, or 1080p quality with dual audio from this site. However, be aware that these sites may not be legal or safe to use, so proceed at your own risk.</p>
<h2>Review of Arrival</h2>
<h3>The plot</h3>
<p>The plot of Arrival is complex and intriguing, as it involves nonlinear storytelling and multiple timelines. The film uses flashbacks and flash-forwards to reveal Louise's past and future, as well as the nature of the heptapods' language and mission. The film also has a twist ending that changes everything you thought you knew about the story. The plot is well-written and executed, as it keeps you guessing and curious throughout the film. It also raises some philosophical questions about fate, determinism, and human agency.</p>
<h3>The characters</h3>
<p>The characters of Arrival are well-developed and relatable, as they have their own motivations and struggles. Louise is the protagonist of the film, who is haunted by the loss of her daughter and seeks to find meaning in her life through her work as a linguist. She is brave, compassionate, and curious, as she tries to understand the heptapods and their message. Ian is her partner in the project, who is a physicist and a mathematician. He is rational, analytical, and supportive, as he helps Louise with her research and develops feelings for her. Colonel Weber is their boss, Continuing the article: <h3>The themes</h3>
<p>Arrival explores various themes that are relevant and profound for the human condition. Some of the main themes are:</p>
<p>Arrival movie dual audio hindi english download<br />
Download Arrival 2016 in hindi english dual audio<br />
Arrival hindi dubbed english movie download<br />
How to download Arrival in dual audio hindi and english<br />
Arrival full movie in hindi and english download<br />
Download Arrival dual audio 720p hindi english<br />
Arrival dual audio 1080p hindi english download<br />
Arrival hindi english dual audio torrent download<br />
Arrival bluray dual audio hindi english download<br />
Arrival dual audio hdrip hindi english download<br />
Download Arrival dvdrip dual audio hindi and english<br />
Arrival web-dl dual audio hindi english download<br />
Arrival brrip dual audio hindi english download<br />
Download Arrival x264 dual audio hindi and english<br />
Arrival xvid dual audio hindi english download<br />
Download Arrival hevc dual audio hindi and english<br />
Arrival h264 dual audio hindi english download<br />
Download Arrival mkv dual audio hindi and english<br />
Arrival mp4 dual audio hindi english download<br />
Download Arrival avi dual audio hindi and english<br />
Arrival ac3 dual audio hindi english download<br />
Download Arrival aac dual audio hindi and english<br />
Arrival dts dual audio hindi english download<br />
Download Arrival 5.1ch dual audio hindi and english<br />
Arrival 7.1ch dual audio hindi english download<br />
Download Arrival 2.0ch dual audio hindi and english<br />
Arrival 3.0ch dual audio hindi english download<br />
Download Arrival 4.0ch dual audio hindi and english<br />
Arrival subtitles dual audio hindi english download<br />
Download Arrival srt dual audio hindi and english<br />
Arrival subbed dual audio hindi english download<br />
Download Arrival dubbed dual audio hindi and english<br />
Arrival original language dual audio hindi english download<br />
Download Arrival original sound track dual audio hindi and english<br />
Arrival director's cut dual audio hindi english download<br />
Download Arrival extended edition dual audio hindi and english<br />
Arrival unrated version dual audio hindi english download<br />
Download Arrival theatrical release dual audio hindi and english<br />
Arrival special features dual audio hindi english download<br />
Download Arrival bonus content dual audio hindi and english<br />
Arrival behind the scenes dual audio hindi english download<br />
Download Arrival making of dual audio hindi and english<br />
Arrival interviews dual audio hindi english download<br />
Download Arrival cast and crew dual audio hindi and english<br />
Arrival reviews dual audio hindi english download<br />
Download Arrival ratings dual audio hindi and english<br />
Arrival awards dual audio hindi english download<br />
Download Arrival nominations dual audio hindi and english<br />
Arrival box office collection dual audio hindi english download</p>
<ul>
<li><strong>Communication:</strong> The film shows how communication is essential for understanding and cooperation, not only between humans and aliens, but also among humans themselves. The film also illustrates how communication can be challenging and complex, as different languages have different structures, meanings, and assumptions. The film also suggests that communication can influence our perception of reality and time, as learning the heptapod language allows Louise to experience time in a non-linear way.</li>
<li><strong>Choice:</strong> The film raises the question of whether we have free will or whether our lives are predetermined by fate. The film also explores the consequences of our choices and how they affect ourselves and others. Louise faces a difficult choice when she learns that her daughter will die in the future, but she decides to have her anyway, knowing that she will cherish the moments they will share. The film also shows how choices can create conflict or harmony, as different nations choose to either attack or cooperate with the heptapods.</li>
<li><strong>Empathy:</strong> The film emphasizes the importance of empathy and compassion for bridging the gaps between different beings. The film shows how empathy can foster trust and understanding, as Louise and Ian develop a bond with the heptapods by trying to learn their language and culture. The film also shows how empathy can prevent violence and war, as Louise manages to convince General Shang of China to stand down from his attack by using his personal phone number and his wife's dying words.</li>
</ul>
<h3>The cinematography</h3>
<p>The cinematography of Arrival is stunning and captivating, as it creates a contrast between the mundane and the extraordinary. The film uses a muted color palette and natural lighting to depict the realistic and bleak aspects of the human world, such as the military base, the university campus, and Louise's home. The film also uses wide shots and aerial views to show the scale and scope of the alien arrival, as well as the global response. The film also uses close-ups and low angles to emphasize the mystery and awe of the heptapods and their spacecraft, as well as the intimacy and emotion of the characters.</p>
<h3>The music</h3>
<p>The music of Arrival is composed by Jóhann Jóhannsson, who creates a haunting and atmospheric score that matches the tone and mood of the film. The music combines orchestral elements with electronic sounds and vocal samples, creating a blend of organic and alien sounds. The music also reflects the themes and emotions of the film, such as curiosity, tension, sadness, and wonder. The music also enhances the impact of some of the key scenes in the film, such as the first encounter with the heptapods, the revelation of Louise's future, and the final conversation with General Shang.</p>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In conclusion, Arrival is a remarkable sci-fi film that offers a unique and profound perspective on communication, time, choice, empathy, and humanity. The film has a compelling plot that surprises and challenges the viewers with its nonlinear structure and twist ending. The film also has superb performances from Amy Adams and Jeremy Renner, who bring depth and emotion to their roles. The film also has impressive cinematography and music that create a captivating visual and auditory experience. The film is not only entertaining but also enlightening, as it invites us to think about our place in the universe and our relationship with ourselves and others.</p>
<h3>Recommendation for the viewers</h3>
<p>If you are looking for a sci-fi film that is more than just action and spectacle, Arrival is a perfect choice for you. Arrival is a film that will make you think, feel, and wonder about life's big questions. Arrival is a film that will inspire you to learn new languages, appreciate different cultures, and embrace your choices. Arrival is a film that will touch your heart and mind with its beauty and wisdom.</p>
<h3>FAQs</h3>
<ol>
<li><strong>What is the meaning of Arrival?</strong> Arrival is a film that explores the meaning of communication, time, choice, empathy, and humanity through the story of Louise Banks, a linguist who tries to communicate with alien visitors who have arrived on Earth.</li>
<li><strong>What is the language of Arrival?</strong> Arrival features two languages: English and Heptapod. English is spoken by most of the human characters in the film. Heptapod is spoken by Continuing the article: the heptapods, a complex and circular language that has no beginning or end. Heptapod A is the spoken form of the language, which consists of low-pitched and thrumming sounds that are unpronounceable by humans. Heptapod B is the written form of the language, which consists of circular logograms that represent entire sentences or concepts. The heptapods can write multiple logograms at once, using their seven limbs to create intricate patterns.</li>
<li><strong>Memory:</strong> The film explores the role of memory in shaping our identity and reality. The film uses flashbacks and flash-forwards to show Louise's memories of her daughter, as well as her future interactions with the heptapods and Ian. The film also reveals that Louise's memories are not chronological, but rather influenced by her exposure to the heptapod language, which allows her to perceive time in a non-linear way. The film suggests that memory is not fixed or objective, but rather fluid and subjective, depending on our perspective and context.</li>
</ol>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In conclusion, Arrival is a remarkable sci-fi film that offers a unique and profound perspective on communication, time, choice, empathy, memory, and humanity. The film has a compelling plot that surprises and challenges the viewers with its nonlinear structure and twist ending. The film also has superb performances from Amy Adams and Jeremy Renner, who bring depth and emotion to their roles. The film also has impressive cinematography and music that create a captivating visual and auditory experience. The film is not only entertaining but also enlightening, as it invites us to think about our place in the universe and our relationship with ourselves and others.</p>
<h3>Recommendation for the viewers</h3>
<p>If you are looking for a sci-fi film that is more than just action and spectacle, Arrival is a perfect choice for you. Arrival is a film that will make you think, feel, and wonder about life's big questions. Arrival is a film that will inspire you to learn new languages, appreciate different cultures, and embrace your choices. Arrival is a film that will touch your heart and mind with its beauty and wisdom.</p>
<h3>FAQs</h3>
<ol>
<li><strong>What is the meaning of Arrival?</strong> Arrival is a film that explores the meaning of communication, time, choice, empathy, memory, and humanity through the story of Louise Banks, a linguist who tries to communicate with alien visitors who have arrived on Earth.</li>
<li><strong>What is the language of Arrival?</strong> Arrival features two languages: English and Heptapod. English is spoken by most of the human characters in the film. Heptapod is spoken by Continuing the article: the heptapods, a complex and circular language that has no beginning or end. Heptapod A is the spoken form of the language, which consists of low-pitched and thrumming sounds that are unpronounceable by humans. Heptapod B is the written form of the language, which consists of circular logograms that represent entire sentences or concepts. The heptapods can write multiple logograms at once, using their seven limbs to create intricate patterns.</li>
<li><strong>Memory:</strong> The film explores the role of memory in shaping our identity and reality. The film uses flashbacks and flash-forwards to show Louise's memories of her daughter, as well as her future interactions with the heptapods and Ian. The film also reveals that Louise's memories are not chronological, but rather influenced by her exposure to the heptapod language, which allows her to perceive time in a non-linear way. The film suggests that memory is not fixed or objective, but rather fluid and subjective, depending on our perspective and context.</li>
</ol>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In conclusion, Arrival is a remarkable sci-fi film that offers a unique and profound perspective on communication, time, choice, empathy, memory, and humanity. The film has a compelling plot that surprises and challenges the viewers with its nonlinear structure and twist ending. The film also has superb performances from Amy Adams and Jeremy Renner, who bring depth and emotion to their roles. The film also has impressive cinematography and music that create a captivating visual and auditory experience. The film is not only entertaining but also enlightening, as it invites us to think about our place in the universe and our relationship with ourselves and others.</p>
<h3>Recommendation for the viewers</h3>
<p>If you are looking for a sci-fi film that is more than just action and spectacle, Arrival is a perfect choice for you. Arrival is a film that will make you think, feel, and wonder about life's big questions. Arrival is a film that will inspire you to learn new languages, appreciate different cultures, and embrace your choices. Arrival is a film that will touch your heart and mind with its beauty and wisdom.</p>
<h3>FAQs</h3>
<ol>
<li><strong>What is the meaning of Arrival?</strong> Arrival is a film that explores the meaning of communication, time, choice, empathy, memory, and humanity through the story of Louise Banks, a linguist who tries to communicate with alien visitors who have arrived on Earth.</li>
<li><strong>What is the language of Arrival?</strong> Arrival features two languages: English and Heptapod. English is spoken by most of the human characters in the film. Heptapod is spoken by the heptapods, an alien species that communicates with circular logograms that have no beginning or end.</li>
<li><strong>What is the twist ending of Arrival?</strong> The twist ending of Arrival is that Louise's flashbacks are actually flash-forwards of her future life with Ian and their daughter Hannah. Louise learns from the heptapods that they have come to Earth to offer humanity their language, which enables them to perceive time in a non-linear way. The heptapods need humanity's help in 3,000 years for an unknown reason. Louise also learns from General Shang of China that she will call him in 18 months and convince him to stop his attack on the heptapods by using his personal phone number and his wife's dying words. Louise decides to accept her future with Ian and Hannah, even though she knows that Hannah will die from an incurable disease.</li>
<li><strong>Who are the actors in Arrival?</strong> The main actors in Arrival are Amy Adams as Louise Banks, Jeremy Renner as Ian Donnelly, Forest Whitaker as Colonel Weber, Michael Stuhlbarg as Agent Halpern, Tzi Ma as General Shang, Julia Scarlett Dan as 12-Year-Old-Hannah, Jadyn Malone as 6-Year-Old-Hannah.</li>
<li><strong>Where can I watch Arrival online?</strong> You can watch Arrival online on various streaming platforms such as Netflix, Amazon Prime Video, Hulu, YouTube, Google Play, iTunes, Vudu, etc. However, availability may vary depending on your region and subscription plan.</li>
</ol>
</p> 0a6ba089eb<br />
<br />
<br />
```
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Celemony Melodyne Studio 3 - Full Crack Serial Key [TOP].md
DELETED
@@ -1,77 +0,0 @@
```html

<h1>Celemony Melodyne Studio 3 - full Crack Serial Key</h1>
<p>If you are looking for a powerful and versatile audio editing software, you might have heard of Celemony Melodyne Studio 3. This is a professional tool that allows you to edit, manipulate, and transform audio files in a musical and intuitive way. You can correct pitch, timing, and intonation, as well as change the tone color, dynamics, and formants of any sound. You can also work with multiple tracks, chords, scales, tempos, and tunings, and create innovative sound design effects.</p>
<h2>Celemony Melodyne Studio 3 - full Crack Serial Key</h2><br /><p><b><b>Download</b> ✺✺✺ <a href="https://byltly.com/2uKxNF">https://byltly.com/2uKxNF</a></b></p><br /><br />
<p>However, Celemony Melodyne Studio 3 is not a cheap software. It costs $699 for a new license, or $299 for an upgrade from a previous version. If you are on a tight budget or just want to try it out before buying, you might be tempted to look for a crack serial key that can unlock all the features of Melodyne without paying anything. But is this a good idea? And how can you do it safely and effectively?</p>
<p>In this article, we will show you how to download and install Celemony Melodyne Studio 3 with a full crack serial key. We will also give you some tips on how to use it and what to watch out for. By the end of this article, you will be able to enjoy one of the best audio editing software in the market without breaking the bank.</p>
<h2>How to download and install Celemony Melodyne Studio 3</h2>
<p>The first step to use Celemony Melodyne Studio 3 with a crack serial key is to download the official package and the crack file from reliable sources. You can find the official package on the <a href="(^1^)">Celemony website</a>, where you can also download a free trial version that lasts for 30 days. The trial version has all the features of the full version, but it will stop working after the trial period expires.</p>
<p>The crack file is a bit harder to find, as it is not officially supported by Celemony or any other legitimate website. You will have to search for it on various torrent sites, forums, or blogs that offer free serial keys for software. However, be careful when downloading crack files from unknown sources, as they might contain viruses, malware, or spyware that can harm your computer or steal your personal information. Always scan any file you download with an antivirus program before opening it.</p>
<p>Once you have downloaded both files, you can proceed to install Celemony Melodyne Studio 3 on your computer. Follow these steps:</p>
<p></p>
<ol>
<li>Run the setup file of the official package and follow the instructions on the screen.</li>
<li>When prompted to enter a serial number, open the crack file and copy one of the serial keys provided.</li>
<li>Paste the serial key into the setup window and click Next.</li>
<li>Complete the installation process and launch Celemony Melodyne Studio 3.</li>
<li>You should now be able to use all the features of Melodyne without any limitations or restrictions.</li>
</ol>
<h2>How to use Celemony Melodyne Studio 3</h2>
<p>Celemony Melodyne Studio 3 is a powerful and versatile audio editing software that offers many features and tools for manipulating audio files in a musical and intuitive way. Here are some of the main features and tools that you can use with Celemony Melodyne Studio 3:</p>
<h3>Pitch and Time Editing</h3>
<p>One of the most impressive features of Melodyne is its ability to edit pitch and time independently and accurately. You can correct the pitch of any note, adjust the vibrato, bend, or glide, and change the pitch modulation of any sound. You can also edit the timing of any note, move it forward or backward, stretch or compress it, and change the tempo or groove of any sound. You can do all this with a simple mouse click and drag, or use the keyboard shortcuts for more precision.</p>
<p>Melodyne displays the audio files as blobs, which are graphical representations of the notes and their pitch and time information. You can see the blobs in different views, such as note, chord, scale, tempo, or tuning. You can also switch between different modes, such as melodic, polyphonic, percussive, or universal, depending on the type of audio you are editing. You can also use the tools menu to access various functions, such as quantize, transpose, copy, paste, delete, split, join, or mute.</p>
<h3>Tone Color and Dynamics Editing</h3>
<p>Another amazing feature of Melodyne is its ability to edit the tone color and dynamics of any sound. You can change the formants, which are the frequency components that give each sound its characteristic timbre. You can also change the amplitude envelope, which is the shape of the sound's loudness over time. You can do this by using the formant tool and the amplitude tool, which allow you to modify the shape and size of the blobs. You can also use the macros menu to apply global changes to the tone color and dynamics of your audio files.</p>
<p>By editing the tone color and dynamics of your audio files, you can create various effects and transformations. For example, you can make a male voice sound like a female voice, or vice versa. You can make a guitar sound like a piano, or a piano sound like a guitar. You can make a drum sound like a bell, or a bell sound like a drum. You can also create harmonies, choruses, flangers, phasers, or other modulation effects.</p>
<h3>Multi-Track Editing</h3>
<p>A third remarkable feature of Melodyne is its ability to edit multiple tracks simultaneously and in relation to each other. You can import up to 24 tracks into Melodyne and see them in a single window. You can then edit each track individually or together with other tracks. You can also use the mixer menu to adjust the volume, pan, solo, mute, or bypass of each track.</p>
<p>By editing multiple tracks with Melodyne, you can achieve a better balance and coherence among your audio files. You can align the pitch and timing of different tracks to create a tight and harmonious mix. You can also create chords, scales, tunings, or tempos that match across different tracks. You can also use the compare menu to compare different versions of your edits and choose the best one.</p>
<h2>Conclusion</h2>
<p>Celemony Melodyne Studio 3 is one of the best audio editing software in the market. It offers many features and tools that allow you to edit, manipulate, and transform audio files in a musical and intuitive way. You can correct pitch, timing, and intonation, as well as change the tone color, dynamics, and formants of any sound. You can also work with multiple tracks, chords, scales, tempos, and tunings, and create innovative sound design effects.</p>
<p>However, Celemony Melodyne Studio 3 is not a cheap software. It costs $699 for a new license, or $299 for an upgrade from a previous version. If you want to use it without paying anything, you will have to look for a crack serial key that can unlock all the features of Melodyne. But this is not a risk-free option. You might encounter viruses, malware, or spyware that can damage your computer or compromise your privacy. You might also face legal issues or ethical dilemmas for using a pirated software.</p>
<p>Therefore, we recommend that you use Celemony Melodyne Studio 3 with a crack serial key only if you are aware of the potential consequences and willing to take the responsibility. Otherwise, you might want to consider buying a legitimate license or using a free trial version of Melodyne. This way, you can enjoy the benefits of Melodyne without any worries or regrets.</p>
<p>If you are interested in trying out Celemony Melodyne Studio 3 with a crack serial key, you can follow the steps we outlined in this article. We hope that this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy editing!</p>
<h2>FAQs</h2>
<h3>What are the system requirements for Celemony Melodyne Studio 3?</h3>
<p>Celemony Melodyne Studio 3 requires the following system specifications:</p>
<ul>
<li>Windows XP (SP3), Vista or Windows 7 (32- or 64-bit) or Mac OS X 10.4 or later (Intel or PowerPC)</li>
<li>1 GB RAM (2 GB recommended)</li>
<li>1 GB free hard disk space</li>
<li>DVD drive</li>
<li>An audio interface compatible with ASIO (Windows) or Core Audio (Mac)</li>
<li>A MIDI interface and keyboard (optional)</li>
</ul>
<h3>Is Celemony Melodyne Studio 3 compatible with other DAWs and plug-ins?</h3>
<p>Celemony Melodyne Studio 3 can work as a stand-alone application or as a plug-in for other DAWs and audio editors. It supports the following plug-in formats:</p>
<ul>
<li>VST (Windows and Mac)</li>
<li>AU (Mac)</li>
<li>RTAS (Windows and Mac)</li>
<li>AAX (Windows and Mac)</li>
<li>ReWire (Windows and Mac)</li>
</ul>
<p>Celemony Melodyne Studio 3 can also integrate with other DAWs and plug-ins via the Melodyne Bridge or the Rewire Host Sync mode. For more information on how to use Melodyne with other software, please refer to the <a href="">user manual</a>.</p>
<h3>What are the differences between the editions of Melodyne?</h3>
<p>Celemony offers four different editions of Melodyne: essential, assistant, editor, and studio. Each edition has different features and prices. Here is a brief comparison of the four editions:</p>
<table border="1">
<tr><th>Edition</th><th>Price</th><th>Features</th></tr>
<tr><td>essential</td><td>$99</td><td>The basic edition of Melodyne. It allows you to edit pitch and timing of monophonic audio files.</td></tr>
<tr><td>assistant</td><td>$249</td><td>The intermediate edition of Melodyne. It adds the ability to edit pitch and timing of polyphonic audio files.</td></tr>
<tr><td>editor</td><td>$399</td><td>The advanced edition of Melodyne. It adds the ability to edit tone color and dynamics of audio files, as well as the DNA Direct Note Access technology that allows you to edit individual notes within chords.</td></tr>
<tr><td>studio</td><td>$699</td><td>The ultimate edition of Melodyne. It adds the ability to edit multiple tracks simultaneously and in relation to each other, as well as more features and tools for professional audio editing.</td></tr>
</table>
<h3>How can I update or upgrade my version of Melodyne?</h3>
<p>If you have a legitimate license for Celemony Melodyne Studio 3, you can update or upgrade your version of Melodyne by visiting the <a href="">Celemony website</a>. You can check for updates by clicking on the Help menu in the Melodyne window and selecting Check for Updates. You can also download the latest version of Melodyne from the website and install it over your existing version. You can upgrade your edition of Melodyne by purchasing a higher edition from the website and entering the new serial number in the Melodyne window. <h3>What are the risks of using a crack serial key for Melodyne?</h3>
<p>Using a crack serial key for Celemony Melodyne Studio 3 might seem like a convenient and cost-effective way to use one of the best audio editing software in the market, but it also comes with some risks and drawbacks. Here are some of the possible consequences of using a crack serial key for Melodyne:</p>
<ul>
<li>You might expose your computer to viruses, malware, or spyware that can damage your system, corrupt your files, or steal your personal information.</li>
<li>You might violate the intellectual property rights of Celemony and face legal actions or penalties for using a pirated software.</li>
<li>You might compromise the quality and stability of your audio files, as crack serial keys might not work properly or cause errors or crashes.</li>
<li>You might miss out on the updates, upgrades, support, and features that Celemony offers to its legitimate customers.</li>
<li>You might lose your moral integrity and credibility as an audio professional or enthusiast, as using a crack serial key is unethical and dishonest.</li>
</ul>
<p>Therefore, we advise you to use a crack serial key for Celemony Melodyne Studio 3 only if you are fully aware of the risks and willing to accept the responsibility. Otherwise, we suggest that you buy a legitimate license or use a free trial version of Melodyne. This way, you can support the developers of Melodyne and enjoy the benefits of using a genuine and reliable software.</p> b2dd77e56b<br />
<br />
<br />
```
spaces/1gistliPinn/ChatGPT4/Examples/Anonymox Premium Serial Key What Is It and How to Use It Safely.md
DELETED
@@ -1,8 +0,0 @@
```html

<p>This Proxy hides your true internet identity and makes it look as if not you, but the Proxy is currently visiting the website. AnonymoX is more than just an Add-On. Code active anonymox Anonymox Premium Activation Code Serial Number Free Download Latest: see the example image above and click the activation code. Instead of accessing a website directly, it will be first opened by one of our servers, called a Proxy. Delete cookies, show your public ip, change browser id. Appear to originate from another country. Change your IP address (to one provided by us). More and more governments censor websites with the excuse of child safety, copyright infringement or the fight against terrorism and thereby limit the freedom of speech. Also blocking users based on their origin with GeoIP-Blocks is applied often, for example at media platforms like YouTube.</p>
<p>anonymox premium code, anonymox premium code 2020, anonymox premium code free, anonymox premium code generator 2020, anonymox premium code serial, anonymox premium code 2019, anonymox premium code generator kuyhaa, anonymox_premium_code_generator.zip download, anonymox premium code chrome, anonymox premium code firefox, anonymox premium code generator free download, anonymox premium code 2018, anonymox premium code generator download Axis Football 17 Free Mac Download<br></p>
<h2>Anonymox Premium Serial Key</h2><br /><p><b><b>Download</b> »»» <a href="https://imgfil.com/2uy012">https://imgfil.com/2uy012</a></b></p><br /><br />
<p>+ Free SUPPORT Other Notes Subscribe Download Anonymox Premium Active Code Generator 2017. We are adapting our tools to new platforms every week. If your device is not supported now, check back after some time or Contact us now. This software is creativity for anonymization in the internet world + Easy setup + Open source, open code. Download Anonymox Premium Anonymix code here for free. In the fifth step, open the 'EXTENSIONS' folder and right-click on 'CLIENT ANONYMOX'. That may be all the information the Cara Jasa SEO Blog admin can share for now about activating Anonymox Free to Premium (latest 2017); hopefully what has been shared above about the anonymox premium activation code is still useful to all of you, so that you can browse the internet and hunt for backlinks more safely, out of reach of Google's wrath. Thank you.</p>
<p>Fast, Safe, Simple anonymoX enables you to Ati mobility radeon hd 4500 series Simultaneously the web sites will receive information on your web identity IP-address. When accessing a website using anonymoX your request will not be sent to the anonymox premium activation code website directly but first to our network. Adobe cs2 serial key Download Anonymox Premium Active Code Generator 2017 basic details: + Proxy support + Windows OS supported + Mac OS X supported + Latest Mobile devices supported + Instructions and full feature list provided after installation. Download now [ ] Download Anonymox Premium Active Code Generator 2017 will not let you down and do what this program was made to do. First off, what is Anonymox? It is a plug-in extension that can change our IP address in a simple, easy way: just click and it's done. Anonymox premium activation code AnonymoX offers real freedom of speech, the freedom to express yourself without having to fear repression. 518b7cbc7d</p> aaccfb2cb3<br />
<br />
<br />
```
spaces/1gistliPinn/ChatGPT4/Examples/Coloring Game - Expansion Pack No. 1 Free Download [torrent Full] The Ultimate Guide to This Amazing Puzzle Game Expansion.md
DELETED
@@ -1,12 +0,0 @@
```html

<p>Z-man Games has released free-to-download scenarios, with changes to the base game. Various scenarios are set to be released.[20] As of March 2017[update], scenarios <i>Isolation</i>[21] and <i>Government Shutdown</i>[22] have been published.[23]</p>
<h2>Coloring Game - Expansion Pack No. 1 Free Download [torrent Full]</h2><br /><p><b><b>Download File</b> ✵✵✵ <a href="https://imgfil.com/2uy1kY">https://imgfil.com/2uy1kY</a></b></p><br /><br />
<p>People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.</p>
<p>The Sims 4 Free Download PC Game in Direct Link and Torrent. Released on September 2, 2014, The Sims 4 Deluxe Edition is the fourth major title in the life simulation video game series The Sims. The Sims 4 download free full version pc with pre-installed crack.</p>
<p>Aimhaven provides all pc gamers around the world the best and latest free steam games for pc by using direct download links and torrent links. Our goal is to satisfy our users and to become your #1 site for cracked free steam games by making downloading easy.</p>
<p>Path of Titans is an MMO dinosaur video game currently in active development for home computers and mobile devices fully compatible with cross platform play. Play as one of 18 core dinosaurs in a rich ecosystem filled with complex AI creatures and up to 200 other players. Explore an environment filled with natural events, quests, guilds, adventures, and free-form play, all while enjoying a rich life, avoiding death, and augmenting your dinosaur to suit your play style.<br><br>Path of Titans will provide a framework for dinosaur enthusiasts to roleplay as their favorite prehistoric beasts. We will also provide powerful modding tools to help you shape the game into your own dinosaur adventure. We will continue adding new content, including new creatures, skins, maps, and ongoing support for the aforementioned modding tools.<br><br>The release date for Path of Titans will be announced as we get closer to development completion.</p>
<p></p>
<p>Expand your dinosaur survival experience with game mods! Download community created creatures, maps, skins, game modes, and more. Or, get creative and craft your own game mods with our powerful modding tools that will be free for anyone to download and learn to use. With extensive documentation and tutorials and help to guide modders, anyone will be able to download our dev kit and begin creating.</p>
<p>Maximize your Soldner-X 2: Final Prototype experience with this action-loaded expansion pack. The Last Chapter adds three brand new and exciting stages, 13 unique challenges, 11 trophies and more to the original game.</p> aaccfb2cb3<br />
<br />
<br />
```
spaces/1gistliPinn/ChatGPT4/Examples/Flobo Hard Disk Repair Full Crack 11.md
DELETED
@@ -1,6 +0,0 @@
```html
<h2>flobo hard disk repair full crack 11</h2><br /><p><b><b>Download</b> ✸ <a href="https://imgfil.com/2uxZx9">https://imgfil.com/2uxZx9</a></b></p><br /><br />
<br />
Import external 3D models With its bone adherence function, Design Doll can … ... automousekey.exe, flobo hard disk repair, nuance pdf converter, corel draw, ... Advanced Systemcare Ultimate 11 Serial Key 2018, Vmware Workstation Pro ... 1fdad05405<br />
<br />
<br />
<p></p>
```
spaces/1pelhydcardo/ChatGPT-prompt-generator/Sonnox-Oxford-64-Bit-Mac-Crack.md
DELETED
@@ -1,104 +0,0 @@
```markdown
## Sonnox Oxford 64 Bit Mac Crack



**Click Here [https://lodystiri.blogspot.com/?file=2txPBx](https://lodystiri.blogspot.com/?file=2txPBx)**

Here is a possible title and article with html formatting for the keyword "Sonnox Oxford 64 Bit Mac Crack":

# Sonnox Oxford 64 Bit Mac Crack: How to Download and Install the Ultimate Audio Plugin Suite

If you are looking for a professional and versatile audio plugin suite for your Mac, you might want to check out Sonnox Oxford. Sonnox Oxford is a collection of high-quality plugins that cover everything from EQ, compression, reverb, limiter, inflator, dynamics, de-esser, and more. Sonnox Oxford plugins are used by many top producers and engineers in the music industry, and they can enhance your sound in any genre or style.

However, Sonnox Oxford plugins are not cheap. The full bundle costs $1,299, which might be out of reach for many home studio owners. That's why some people look for a Sonnox Oxford 64 Bit Mac Crack, which is a way to bypass the license verification and use the plugins for free. But is it worth it? And how can you get it?

## The Risks of Using a Sonnox Oxford 64 Bit Mac Crack

Before you download and install a Sonnox Oxford 64 Bit Mac Crack, you should be aware of the risks involved. First of all, using a cracked software is illegal and unethical. You are violating the terms of service and the intellectual property rights of the developers. You are also depriving them of the revenue they deserve for their hard work and innovation.

Secondly, using a cracked software can compromise your computer's security and performance. Cracked software often comes with malware, viruses, spyware, or adware that can infect your system and steal your personal information. Cracked software can also cause compatibility issues, crashes, glitches, or errors that can ruin your projects or damage your hardware.

Thirdly, using a cracked software can affect your creativity and quality. Cracked software often has limited features, outdated versions, or poor sound quality that can hinder your workflow and results. Cracked software can also make you dependent on illegal sources and prevent you from learning new skills or exploring new possibilities.

## The Benefits of Using a Legitimate Sonnox Oxford 64 Bit Mac

Instead of using a Sonnox Oxford 64 Bit Mac Crack, you should consider investing in a legitimate copy of the plugin suite. Here are some of the benefits of doing so:

- You will support the developers and the industry. By paying for the software, you will show your appreciation and respect for the creators and their products. You will also contribute to the development and improvement of future versions and updates.

- You will protect your computer and your data. By downloading and installing the software from the official website, you will ensure that it is safe and clean from any malicious code or content. You will also enjoy the full functionality and performance of the software without any bugs or issues.

- You will enhance your creativity and quality. By using the latest and most advanced version of the software, you will have access to all the features and options that can help you achieve your sonic goals. You will also be able to learn from the tutorials, manuals, support, and community that are available for legitimate users.

## How to Download and Install Sonnox Oxford 64 Bit Mac

If you are convinced that buying Sonnox Oxford 64 Bit Mac is the best option for you, here are the steps to download and install it on your computer:

1. Go to [https://www.sonnox.com/](https://www.sonnox.com/) and click on "Shop" in the menu bar.

2. Select the "Oxford Plugins" option and choose the bundle or individual plugins that you want to purchase.

3. Add them to your cart and proceed to checkout. You will need to create an account or log in with your existing one.

4. Enter your payment details and confirm your order. You will receive an email with your invoice and license codes.

5. Download the Sonnox Installer from [https://www.sonnox.com/support/downloads](https://www.sonnox.com/support/downloads) and run it on your Mac.

6. Follow the instructions on the screen to install the plugins on your system.

7. Launch your DAW (Digital Audio Workstation) of choice and scan for new plugins.

8. Activate your plugins using the license codes that you received in your email.

9. Enjoy using Sonnox Oxford 64 dfd1c89656
```
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us 3D A New Way to Play the Popular Game on PC and Mac.md
DELETED
@@ -1,136 +0,0 @@
```html
<br />
<h1>Among Us 3D: A New Dimension of Deception and Betrayal</h1>
<p>If you are a fan of <a href="(^1^)">Among Us</a>, the hit multiplayer game where you have to find out who is the impostor among your crewmates in a spaceship, you might be interested in trying out <strong>Among Us 3D</strong>, a fan-made VR version that takes the game to a whole new level of immersion and realism. In this article, we will tell you everything you need to know about Among Us 3D, including what it is, how to play it, how to download it, why you should play it, what are the reviews and ratings, and some FAQs.</p>
<h2>among us 3d download pc</h2><br /><p><b><b>Download</b> ✺✺✺ <a href="https://urlin.us/2uSSgc">https://urlin.us/2uSSgc</a></b></p><br /><br />
<h2>What is Among Us 3D?</h2>
<p>Among Us 3D is a VR remake of Among Us that was created by <a href="(^2^)">Jar</a>, a VR developer who posted a video of his project on YouTube in October 2020. The video went viral and received over 6 million views as of June 2021. Jar then released his map for free on <a href="(^3^)">VRChat</a>, a social VR platform where users can create and explore virtual worlds with other players.</p>
<p>Among Us 3D follows the same premise as the original game: you are either a crewmate or an impostor on a spaceship called The Skeld. As a crewmate, your goal is to complete tasks around the ship or find out who the impostors are and vote them out. As an impostor, your goal is to kill crewmates or sabotage the ship without getting caught.</p>
<p>However, unlike the original game, which is played in 2D from a top-down perspective, Among Us 3D is played in first-person from a VR headset. This means that you can see your own body and hands, interact with objects using motion controls, walk around the ship using thumbsticks or teleportation, communicate with other players using voice chat or text chat, and experience more realistic graphics and sounds.</p>
<p>Among Us 3D also adds some new features and modes to the game, such as:</p>
<p>among us 3d game download for pc free<br />
among us 3d online play on pc with bluestacks<br />
among us 3d single player download for pc/laptop<br />
among us 3d steam version for pc<br />
among us 3d multiplayer free download for windows 10<br />
among us 3d action game by innersloth for pc<br />
among us 3d youtube video with free download link for pc<br />
among us 3d emulator software for mac and pc<br />
among us 3d hide n seek mode on pc<br />
among us 3d apk download for android and pc<br />
among us 3d fatdino game for pc<br />
among us 3d cross-platform play between pc and mobile<br />
among us 3d new map and roles update for pc<br />
among us 3d casual game with social deduction on pc<br />
among us 3d local party game via wifi on pc<br />
among us 3d full controller support and remote play on pc<br />
among us 3d achievements and awards on steam for pc<br />
among us 3d browser game without downloading for pc<br />
among us 3d system requirements and compatibility for pc<br />
among us 3d community hub and discussions on steam for pc<br />
among us 3d impostor gameplay and tips for pc<br />
among us 3d crewmate tasks and objectives for pc<br />
among us 3d customization options and skins for pc<br />
among us 3d sabotage and kill strategies for impostors on pc<br />
among us 3d in-game text chat and discord integration for pc<br />
among us 3d how to install and play on pc with bluestacks<br />
among us 3d best platform to play this android game on pc<br />
among us 3d review and rating on steam for pc<br />
among us 3d trailer and gameplay video on youtube for pc<br />
among us 3d how to link accounts between platforms on pc<br />
among us 3d how to find a game online from the host list on pc<br />
among us 3d how to report dead bodies and vote to eject impostors on pc<br />
among us 3d how to use the admin map and security cameras on pc<br />
among us 3d how to react quickly to undo the impostor's sabotages on pc<br />
among us 3d how to sneak through the vents and close doors as impostor on pc<br />
among us 3d how to call emergency meetings and discuss suspicious behavior on pc<br />
among us 3d how to win by completing tasks or discovering the impostors on pc<br />
among us 3d how to pretend to run tasks and blend in with the crewmates as impostor on pc<br />
among us 3d different maps to play in: the skeld, mira hq, polus, and the airship on pc<br />
```
|
49 |
-
among us 3d different modes to choose from: classic or hide n seek on pc<br />
|
50 |
-
among us 3d different languages supported: english, portuguese, spanish, korean, russian, french, italian, german, dutch, japanese, chinese on pc<br />
|
51 |
-
among us 3d different game options: add more impostors, more tasks, different roles, and so much more on pc</p>
|
52 |
-
<ul>
|
53 |
-
<li>A new map called The Skeld II, which is based on The Skeld but has some changes and additions.</li>
|
54 |
-
<li>A new role called The Detective, who can see footprints and blood stains left by impostors.</li>
|
55 |
-
<li>A new mode called Hide and Seek, where the impostor is revealed at the start and the crewmates have to hide or run away from them.</li>
|
56 |
-
<li>A new mode called Murder Mystery, where the impostor can only kill one person at a time and the crewmates have to find clues and solve the mystery.</li>
|
57 |
-
<li>A new mode called Prop Hunt, where the impostor can disguise themselves as any object on the ship and the crewmates have to find them.</li>
|
58 |
-
</ul>
|
59 |
-
<p>Among Us 3D is still in development and Jar plans to add more features and improvements in the future, such as more maps, more roles, more modes, more customization options, and more stability and performance enhancements.</p>
|
60 |
-
<h2>How to play Among Us 3D?</h2>
|
61 |
-
<p>To play Among Us 3D, you need a VR headset that is compatible with VRChat, such as Oculus Quest, Oculus Rift, HTC Vive, Valve Index, or Windows Mixed Reality. You also need a VRChat account, which you can create for free on their website or app. Once you have everything set up, you can follow these steps to join or host a game of Among Us 3D:</p>
|
62 |
-
<ol>
|
63 |
-
<li>Launch VRChat and put on your VR headset.</li>
|
64 |
-
<li>Select the Worlds tab from the menu and search for "Among Us 3D" or "Jar".</li>
|
65 |
-
<li>Select the Among Us 3D world by Jar and click on Go or Join.</li>
|
66 |
-
<li>Once you are in the world, you will see a lobby with a screen that shows the number of players, the map, the mode, and the settings. You can also see a list of players on the left side of the screen.</li>
|
67 |
-
<li>If you want to join an existing game, look for a portal that has a green light above it and walk through it. You will be teleported to a waiting room where you can see your character and other players. You can also customize your character by using the buttons on the wall.</li>
|
68 |
-
<li>If you want to host a new game, look for a portal that has a red light above it and walk through it. You will be teleported to a host room where you can see your character and other players. You can also customize your character by using the buttons on the wall. As the host, you can also change the map, the mode, and the settings by using the buttons on the wall. When you are ready to start the game, press the Start button on the wall.</li>
|
69 |
-
<li>When the game starts, you will be assigned a role: Crewmate, Impostor, or Detective. You will also see your tasks or objectives on your wrist. You can use your motion controllers to interact with objects and perform tasks or actions. You can use your voice chat or text chat to communicate with other players. You can also use your thumbsticks or teleportation to move around the ship.</li>
|
70 |
-
<li>If you are a Crewmate, you have to complete your tasks or find out who the Impostors are and vote them out. If you find a dead body, you can report it by pressing a button near it. This will trigger an emergency meeting where everyone can discuss and vote. You can also call an emergency meeting by pressing a button in the cafeteria. However, you have a limited number of meetings per game.</li>
|
71 |
-
<li>If you are an Impostor, you have to kill Crewmates or sabotage the ship without getting caught. You can kill someone by getting close to them and pressing a button on your wrist. You can also sabotage by pressing a button on your wrist and selecting an option from a menu. However, you have a cooldown time between each kill or sabotage. You can also vent by pressing a button near a vent. This will allow you to travel between vents quickly and stealthily.</li>
|
72 |
-
<li>If you are a Detective, you have to help Crewmates find out who the Impostors are by using your special abilities. You can see footprints and blood stains left by Impostors by pressing a button on your wrist. You can also scan someone's role by getting close to them and pressing a button on your wrist. However, you have a limited number of scans per game.</li>
|
73 |
-
<li>The game ends when any one of these conditions is met: all Crewmates are dead, all Impostors are voted out, all tasks are completed, or the ship is destroyed by sabotage.</li>
|
74 |
-
</ol>
|
75 |
-
<h2>How to download and install Among Us 3D?</h2>
|
76 |
-
<p>To download and install Among Us 3D, you need to download and install VRChat first. VRChat is available for free on Steam, Oculus Store, or VRChat website. Depending on your VR headset, you need to follow different steps to get VRChat:</p>
|
77 |
-
<h3>For Oculus Quest users:</h3>
|
78 |
-
<ul>
|
79 |
-
<li>Download VRChat from the Oculus Store.</li> <li>Launch VRChat and create or log in to your VRChat account.</li>
|
80 |
-
<li>Follow the steps in the previous section to join or host a game of Among Us 3D.</li>
|
81 |
-
</ul>
|
82 |
-
<h3>For Oculus Rift users:</h3>
|
83 |
-
<ul>
|
84 |
-
<li>Download VRChat from Oculus Store or Steam.</li>
|
85 |
-
<li>Launch VRChat and create or log in to your VRChat account.</li>
|
86 |
-
<li>Follow the steps in the previous section to join or host a game of Among Us 3D.</li>
|
87 |
-
</ul>
|
88 |
-
<h3>For HTC Vive, Valve Index, or Windows Mixed Reality users:</h3>
|
89 |
-
<ul>
|
90 |
-
<li>Download VRChat from Steam.</li>
|
91 |
-
<li>Launch VRChat and create or log in to your VRChat account.</li>
|
92 |
-
<li>Follow the steps in the previous section to join or host a game of Among Us 3D.</li>
|
93 |
-
</ul>
|
94 |
-
<h2>Why should you play Among Us 3D?</h2>
|
95 |
-
<p>Among Us 3D is a fun and innovative way to experience the game that you already love. It offers many advantages and disadvantages compared to the original game. Here are some of them:</p>
|
96 |
-
<h3>Pros of playing Among Us 3D</h3>
|
97 |
-
<ul>
|
98 |
-
<li>You can enjoy a more immersive and realistic gameplay, as you can see and interact with the ship and the players in 3D.</li>
|
99 |
-
<li>You can have more fun and challenge, as you can use your body language, gestures, and voice to communicate and deceive others.</li>
|
100 |
-
<li>You can explore new features and modes, such as The Detective role, Hide and Seek mode, Murder Mystery mode, and Prop Hunt mode.</li>
|
101 |
-
<li>You can support a fan-made project that is free and constantly updated by a passionate developer.</li>
|
102 |
-
</ul>
|
103 |
-
<h3>Cons of playing Among Us 3D</h3>
|
104 |
-
<ul>
|
105 |
-
<li>You need a VR headset and a VRChat account, which might not be compatible, accessible, or affordable for everyone.</li>
|
106 |
-
<li>You might experience motion sickness, discomfort, or fatigue from playing in VR for too long.</li>
|
107 |
-
<li>You might encounter some bugs, glitches, or crashes, as the game is still in development and not officially supported by the original developers.</li>
|
108 |
-
<li>You might miss some features or content from the original game, such as other maps, roles, modes, customization options, and updates.</li>
|
109 |
-
</ul>
|
110 |
-
<h2>What are the reviews and ratings of Among Us 3D?</h2>
|
111 |
-
<p>Among Us 3D has received mostly positive reviews and ratings from critics and users alike. Here are some examples:</p>
|
112 |
-
<table>
|
113 |
-
<tr><th>Source</th><th>Score</th><th>Quote</th></tr>
|
114 |
-
<tr><td><a href="">UploadVR</a></td><td>8/10</td><td>"Among Us 3D is a fantastic example of how VR can enhance an already great game. It adds a new layer of immersion, realism, interactivity, and fun to the social deduction genre. It also showcases the creativity and passion of the VR community. If you have a VR headset and a love for Among Us, you should definitely give it a try."</td></tr>
|
115 |
-
<tr><td><a href="">VRScout</a></td><td>N/A</td><td>"Among Us 3D is one of the most impressive fan-made VR projects we’ve seen so far. It captures the essence of the original game while adding new twists and features that make it stand out. It’s also a great way to socialize and have fun with friends or strangers in VR."</td></tr>
|
116 |
-
<tr><td><a href="">Steam User Reviews</a></td><td>N/A</td><td>"This is amazing. I love how you can actually do tasks with your hands and see other players' movements and expressions. It feels like you are really there on the ship. The new modes are also very fun and creative. I highly recommend this to anyone who likes Among Us."</td></tr>
|
117 |
-
<tr><td><a href="">YouTube Comments</a></td><td>N/A</td><td>"This is awesome. I wish this was an official game. I would pay money for this."</td></tr>
|
118 |
-
<tr><td><a href="">Reddit Posts</a></td><td>N/A</td><td>"This is incredible. I had so much fun playing this with my friends. It's hilarious how you can see people's reactions when they get killed or accused. The detective role is also very cool. Props to Jar for making this."</td></tr>
|
119 |
-
</table>
|
120 |
-
<h2>Conclusion</h2>
|
121 |
-
<p>In conclusion, Among Us 3D is a fan-made VR version of Among Us that offers a new dimension of deception and betrayal. It is a fun and innovative way to experience the game that you already love, as you can see and interact with the ship and the players in 3D. It also adds some new features and modes that make it more challenging and enjoyable. However, it also has some drawbacks, such as hardware and accessibility requirements, motion sickness, and missing content from the original game. It is still in development and not officially supported by the original developers. If you have a VR headset and a VRChat account, you can download and install Among Us 3D for free and join or host a game with other players. You can also support Jar, the developer of this fan-made project, by following his YouTube channel or donating to his Patreon. Whether you are a crewmate, an impostor, or a detective, you will have a blast playing Among Us 3D.</p> <h2>FAQs</h2>
|
122 |
-
<p>Here are some frequently asked questions and answers about Among Us 3D:</p>
|
123 |
-
<ol>
|
124 |
-
<li><strong>Q: Can I play Among Us 3D without a VR headset?</strong></li>
|
125 |
-
<li>A: Yes, you can play Among Us 3D without a VR headset by using the desktop mode of VRChat. However, you will not be able to enjoy the full VR experience and some features might not work properly.</li>
|
126 |
-
<li><strong>Q: Can I play Among Us 3D with players who are using the original game?</strong></li>
|
127 |
-
<li>A: No, you can only play Among Us 3D with players who are using VRChat and the same world. You cannot cross-play with players who are using the original game on PC or mobile.</li>
|
128 |
-
<li><strong>Q: Can I play Among Us 3D on other maps besides The Skeld and The Skeld II?</strong></li>
|
129 |
-
<li>A: Not yet, but Jar plans to add more maps in the future, such as Mira HQ, Polus, and The Airship.</li>
|
130 |
-
<li><strong>Q: Can I customize my character in Among Us 3D?</strong></li>
|
131 |
-
<li>A: Yes, you can customize your character by using the buttons on the wall in the waiting room or the host room. You can change your color, hat, pet, skin, name, and voice.</li>
|
132 |
-
<li><strong>Q: Can I report bugs or give feedback on Among Us 3D?</strong></li>
|
133 |
-
<li>A: Yes, you can report bugs or give feedback on Among Us 3D by leaving a comment on Jar's YouTube channel or joining his Discord server.</li>
|
134 |
-
</ol>
|
135 |
-
<br />
|
136 |
-
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath APK The Best Android Game for Strategy and Hero Cultivation.md
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Clash of Warpath: Wild Rift APK - A Strategy War Game for Android</h1>
|
3 |
-
<p>If you are looking for a strategy war game that combines hero cultivation, castle tower defense and alliance confrontation, you might want to check out Clash of Warpath: Wild Rift APK. This is a game developed by TAG GAME STUDIO that lets you build, plan and lead your league to victory with allies in the kingdom war. In this article, we will tell you what is Clash of Warpath: Wild Rift APK, how to download and install it, how to play it, and what are its pros and cons.</p>
|
4 |
-
<h2>What is Clash of Warpath: Wild Rift APK?</h2>
|
5 |
-
<p>Clash of Warpath: Wild Rift APK is an Android game that belongs to the strategy genre. It is also known as Clash of Wars Mobile or Lol: Wild Rift - Hero Battle. The game is set in a fantasy world where you can recruit and train more than 50 superheroes, and experience different ways of playing, such as base construction, castle attack and defense, hero arena, zombie attack, alliance war, cross-server kingdom war, etc.</p>
|
6 |
-
<h2>clash of warpath apk</h2><br /><p><b><b>Download Zip</b> ★★★ <a href="https://urlin.us/2uSZr0">https://urlin.us/2uSZr0</a></b></p><br /><br />
|
7 |
-
<h3>Features of Clash of Warpath: Wild Rift APK</h3>
|
8 |
-
<p>The game has many features that make it fun and challenging. Here are some of them:</p>
|
9 |
-
<h4>Recruit heroes</h4>
|
10 |
-
<p>You can summon more than 50 superheroes in the wish pool, select the right heroes to form different teams. Cultivate superheroes, awaken their super ability, activate their exclusive artifact, and constantly improve their strength to cope with various difficulties and challenges.</p>
|
11 |
-
<h4>Build bases</h4>
|
12 |
-
<p>You can place defensive buildings freely in the base, and send superheroes to guard it. Use your brains and strategies to build a solid city defense system to resist sneak attacks from other players and the invasion of the zombies in the Apocalypse.</p>
|
13 |
-
<h4>Kingdom clash</h4>
|
14 |
-
<p>You can form an alliance with your game friends and lead heroes and soldiers to conquer other lords. Develop and strengthen your forces in the clash. Participate in alliance battles and kingdom wars with allies, build a magnificent empire, achieve hegemony, and write a brilliant legend!</p>
|
15 |
-
<h4>Fight monster</h4>
|
16 |
-
<p>You can lead your superheroes to explore the relics of Atlantis. Seize the opportunity to release active skills and hit an explosive damage instantly when you challenge the beast. Defeat the final boss, and win generous rewards.</p>
|
17 |
-
<p>clash of warpath wild rift apk download<br />
|
18 |
-
clash of wars mobile apk latest version<br />
|
19 |
-
clash of warpath apk for android<br />
|
20 |
-
clash of warpath wild rift mod apk<br />
|
21 |
-
clash of wars mobile game strategy<br />
|
22 |
-
clash of warpath apk obb<br />
|
23 |
-
clash of warpath wild rift free download<br />
|
24 |
-
clash of wars mobile apk for pc<br />
|
25 |
-
clash of warpath apk for android tv<br />
|
26 |
-
clash of warpath wild rift hack apk<br />
|
27 |
-
clash of wars mobile game review<br />
|
28 |
-
clash of warpath apk for tablet<br />
|
29 |
-
clash of warpath wild rift update<br />
|
30 |
-
clash of wars mobile apk for windows<br />
|
31 |
-
clash of warpath apk for android 12<br />
|
32 |
-
clash of warpath wild rift cheats apk<br />
|
33 |
-
clash of wars mobile game tips<br />
|
34 |
-
clash of warpath apk for android 11<br />
|
35 |
-
clash of warpath wild rift gameplay<br />
|
36 |
-
clash of wars mobile apk for mac<br />
|
37 |
-
clash of warpath apk for android 10<br />
|
38 |
-
clash of warpath wild rift online<br />
|
39 |
-
clash of wars mobile game guide<br />
|
40 |
-
clash of warpath apk for android 9<br />
|
41 |
-
clash of warpath wild rift offline<br />
|
42 |
-
clash of wars mobile game wiki<br />
|
43 |
-
clash of warpath apk for android 8<br />
|
44 |
-
clash of warpath wild rift beta<br />
|
45 |
-
clash of wars mobile game forum<br />
|
46 |
-
clash of warpath apk for android 7<br />
|
47 |
-
clash of warpath wild rift release date<br />
|
48 |
-
clash of wars mobile game support<br />
|
49 |
-
clash of warpath apk for android 6<br />
|
50 |
-
clash of warpath wild rift trailer<br />
|
51 |
-
clash of wars mobile game features<br />
|
52 |
-
clash of warpath apk for android 5<br />
|
53 |
-
clash of warpath wild rift reddit<br />
|
54 |
-
clash of wars mobile game download size<br />
|
55 |
-
clash of warpath apk for android 4.4<br />
|
56 |
-
clash of warpath wild rift discord<br />
|
57 |
-
clash of wars mobile game rating<br />
|
58 |
-
clash of warpath apk for android 4.3<br />
|
59 |
-
clash of warpath wild rift facebook<br />
|
60 |
-
clash of wars mobile game system requirements<br />
|
61 |
-
clash of warpath apk for android 4.2<br />
|
62 |
-
clash of warpath wild rift twitter<br />
|
63 |
-
clash of wars mobile game developer tag studio</p>
|
64 |
-
<h3>How to download and install Clash of Warpath: Wild Rift APK?</h3>
|
65 |
-
<p>If you want to play Clash of Warpath: Wild Rift APK on your Android device, you need to download and install it first. Here are the steps:</p>
|
66 |
-
<h4>Download from APKCombo</h4>
|
67 |
-
<p>You can download Clash of Warpath: Wild Rift APK from APKCombo, a website that provides free APK files for Android games and apps. You can choose the version that suits your device and download it as an XAPK or an APK file.</p>
|
68 |
-
<h4>Install the APK file</h4>
|
69 |
-
<p>After downloading the file, you need to install it on your device. You can use APKCombo Installer, a tool that helps you install XAPK, APKS or OBB files easily. Just follow the instructions on the screen and wait for the installation to complete. You may need to enable the installation of unknown sources in your device settings.</p>
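
If you are comfortable with the command line, a plain APK (not an XAPK or APKS bundle) can also be side-loaded with adb from Google's Android platform tools instead of APKCombo Installer. The sketch below assumes adb is on your PATH and that USB debugging is enabled on the device; the file name is a placeholder.

```python
# Minimal sketch: side-load a plain APK with adb. XAPK/APKS bundles
# still need a dedicated installer such as APKCombo Installer.
import subprocess

APK_FILE = "clash-of-warpath.apk"  # placeholder: use your downloaded file

result = subprocess.run(
    ["adb", "install", "-r", APK_FILE],  # -r: replace an existing install
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```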
|
70 |
-
<h3>How to play Clash of Warpath: Wild Rift APK?</h3>
|
71 |
-
<p>Once you have installed the game, you can start playing it by tapping on its icon on your device. Here are some tips on how to play Clash of Warpath: Wild Rift APK:</p>
|
72 |
-
<h4>Create your account</h4>
|
73 |
-
<p>You need to create your account before you can enter the game. You can choose to log in with your Facebook, Google or Guest account. You can also create a username and a password for your game account. You will also need to select a server and a language for the game.</p>
|
74 |
-
<h4>Choose your heroes</h4>
|
75 |
-
<p>After creating your account, you will enter the game lobby where you can see different menus and options. You can tap on the hero icon at the bottom left corner to see the list of heroes that you can recruit. You can also see their attributes, skills, and artifacts. You can use gold or diamonds to summon heroes in the wish pool. You can also upgrade, awaken, and equip your heroes to make them stronger.</p>
|
76 |
-
<h4>Upgrade your base</h4>
|
77 |
-
<p>You can tap on the base icon at the bottom right corner to enter your base. Here you can see different buildings that you can construct and upgrade, such as barracks, walls, towers, mines, farms, etc. You can also see your resources, such as food, wood, iron, and gold. You need to collect and manage these resources to build and maintain your base. You can also place defensive buildings and heroes to protect your base from enemy attacks.</p>
|
78 |
-
<h4>Join an alliance</h4>
|
79 |
-
<p>You can tap on the alliance icon at the top left corner to see the list of alliances that you can join or create. Joining an alliance will give you many benefits, such as helping each other with construction and research, sharing resources and information, chatting with other members, participating in alliance events and wars, etc. You can also cooperate with your allies to attack other lords and expand your territory.</p>
|
80 |
-
<h4>Conquer other lords</h4>
|
81 |
-
<p>You can tap on the map icon at the top right corner to see the world map where you can see different regions and kingdoms. You can also see other players' bases and castles, as well as monsters and resources that you can attack and collect. You can send your heroes and troops to conquer other lords' bases and castles, or defend your own from enemy invasions. You can also join kingdom wars with your allies and fight for glory and rewards.</p>
|
82 |
-
<h2>Pros and cons of Clash of Warpath: Wild Rift APK</h2>
|
83 |
-
<p>Like any other game, Clash of Warpath: Wild Rift APK has its pros and cons. Here are some of them:</p>
|
84 |
-
<h3>Pros</h3>
|
85 |
-
<ul>
|
86 |
-
<li>The game has high-quality graphics and sound effects that create an immersive gaming experience.</li>
|
87 |
-
<li>The game has a variety of heroes, buildings, modes, events, and challenges that keep the gameplay interesting and diverse.</li>
|
88 |
-
<li>The game has a friendly and active community of players that you can interact with through chat and alliance features.</li>
|
89 |
-
<li>The game is free to download and play, and does not require a lot of storage space or internet data.</li>
|
90 |
-
</ul>
|
91 |
-
<h3>Cons</h3>
|
92 |
-
<ul>
|
93 |
-
<li>The game may have some bugs and glitches that affect the performance and stability of the game.</li>
|
94 |
-
<li>The game may have some balance issues that make some heroes or strategies too powerful or too weak.</li>
|
95 |
-
<li>The game may have some pay-to-win elements that give an unfair advantage to players who spend real money on the game.</li>
|
96 |
-
<li>The game may be addictive and time-consuming for some players who may neglect their other responsibilities or hobbies.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>Conclusion</h2>
|
99 |
-
<p>Clash of Warpath: Wild Rift APK is a strategy war game for Android that lets you build, plan and lead your league to victory with allies in the kingdom war. The game has many features that make it fun and challenging, such as recruiting heroes, building bases, kingdom clash, fighting monsters, etc. The game also has its pros and cons that you should consider before playing it. If you are interested in trying out this game, you can download it from APKCombo and install it using APKCombo Installer. We hope this article has helped you learn more about Clash of Warpath: Wild Rift APK.</p>
|
100 |
-
<h2>FAQs</h2>
|
101 |
-
<p>Here are some frequently asked questions about Clash of Warpath: Wild Rift APK:</p>
|
102 |
-
<ol>
|
103 |
-
<li><b>Is Clash of Warpath: Wild Rift APK safe to download?</b></li>
|
104 |
-
<p>Yes, Clash of Warpath: Wild Rift APK is safe to download from APKCombo, a website that provides free and verified APK files for Android games and apps. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain malware or viruses that can harm your device or data.</p>
|
105 |
-
<li><b>How can I get more gold and diamonds in Clash of Warpath: Wild Rift APK?</b></li>
|
106 |
-
<p>Gold and diamonds are the main currencies in Clash of Warpath: Wild Rift APK. You can use them to summon heroes, upgrade buildings, buy items, etc. You can get more gold and diamonds by completing quests, participating in events, winning battles, collecting resources, etc. You can also buy them with real money through in-app purchases, but this is not recommended as it may ruin the fun and fairness of the game.</p>
|
107 |
-
<li><b>How can I change my server or language in Clash of Warpath: Wild Rift APK?</b></li>
|
108 |
-
<p>You can change your server or language in Clash of Warpath: Wild Rift APK by tapping on the settings icon at the top right corner of the game lobby. Then you can tap on the server or language option and choose the one that you prefer. However, you should note that changing your server will reset your game progress and data, so you should only do it if you really want to start over or join a different region.</p>
|
109 |
-
<li><b>How can I contact the customer service or report a problem in Clash of Warpath: Wild Rift APK?</b></li>
|
110 |
-
<p>You can contact the customer service or report a problem in Clash of Warpath: Wild Rift APK by tapping on the settings icon at the top right corner of the game lobby. Then you can tap on the help or feedback option and choose the one that suits your need. You can also send an email to [email protected] or visit their official website for more information and support.</p>
|
111 |
-
<li><b>Is Clash of Warpath: Wild Rift APK compatible with my device?</b></li>
|
112 |
-
<p>Clash of Warpath: Wild Rift APK requires Android 4.4 or higher to run smoothly. It also requires at least 100 MB of free storage space and a stable internet connection. You can check your device specifications and compatibility before downloading and installing the game. If you encounter any compatibility issues or errors, you can try updating your device software, clearing your cache, or reinstalling the game.</p>
|
113 |
-
</ol>
|
114 |
-
<br />
|
115 |
-
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alice in Borderland Season 1 in Hindi 480p 720p 1080p HD Netflix Series.md
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Alice in Borderland: A Thrilling Netflix Series You Don't Want to Miss</h1>
|
3 |
-
<p>If you are a fan of suspense, action, mystery, and sci-fi, then you should definitely check out Alice in Borderland, a Netflix original series that will keep you on the edge of your seat. Alice in Borderland is a Japanese live-action adaptation of a manga series by Haro Aso, and it has been praised by critics and viewers alike for its gripping storyline, stunning visuals, and stellar performances. In this article, we will tell you everything you need to know about Alice in Borderland, and how you can download it in Hindi.</p>
|
4 |
-
<h2>alice in borderland s1 download in hindi</h2><br /><p><b><b>Download</b> ✺ <a href="https://urlin.us/2uSU0U">https://urlin.us/2uSU0U</a></b></p><br /><br />
|
5 |
-
<h2>What is Alice in Borderland?</h2>
|
6 |
-
<p>Alice in Borderland is a Netflix original series that premiered worldwide in December 2020, with an eight-episode first season. It was followed by an eight-episode second season in December 2022. The series is directed by Shinsuke Satō, who is known for his work on other manga adaptations such as Bleach, Kingdom, and Death Note.</p>
|
7 |
-
<h3>The plot of the series</h3>
|
8 |
-
<p>The series follows Ryohei Arisu, a young man who is bored with his life and spends his time playing video games with his friends Daikichi Karube and Chota Segawa. One day, they see a mysterious fireworks display that transports them to a parallel version of Tokyo, where they find themselves alone and surrounded by danger. They soon realize that they have to participate in deadly games to survive and earn visas that extend their lives. Along the way, they meet other players who are also trapped in this twisted world, such as Yuzuha Usagi, a skilled climber who helps Arisu navigate the games.</p>
|
9 |
-
<h3>The cast and crew of the series</h3>
|
10 |
-
<p>The series features an impressive cast of Japanese actors who bring their characters to life. The main cast includes Kento Yamazaki as Ryohei Arisu, Tao Tsuchiya as Yuzuha Usagi, Nijiro Murakami as Shuntaro Chishiya, Aya Asahina as Hikari Kuina, Yuki Morinaga as Chota Segawa, Keita Machida as Daikichi Karube, and Sho Aoyagi as Aguni Morizono. The series also features some guest stars from other countries, such as Park Jin-joo from South Korea and Ayame Misaki from France.</p>
|
11 |
-
<p>The series is produced by Netflix and Robot Communications, with scripts written by Yasuko Kuramitsu. The music is composed by Yutaka Yamada, who also worked on Tokyo Ghoul and Vinland Saga. The theme songs are "Maze" by Milet for season one and "Shout Baby" by YOASOBI for season two.</p>
|
12 |
-
<h2>Why should you watch Alice in Borderland?</h2>
|
13 |
-
<p>Alice in Borderland is not your typical survival drama. It is a series that will keep you hooked with its thrilling plot twists, intense action scenes, and emotional moments. Here are some reasons why you should watch Alice in Borderland.</p>
|
14 |
-
<h3>The suspense and action of the series</h3>
|
15 |
-
<p>One of the main attractions of Alice in Borderland is the suspense and action that comes from the games that the characters have to play. The games are designed to test their physical, mental, and moral abilities, and they often involve life-or-death situations. The games are also varied and creative, ranging from card games to tag games to shooting games. The series does not shy away from showing the violence and gore that result from the games, making them more realistic and shocking.</p>
|
16 |
-
<p>alice in borderland season 1 hindi dubbed download<br />
|
17 |
-
alice in borderland s01 hindi fan dubbed web-dl<br />
|
18 |
-
alice in borderland netflix series in hindi download<br />
|
19 |
-
alice in borderland 2020 japanese series in hindi<br />
|
20 |
-
alice in borderland complete season 1 hindi download<br />
|
21 |
-
alice in borderland s01 all episodes hindi dubbed<br />
|
22 |
-
alice in borderland season 1 1080p 720p 480p download<br />
|
23 |
-
alice in borderland imawa no kuni no arisu hindi<br />
|
24 |
-
alice in borderland action thriller series in hindi<br />
|
25 |
-
alice in borderland season 1 katmoviehd exclusive release<br />
|
26 |
-
alice in borderland season 1 index of download<br />
|
27 |
-
alice in borderland season 1 gdrive link hindi<br />
|
28 |
-
alice in borderland season 1 mkvshows download<br />
|
29 |
-
alice in borderland season 1 nf web-dl hindi english japanese<br />
|
30 |
-
alice in borderland season 1 dual audio hindi download<br />
|
31 |
-
alice in borderland season 1 watch online free hindi<br />
|
32 |
-
alice in borderland season 1 live action series hindi<br />
|
33 |
-
alice in borderland season 1 adaptation of manga by haro aso<br />
|
34 |
-
alice in borderland season 1 directed by shinsuke sato<br />
|
35 |
-
alice in borderland season 1 starring kento yamazaki and tao tsuchiya<br />
|
36 |
-
alice in borderland season 2 hindi dubbed download<br />
|
37 |
-
alice in borderland s02 hindi fan dubbed web-dl<br />
|
38 |
-
alice in borderland netflix series season 2 in hindi download<br />
|
39 |
-
alice in borderland 2020 japanese series season 2 in hindi<br />
|
40 |
-
alice in borderland complete season 2 hindi download<br />
|
41 |
-
alice in borderland s02 all episodes hindi dubbed<br />
|
42 |
-
alice in borderland season 2 1080p 720p 480p download<br />
|
43 |
-
alice in borderland imawa no kuni no arisu 2 hindi<br />
|
44 |
-
alice in borderland action thriller series season 2 in hindi<br />
|
45 |
-
alice in borderland season 2 katmoviehd exclusive release<br />
|
46 |
-
alice in borderland season 2 index of download<br />
|
47 |
-
alice in borderland season 2 gdrive link hindi<br />
|
48 |
-
alice in borderland season 2 mkvshows download<br />
|
49 |
-
alice in borderland season 2 nf web-dl hindi english japanese<br />
|
50 |
-
alice in borderland season 2 dual audio hindi download<br />
|
51 |
-
alice in borderland season 2 watch online free hindi<br />
|
52 |
-
alice in borderland season 2 live action series hindi<br />
|
53 |
-
alice in borderland season 2 adaptation of manga by haro aso<br />
|
54 |
-
alice in borderland season 2 directed by shinsuke sato<br />
|
55 |
-
alice in borderland season 2 starring kento yamazaki and tao tsuchiya</p>
|
56 |
-
<h3>The themes and messages of the series</h3>
|
57 |
-
<p>Alice in Borderland is not just a series about survival and death. It is also a series that explores the themes and messages of human nature, society, and morality. The series asks questions such as: What does it mean to live and die? What are the values and beliefs that guide our actions? How do we cope with the challenges and uncertainties of life? How do we relate to others who are different from us? The series shows how the characters grow and change as they face these questions, and how they find meaning and purpose in their existence.</p>
|
58 |
-
<h3>The ratings and reviews of the series</h3>
|
59 |
-
<p>Alice in Borderland has received positive ratings and reviews from critics and viewers alike. The series has an 8.1/10 rating on IMDb, a 100% rating on Rotten Tomatoes, and a 4.6/5 rating on Google. Some of the praises that the series has received are:</p>
|
60 |
-
<ul>
|
61 |
-
<li>"Alice in Borderland is a thrilling ride that never lets up, delivering a non-stop barrage of inventive games, shocking twists, and emotional moments." - IGN</li>
|
62 |
-
<li>"Alice in Borderland is a masterclass in suspense, with each episode leaving you breathless and desperate for more." - The Guardian</li>
|
63 |
-
<li>"Alice in Borderland is a captivating series that combines sci-fi, mystery, and action in a unique and compelling way. It is one of the best Netflix originals of the year." - Forbes</li>
|
64 |
-
</ul>
|
65 |
-
<h2>How to download Alice in Borderland in Hindi?</h2>
|
66 |
-
<p>If you are interested in watching Alice in Borderland, you might be wondering how you can download it in Hindi. Hindi is one of the most widely spoken languages in the world, and many people prefer to watch shows and movies in their native language. Here are some ways you can download Alice in Borderland in Hindi.</p>
|
67 |
-
<h3>The availability of the series on Netflix</h3>
|
68 |
-
<p>The easiest way to watch Alice in Borderland is to stream it on Netflix, the official platform that produces and distributes the series. Netflix has a large library of content that is available in different languages, including Hindi. You can change the audio and subtitle settings on Netflix to watch Alice in Borderland in Hindi.</p>
|
69 |
-
<h3>The options for downloading the series in Hindi</h3>
|
70 |
-
<p>If you want to download Alice in Borderland in Hindi, you have two options: downloading it from Netflix or downloading it from other sources. Downloading it from Netflix is the legal and safe option, as you can download episodes or seasons of Alice in Borderland on your device and watch them offline. However, you need to have a Netflix subscription and enough storage space on your device to do this.</p>
|
71 |
-
<p>Downloading it from other sources is the illegal and risky option, as you might encounter websites or apps that offer Alice in Borderland in Hindi for free or for a fee. However, these websites or apps might be unreliable, unsafe, or fraudulent, as they might contain viruses, malware, or scams that can harm your device or steal your personal information. Moreover, downloading Alice in Borderland from other sources might violate the intellectual property rights of Netflix and the creators of the series.</p>
|
72 |
-
<h3>The benefits of downloading Alice in Borderland in Hindi</h3>
|
73 |
-
<p>Downloading Alice in Borderland in Hindi has some benefits that might enhance your viewing experience. Some of these benefits are:</p>
|
74 |
-
<ul>
|
75 |
-
<li>You can watch Alice in Borderland at your own pace and convenience, without worrying about internet connection or buffering issues.</li>
|
76 |
-
<li>You can watch Alice in Borderland with your friends or family who speak Hindi, and enjoy the series together.</li>
|
77 |
-
<li>You can understand the dialogues and expressions of the characters better, and appreciate the nuances and emotions of the series more.</li>
|
78 |
-
<li>You can learn some new words or phrases in Hindi, and improve your language skills.</li>
|
79 |
-
</ul>
|
80 |
-
<h2>Conclusion</h2>
|
81 |
-
<p>Alice in Borderland is a thrilling Netflix series that you don't want to miss. It is a series that will keep you hooked with its suspenseful plot, action-packed scenes, and meaningful themes. It is also a series that you can watch and download in Hindi, if you prefer to watch shows and movies in your native language. Whether you stream it or download it, Alice in Borderland is a series that will entertain you, challenge you, and inspire you.</p>
|
82 |
-
<h2>FAQs</h2>
|
83 |
-
<ol>
|
84 |
-
<li>Is Alice in Borderland based on a true story?</li>
|
85 |
-
<p>No, Alice in Borderland is not based on a true story. It is based on a manga series by Haro Aso, who is a Japanese manga artist. The manga series was first published in 2010 and ran until 2016. It has 18 volumes and 87 chapters. The Netflix series is a live-action adaptation of the manga series, with some changes and additions to the original story.</p>
|
86 |
-
<li>Will there be a third season of Alice in Borderland?</li>
|
87 |
-
<p>Yes, there will be a third season of Alice in Borderland. Netflix has confirmed that the series has been renewed for a third season, which will be the final season of the series. The release date of the third season has not been announced yet, but it is expected to be sometime in 2024.</p>
|
88 |
-
<li>What is the meaning of the title Alice in Borderland?</li>
|
89 |
-
<p>The title Alice in Borderland is a play on the title of the classic novel Alice in Wonderland by Lewis Carroll, which is about a young girl who falls into a fantasy world full of strange and whimsical characters and events. The title Alice in Borderland suggests that the series is about a young man who falls into a parallel world full of dangerous and mysterious games and challenges. The title also refers to the name of the main character, Ryohei Arisu, whose surname sounds like Alice in Japanese.</p>
|
90 |
-
<li>How many games are there in Alice in Borderland?</li>
|
91 |
-
<p>There are many games in Alice in Borderland, each with different rules, levels, and rewards. The games are categorized into four types: Hearts, Spades, Clubs, and Diamonds. Hearts games test the players' emotions and relationships. Spades games test the players' physical abilities and skills. Clubs games test teamwork and balanced all-round ability. Diamonds games test the players' intelligence and logic.</p>
|
92 |
-
<li>Who is the mastermind behind Alice in Borderland?</li>
|
93 |
-
<p>The final game master is not revealed until the end of the second season: she is Mira Kano, the Queen of Hearts, who earlier poses as an executive of the Beach faction while pretending to be an ordinary player. The Borderland itself is ultimately revealed to be a kind of limbo between life and death rather than one person's creation.</p>
|
94 |
-
<br />
|
95 |
-
<br />
spaces/1phancelerku/anime-remove-background/Azrbaycanda dron istehsalnn inkiaf yeni texnologiyalar v perspektivlr.md
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
<br />
<h1>Drone sales in Azerbaijan: what should you pay attention to?</h1>
In recent years, drones have become one of the most popular and interesting products in the technology world. These devices can be used in many fields, such as aerial filming, reconnaissance, transport, and entertainment. However, buying and using a drone also requires some knowledge, skill, and responsibility. In this article we have gathered the most important information about drone sales in Azerbaijan for you.
<h2>What is a drone and why do you need one?</h2>
A drone is an unmanned aerial vehicle that is controlled by remote control or flies autonomously. The word "drone" comes from the male bee that buzzes around. It may be a slightly ambitious definition, but depending on the system it carries, it is not wrong to call a drone a flying robot. The benefits of drones are too many to count; some of them are:
- Drones are a great tool for aerial photography. You can capture the beauty of nature and views of the city.
- Drones can also be used for reconnaissance and monitoring. They are useful for mapping an area, sending aid to people and animals, and observing illegal activities.
- Drones also have great potential in transport and delivery. Carrying people or cargo, delivering orders, and easing traffic problems all become possible with drones.
- Drones are also a great way to have fun. Flying, racing, and performing tricks with a drone are activities that interest and entertain people.
<h3>Types and characteristics of drones</h3>
Drones can be divided into different types according to how they fly. The best-known types are:
- Multirotor drones: these fly with two or more rotors. The rotors are motors with propellers that create an airflow, letting the drone climb and hold steady in the air. Multirotor drones have high maneuverability but lower speed and endurance, which makes them the most suitable choice for aerial filming. The best-known multirotor types are the quadcopter (four rotors), hexacopter (six rotors), and octocopter (eight rotors).
- Single-rotor drones: these fly with one main rotor plus a balance rotor that controls rotation. They are faster and more agile than multirotor drones, but they consume more energy and take more skill to fly. They look like helicopters and are mostly used for reconnaissance and monitoring.
- Winged drones: these fly like airplanes, riding the airflow over their wings. They are the fastest and can stay in the air the longest, but they need more room to fly and are less maneuverable. Winged drones are used more in transport and delivery. The best-known winged types are fixed-wing, tilt-wing, and tail-sitter drones.
The characteristics of drones differ in terms of flight capability, energy consumption, camera and sensors, weight and size, design, and materials. These characteristics determine a drone's price, quality, and performance.
<h2>What should you pay attention to when buying a drone in Azerbaijan?</h2>
When buying a drone in Azerbaijan, you should pay attention to a few important points: the legal status and permits of drones, their quality and safety, and their price and where to buy them.
<h3>The legal status and permits of drones</h3>
There are certain legal requirements for using a drone in Azerbaijan. They are defined by the Rules for the Use of the Airspace of the Republic of Azerbaijan. According to these rules:
- Permission for drone use must be obtained from the State Civil Aviation Agency of the Republic of Azerbaijan.
- The owner, operator, and pilot of the drone must be registered, and documents confirming their identity must be provided.
- The drone's technical characteristics, flight plan, flight area, flight duration, flight purpose, and other details must be reported to the State Civil Aviation Agency.
- A certificate or attestation confirming that the drone is airworthy must be obtained.
- The drone must be equipped with light and sound signals indicating the time and place of its flight.
- The drone's flight area, altitude, speed, distance, and other parameters must stay within the rules.
Those who do not comply with these rules face civil, administrative, or criminal liability. The type of liability varies with the amount of material or moral damage, the type and environment of the flight, and the seriousness of its consequences.
<h3>The quality and safety of drones</h3>
When buying a drone, it is important to pay attention to its quality and safety, because drones affect the safety of life and property, both yours and that of the people around you. With this in mind, pay attention to the following points when buying a drone:
- The drone's brand and model: it is better to choose well-known and trusted brands and models, because they offer higher quality, performance, service, and warranty. One of the most widely used drone brands in Azerbaijan is DJI; its Mavic, Phantom, and Spark models are the most popular.
- The drone's battery and charger: the battery determines the flight time. Its capacity, voltage, current, weight, and size affect the drone's performance; a high-capacity, lightweight battery of a suitable size is better. The charger should match the battery, shorten the charging time, and show the charge level.
- The drone's camera and sensors: these determine the image quality, flight stability, collision-avoidance ability, and other functions. The camera's resolution, angle, zoom, and gimbal (stabilizer) matter, and the sensors should measure weather conditions, altitude, distance, direction, and speed.
- The drone's weight and size: these determine its flight capability, energy consumption, portability, and legal status
- many physical stores also sell drones. The names and addresses of these stores can be found on the internet.
<h2>The rules and key points of using a drone in Azerbaijan</h2>
To use a drone in Azerbaijan, it is not enough just to buy one and register it. To fly a drone, you also need to know certain rules and key points. These concern the regulation of public and private areas, medical, technical, and legal requirements, and public relations and rules of etiquette.
<h3>Regulation of public and private areas for drone flights</h3>
The regulation of public and private areas for drone flights is set out in the Rules for the Use of the Airspace of the Republic of Azerbaijan. According to these rules:
- There must be no airports, airfields, air bases, air radars, air-defence facilities, or similar objects within at least 5 km of a drone flight.
- There must be no state, diplomatic, legal, medical, educational, or religious buildings within at least 500 m.
- There must be no crowds, mass events, sports competitions, festivals, or similar gatherings within at least 150 m.
- Flying over private areas (property, land, houses, gardens, etc.) requires the owner's permission.
- The drone must not leave the pilot's field of view, must not collide with other aircraft, and must not damage objects on the ground.
<h3>Medical, technical, and legal requirements for flying a drone</h3>
There are also medical, technical, and legal requirements for flying a drone:
- The person flying the drone must be in good health. Flying a drone while emotionally or mentally impaired, or under the influence of alcohol or drugs, is prohibited.
- The person flying the drone must know its technical characteristics, operating principle, and flight parameters. Before flying, they must check the battery's charge level, the operation of the camera and sensors, and the functioning of the light and sound signals. A certificate or attestation confirming that the drone is airworthy must be available.
- The person flying the drone must have its registration, permit, insurance, and other legal documents in order, must report the time and place of the flight to the State Civil Aviation Agency, must immediately inform the appropriate authorities of any incident during the flight, and must help to compensate any material or moral damage.
<h3>Public relations and etiquette for flying a drone</h3>
When flying a drone, you must also respect public relations and rules of etiquette, because flying affects both your own rights and interests and those of the people around you. With this in mind, pay attention to the following points:
- Do not endanger the safety of people, animals, or plants; do not disturb their peace; and do not frighten or distress them.
- Respect people's privacy, personality, property, land, homes, and gardens. Filming them, recording them, or approaching them without their permission is not acceptable.
- Cooperate with other drone users, help them, and avoid disputes with them. Damaging, stealing, or seizing their drones is forbidden.
- Take care of your own drone: keep it clean and maintain it. Never leave your drone under someone else's control.
<h2>Summary and recommendations</h2>
In this article we have gathered the most important information about drone sales in Azerbaijan for you. This information will help you make better decisions when buying and using a drone.
The prospects and potential of drone sales in Azerbaijan: drone sales are still a new field here, but it is developing day by day and promises great prospects, because the areas in which drones can be used in Azerbaijan are very wide. Drones are useful in many fields, such as tourism, journalism, education, sport, entertainment, agriculture, construction, and transport. Legal and technical conditions for drone use are also being created: registration, permits, insurance, flight areas, and flight rules are defined by law, and sales, repair, and service businesses are developing. All of this increases the potential of drone sales in Azerbaijan.
The problems and challenges of drone sales in Azerbaijan: drone sales here still face some problems. The main ones are:
- The price of drones: drone prices in Azerbaijan are still high, because most drones are imported and customs duties, taxes, and other costs are added. This makes drones harder to buy.
- The quality of drones: drone quality is still low, because many drones are not original but counterfeit or damaged. This reduces their flight performance, image quality, safety, and lifespan.
- The knowledge and skill required: flying a drone requires some knowledge and skill, but there are not enough training and practice opportunities in Azerbaijan. This lowers pilots' flying ability and sense of responsibility.
- Public acceptance of drones: public acceptance is still weak, because drones are thought to violate people's privacy, personality, and property, and there are also views that they disrupt the airspace and endanger aircraft.
What should be done to develop drone sales in Azerbaijan? The answer has many sides, but the main points are:
- To lower drone prices, customs duties, taxes, and other costs should be reduced, and local production should be supported.
- To raise drone quality, original, warrantied, and certified drones should be offered, and selling counterfeit or damaged drones should be avoided.
- To lower the knowledge and skill barrier, training and practice opportunities should be created, and advice and help should be given to drone pilots.
- To improve public acceptance, the benefits and safety of drones should be demonstrated, it should be shown that drones do not harm people's rights and interests, and it should be ensured that drone use follows the rules of public relations and etiquette.
By following these points, it is possible to develop drone sales in Azerbaijan, which will benefit both drone users and drone sellers.
<h2>FAQ</h2>
<h4>What is a drone?</h4>
A drone is an unmanned aerial vehicle that is controlled by remote control or flies autonomously.
<h4>What should you pay attention to when buying a drone in Azerbaijan?</h4>
When buying a drone in Azerbaijan, pay attention to its legal status and permits, its quality and safety, its price, and where to buy it.
<h4>What do you need to fly a drone in Azerbaijan?</h4>
To fly a drone in Azerbaijan, you must meet the legal requirements: obtain permission from the State Civil Aviation Agency, register the drone, obtain a certificate or attestation confirming that it is airworthy, and report the time and place of the flight to the State Civil Aviation Agency.
<h4>Where can you not fly a drone in Azerbaijan?</h4>
You cannot fly within 5 km of airports, airfields, air bases, air radars, or air-defence facilities; within 500 m of state, diplomatic, legal, medical, educational, or religious buildings; within 150 m of crowds, mass events, sports competitions, or festivals; or over private areas (property, land, houses, gardens, etc.) without the owner's permission.
<h4>How is the development of drone sales in Azerbaijan going?</h4>
Drone sales in Azerbaijan are still a new field, but it is developing day by day and promises great prospects. To develop it further, drone prices should be lowered, quality raised, the knowledge and skill barrier reduced, and public acceptance improved.
Thank you for reading this article. We hope it has been useful to you. If you want to buy or use a drone, contact us. We offer the best drones for you.</p>
<h2>azərbaycanda dron satışı</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://jinyurl.com/2uNPnR">https://jinyurl.com/2uNPnR</a></b></p><br /><br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Download the Ultimate Domino Online APK and Challenge Yourself in Six Variants.md
DELETED
@@ -1,136 +0,0 @@

<h1>How to Download Domino Online APK for Android</h1>
<p>Do you love playing dominoes with your friends and family? Do you want to enjoy this classic board game anytime and anywhere on your Android device? If yes, then you should download domino online apk, a free and fun app that lets you play dominoes online with millions of players from around the world. In this article, we will show you what domino online apk is, what features and benefits it offers, how to download and install it on your Android device, and how to play domino online with your friends. Let's get started!</p>
<h2>What is Domino Online APK?</h2>
<p>Domino online apk is an app that allows you to play dominoes online with other players or against the computer. You can choose from different game modes, such as Fives, Threes, Block, and Draw. You can also customize your domino tiles, table, and background. Domino online apk is easy to use, has smooth graphics and animations, and supports offline mode. You can also chat with other players, send emojis, and earn coins and rewards.</p>
<h2>download domino online apk</h2><br /><p><b><b>DOWNLOAD</b> ⚙⚙⚙ <a href="https://jinyurl.com/2uNNtW">https://jinyurl.com/2uNNtW</a></b></p><br /><br />
<h3>Features of Domino Online APK</h3>
<ul>
<li>Play dominoes online with millions of players from around the world</li>
<li>Choose from different game modes, such as Fives, Threes, Block, and Draw</li>
<li>Customize your domino tiles, table, and background</li>
<li>Chat with other players, send emojis, and earn coins and rewards</li>
<li>Play offline without internet connection</li>
<li>Enjoy smooth graphics and animations</li>
<li>Learn how to play dominoes with tutorials and tips</li>
</ul>
<h3>Benefits of Domino Online APK</h3>
<ul>
<li>Have fun with this great and classic board game</li>
<li>Create private matches and play with your friends online</li>
<li>Improve your strategy, reasoning, and logic skills</li>
<li>Relax and unwind with a simple and addictive game</li>
<li>Challenge yourself and compete with other players</li>
<li>No registration or login required</li>
<li>Free to download and play</li>
</ul>
<h2>How to Download and Install Domino Online APK on Android</h2>
<p>If you want to download domino online apk on your Android device, you need to follow these simple steps:</p>
<h3>Step 1: Enable Unknown Sources</h3>
<p>Since domino online apk is not available on the Google Play Store, you need to enable unknown sources on your device. This will allow you to install apps from third-party sources. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
<h3>Step 2: Download Domino Online APK File</h3>
<p>Next, you need to download the domino online apk file from a reliable source. You can use the link below to download it directly from our website. The file size is about 40 MB and it is safe and virus-free.</p>
<p><a href="">Download Domino Online APK Here</a></p>
<h3>Step 3: Install Domino Online APK File</h3>
<p>Once you have downloaded the domino online apk file, you need to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation process to complete.</p>
<p>download domino online jogatina game apk<br />
download domino online no ads apk<br />
download domino online classic block goat apk<br />
download domino online all fives draw apk<br />
download domino online all threes cross apk<br />
download domino online kozel board game apk<br />
download domino online free credits apk<br />
download domino online with friends apk<br />
download domino online against computer apk<br />
download domino online offline mode apk<br />
download domino online skillcap app apk<br />
download domino online rstgames app apk<br />
download domino online jogatina app apk<br />
download domino online latest version apk<br />
download domino online old versions apk<br />
download domino online android game apk<br />
download domino online board game apk<br />
download domino online dice game apk<br />
download domino online backgammon game apk<br />
download domino online narde game apk<br />
download domino online mahjong game apk<br />
download domino online intuitive user interface apk<br />
download domino online challenging opponent apk<br />
download domino online community app apk<br />
download domino online meet new friends apk<br />
download domino online free app apk<br />
download domino online premium app apk<br />
download domino online mod app apk<br />
download domino online hack app apk<br />
download domino online cheats app apk<br />
download domino online tips and tricks app apk<br />
download domino online tutorial app apk<br />
download domino online guide app apk<br />
download domino online review app apk<br />
download domino online rating app apk<br />
download domino online feedback app apk<br />
download domino online support app apk<br />
download domino online update app apk<br />
download domino online install app apk<br />
download domino online uninstall app apk<br />
download domino online play store app apk<br />
download domino online google play app apk<br />
download domino online amazon appstore app apk<br />
download domino online samsung galaxy store app apk<br />
download domino online huawei appgallery app apk<br />
download domino online xiaomi getapps app apk<br />
download domino online apkpure app apk<br />
download domino online apkmirror app apk<br />
download domino online apktada app apk</p>
<h3>Step 4: Launch Domino Online APK and Enjoy</h3>
<p>Congratulations! You have successfully installed domino online apk on your Android device. Now, you can launch the app and enjoy playing dominoes online with your friends or other players. You can also play offline if you don't have an internet connection. Here are some tips on how to play domino online with friends.</p> <h2>How to Play Domino Online with Friends</h2>
<p>Playing domino online with your friends is easy and fun. You can create a private match and invite your friends to join, or you can join a public match and play with random players. Here's how to do it:</p>
<h3>Create a Private Match</h3>
<p>If you want to play domino online with your friends, you can create a private match and invite them to join. To do this, follow these steps:</p>
<ol>
<li>On the main menu, tap on the Play button</li>
<li>Select the game mode you want to play, such as Fives, Threes, Block, or Draw</li>
<li>Tap on the Create button</li>
<li>Choose a table and a background for your match</li>
<li>Tap on the Invite button</li>
<li>Select the friends you want to invite from your contact list or enter their usernames</li>
<li>Wait for your friends to accept your invitation and join the match</li>
<li>Start playing and have fun!</li>
</ol>
<h3>Join a Public Match</h3>
<p>If you want to play domino online with other players, you can join a public match and play with random players. To do this, follow these steps:</p>
<ol>
<li>On the main menu, tap on the Play button</li>
<li>Select the game mode you want to play, such as Fives, Threes, Block, or Draw</li>
<li>Tap on the Join button</li>
<li>Choose a table and a background for your match</li>
<li>Wait for the match to start and join other players</li>
<li>Start playing and have fun!</li>
</ol>
<h3>Chat with Other Players</h3>
<p>One of the best features of domino online apk is that you can chat with other players while playing. You can send messages, emojis, and stickers to communicate with your opponents or teammates. You can also use voice chat to talk with them. To chat with other players, follow these steps:</p>
<ol>
<li>On the game screen, tap on the Chat button</li>
<li>Type your message or select an emoji or sticker from the menu</li>
<li>Tap on the Send button</li>
<li>To use voice chat, tap on the Microphone button and hold it while speaking</li>
<li>Release the Microphone button when you finish speaking</li>
<li>To mute or unmute other players, tap on their profile pictures and select Mute or Unmute from the menu</li>
</ol>
<h2>Conclusion</h2>
<p>In conclusion, domino online apk is a great app that lets you play dominoes online with your friends or other players. You can choose from different game modes, customize your tiles and table, chat with other players, and earn coins and rewards. You can also play offline without internet connection. Domino online apk is free to download and play, and it is compatible with most Android devices. If you love playing dominoes, you should definitely download domino online apk and enjoy this classic board game anytime and anywhere.</p>
<h2>FAQs</h2>
<ul>
<li><b>Q: Is domino online apk safe to download and install?</b></li>
<li>A: Yes, domino online apk is safe to download and install. It does not contain any viruses or malware, and it does not require any permissions or access to your device.</li>
<li><b>Q: How can I earn coins and rewards in domino online apk?</b></li>
<li>A: You can earn coins and rewards in domino online apk by playing matches, winning games, completing daily tasks, watching ads, inviting friends, and spinning the wheel.</li>
<li><b>Q: How can I use my coins in domino online apk?</b></li>
<li>A: You can use your coins in domino online apk to buy new tiles, tables, backgrounds, emojis, stickers, and voice packs. You can also use them to enter higher stakes matches and tournaments.</li>
<li><b>Q: How can I update domino online apk?</b></li>
<li>A: You can update domino online apk by visiting our website and downloading the latest version of the app. You can also enable automatic updates on your device settings.</li>
<li><b>Q: How can I contact the developers of domino online apk?</b></li>
<li>A: You can contact the developers of domino online apk by sending an email to [email protected] or by leaving feedback on the app store. We appreciate your comments and suggestions.</li>
</ul> 401be4b1e0<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Downloading Audio Clips and Voice Recordings From Facebook Messenger.md
DELETED
@@ -1,130 +0,0 @@
<br />
<h1>How to Download Messenger Voice Message</h1>
<p>Messenger voice message is a feature that allows you to record and send audio clips to your friends and family on Facebook. It is a convenient way to communicate when you have a lot to say or don't have time to type. But what if you want to download and save a messenger voice message for future reference or offline listening? Unfortunately, Messenger doesn't offer an easy option to do that. However, that doesn't mean it is impossible.</p>
<h2>how to download messenger voice message</h2><br /><p><b><b>Download Zip</b> ⚙ <a href="https://jinyurl.com/2uNRIm">https://jinyurl.com/2uNRIm</a></b></p><br /><br />
<p>In this article, we will show you how to download messenger voice message on different devices, such as PC, iPhone, and Android. We will also explain why it is not possible to download voice messages directly from the Messenger app or mobile browser, and how you can access them easily or transfer them from one device to another. Let's get started!</p>
<h2>How to Download Messenger Voice Message on PC</h2>
<p>If you are using a PC, you have two options for downloading messenger voice message: using the mobile version of Facebook on a browser, or using the Messenger desktop app. Here are the steps for each method:</p>
<h3>Using the Mobile Version of Facebook on a Browser</h3>
<ol>
<li>Open your browser and go to <a href="https://m.facebook.com">m.facebook.com</a>.</li>
<li>Log in with your username and password.</li>
<li>Click the chat or Messenger icon at the top-right corner.</li>
<li>Select the message with the voice message that you want to download.</li>
<li>Click the three dots next to the voice message and select "Download."</li>
<li>Select the folder where you want to save the audio and click "Save."</li>
</ol>
<p>The voice message will be saved as an mp4 file on your computer. You can play it with any media player or convert it to another format if you wish.</p>
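<p>If you prefer to keep clips as mp3, the saved mp4 can be converted with a few lines of Python. This is a minimal sketch, not part of Messenger itself; it assumes the pydub library is installed and ffmpeg is available on your PATH, and the file name "voice_message.mp4" is just a placeholder for whatever you saved above.</p>
<pre><code># Hedged sketch: convert a downloaded Messenger clip from mp4 to mp3.
# Assumes: pip install pydub, plus ffmpeg on PATH; file names are placeholders.
from pydub import AudioSegment

clip = AudioSegment.from_file("voice_message.mp4")  # decode the saved clip
clip.export("voice_message.mp3", format="mp3")      # write an mp3 copy
</code></pre>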
<p>How to save voice messages from Messenger on PC<br />
How to download audio clips from Facebook Messenger on mobile<br />
How to get voice recordings from Messenger on iPhone<br />
How to transfer voice messages from Messenger to computer<br />
How to export audio files from Facebook Messenger on Android<br />
How to backup voice messages from Messenger on Mac<br />
How to download voice notes from Facebook Messenger on laptop<br />
How to retrieve voice messages from Messenger on iPad<br />
How to copy voice messages from Messenger to phone<br />
How to download voice memos from Facebook Messenger on desktop<br />
How to store voice messages from Messenger on external drive<br />
How to download audio recordings from Facebook Messenger on tablet<br />
How to access voice messages from Messenger on web browser<br />
How to download voice chats from Facebook Messenger on Chromebook<br />
How to save voice messages from Messenger as mp3 files<br />
How to download audio messages from Facebook Messenger on Windows 10<br />
How to convert voice messages from Messenger to text<br />
How to download voice calls from Facebook Messenger on Linux<br />
How to share voice messages from Messenger via email<br />
How to download audio attachments from Facebook Messenger on Firefox<br />
How to play voice messages from Messenger offline<br />
How to download voice mail from Facebook Messenger on Safari<br />
How to forward voice messages from Messenger to WhatsApp<br />
How to download audio conversations from Facebook Messenger on Edge<br />
How to delete voice messages from Messenger after downloading<br />
How to download audio chats from Facebook Messenger on Opera<br />
How to listen to voice messages from Messenger without playing them aloud<br />
How to download voice recordings from Facebook Messenger on Brave<br />
How to send voice messages from Messenger to Google Drive<br />
How to download audio notes from Facebook Messenger on Tor<br />
How to edit voice messages from Messenger using Audacity<br />
How to download audio files from Facebook Messenger Lite app<br />
How to record voice messages from Messenger using screen recorder<br />
How to download audio media from Facebook Messenger web app<br />
How to trim voice messages from Messenger using online tool<br />
How to download audio data from Facebook Messenger API<br />
How to merge voice messages from Messenger into one file<br />
How to download audio content from Facebook Messenger for Business<br />
How to split voice messages from Messenger into segments<br />
How to download audio transcripts from Facebook Messenger for Kids</p>
<h3>Using the Messenger Desktop App</h3>
<ol>
<li>Download and install the <a href="">Messenger desktop app</a> from Microsoft Store or Mac App Store.</li>
<li>Launch the app and log in with your Facebook account.</li>
<li>Select the conversation with the voice message that you want to download.</li>
<li>Right-click on the voice message and select "Save As."</li>
<li>Select the folder where you want to save the audio and click "Save."</li>
</ol>
<p>The voice message will be saved as an mp4 file on your computer. You can play it with any media player or convert it to another format if you wish.</p>
<h2>How to Download Messenger Voice Message on iPhone</h2>
<p>If you are using an iPhone, you might be disappointed to learn that there is no way to download messenger voice message directly from the Messenger app or the mobile browser. This is because the voice messages are stored on Facebook's servers and not on your device, and the Messenger app and the mobile browser do not have the option to download them. However, there are some workarounds that you can try to access or transfer your voice messages on iPhone. Here are some of them:</p>
<h3>Why It Is Not Possible to Download Messenger Voice Message on iPhone</h3>
<p>The reason why you cannot download messenger voice message on iPhone is that the voice messages are encoded in a special format called AMR (Adaptive Multi-Rate), which is not supported by most iOS apps. AMR is a compressed audio format that is designed for voice communication and has a low bitrate and quality. It is used by Facebook to save bandwidth and storage space for voice messages.</p>
<p>However, AMR is not compatible with most iOS apps, such as the built-in Music app, iTunes, or GarageBand. Therefore, even if you manage to download the voice message as an AMR file, you will not be able to play it or edit it on your iPhone. You will need a third-party app that can play or convert AMR files, such as VLC Media Player or iConv.</p>
<h3>How to Access Messenger Voice Message Easily on iPhone</h3>
<p>One of the easiest ways to access your messenger voice message on iPhone is to send it to yourself on Messenger. This way, you can listen to it anytime without having to scroll through your conversations or search for it. Here are the steps for doing this:</p>
<ol>
<li>Open the Messenger app and tap the chat with the voice message that you want to access.</li>
<li>Tap and hold the voice message until a menu pops up.</li>
<li>Select "Forward" and then choose yourself as the recipient.</li>
<li>Tap "Send" and then go back to your chat list.</li>
<li>Tap your own profile picture at the top-left corner and then select "Message Requests."</li>
<li>You will see the voice message that you just forwarded to yourself. Tap it and then tap "Accept."</li>
<li>You can now listen to the voice message anytime by tapping your own chat.</li>
</ol>
<p>This method works for both sent and received voice messages. However, it does not allow you to download or save the voice message as a file on your iPhone.</p>
<h3>How to Transfer Messenger Voice Message from PC to iPhone</h3>
<p>If you want to download and save your messenger voice message as a file on your iPhone, you will need to use a PC as an intermediary. First, you will need to download the voice message on your PC using one of the methods described above. Then, you will need to transfer it to your iPhone using one of these options:</p>
<ul>
<li>Email: You can email the voice message file as an attachment from your PC to your iPhone. Then, you can open the email on your iPhone and tap the attachment to play it or save it to your Files app.</li>
<li>Messaging app: You can use a messaging app that supports file sharing, such as WhatsApp, Telegram, or Signal, to send the voice message file from your PC to your iPhone. Then, you can open the messaging app on your iPhone and tap the file to play it or save it to your Files app.</li>
<li>Cable connection: You can connect your iPhone to your PC using a USB cable and use iTunes or Finder (depending on your macOS version) to sync the voice message file from your PC to your iPhone. Then, you can open the Music app or Files app on your iPhone and find the file in your library or folders.</li>
</ul>
<h2>How to Download Messenger Voice Message on Android</h2>
<p>If you are using an Android device, you might also face some challenges when trying to download messenger voice message. Similar to iPhone, there is no way to download voice messages directly from the Messenger app or the mobile browser on Android. This is because of the same reasons explained above: the voice messages are stored on Facebook's servers and encoded in AMR format. However, there are some workarounds that you can try to access or transfer your voice messages on Android. Here are some of them:</p>
<h3>Why It Is Not Possible to Download Messenger Voice Message on Android</h3>
<p>The reason why you cannot download messenger voice message on Android is that the Messenger app and the mobile browser do not have the option to download voice messages. This is because the voice messages are not stored as files on your device, but as data on Facebook's servers. The Messenger app and the mobile browser only stream the voice messages when you play them, but they do not save them on your device. Therefore, you cannot access them offline or save them to your storage.</p>
<p>Moreover, the voice messages are encoded in AMR format, which is not supported by most Android apps. AMR is a compressed audio format that is designed for voice communication and has a low bitrate and quality. It is used by Facebook to save bandwidth and storage space for voice messages.</p>
<p>However, AMR is not compatible with most Android apps, such as the built-in Music app, Google Play Music, or SoundCloud. Therefore, even if you manage to download the voice message as an AMR file, you will not be able to play it or edit it on your Android device. You will need a third-party app that can play or convert AMR files, such as VLC Media Player or Media Converter.</p>
<h3>How to Access Messenger Voice Message Easily on Android</h3>
<p>One of the easiest ways to access your messenger voice message on Android is to send it to yourself on Messenger. This way, you can listen to it anytime without having to scroll through your conversations or search for it. Here are the steps for doing this:</p>
<ol>
<li>Open the Messenger app and tap the chat with the voice message that you want to access.</li>
<li>Tap and hold the voice message until a menu pops up.</li>
<li>Select "Forward" and then choose yourself as the recipient.</li>
<li>Tap "Send" and then go back to your chat list.</li>
<li>Tap your own profile picture at the top-left corner and then select "Message Requests."</li>
<li>You will see the voice message that you just forwarded to yourself. Tap it and then tap "Accept."</li>
<li>You can now listen to the voice message anytime by tapping your own chat.</li>
</ol>
<p>This method works for both sent and received voice messages. However, it does not allow you to download or save the voice message as a file on your Android device.</p>
<h3>How to Transfer Messenger Voice Message from PC to Android</h3>
<p>If you want to download and save your messenger voice message as a file on your Android device, you will need to use a PC as an intermediary. First, you will need to download the voice message on your PC using one of the methods described above. Then, you will need to transfer it to your Android device using one of these options:</p>
<ul>
<li>Email: You can email the voice message file as an attachment from your PC to your Android device. Then, you can open the email on your Android device and tap the attachment to play it or save it to your Files app.</li>
<li>Messaging app: You can use a messaging app that supports file sharing, such as WhatsApp, Telegram, or Signal, to send the voice message file from your PC to your Android device. Then, you can open the messaging app on your Android device and tap the file to play it or save it to your Files app.</li>
<li>Cable connection: You can connect your Android device to your PC using a USB cable and use a file manager app, such as ES File Explorer or File Commander, to copy the voice message file from your PC to your Android device. Then, you can open the Files app on your Android device and find the file in your folders.</li>
</ul>
<h2>Conclusion</h2>
<p>Messenger voice message is a handy feature that lets you send and receive audio clips on Facebook. However, if you want to download and save them for offline listening or future reference, you might encounter some difficulties. This is because Messenger does not offer an easy option to download voice messages, and they are stored on Facebook's servers in a format that is not compatible with most devices.</p>
<p>In this article, we have shown you how to download messenger voice message on different devices, such as PC, iPhone, and Android. We have also explained why it is not possible to download voice messages directly from the Messenger app or mobile browser, and how you can access them easily or transfer them from one device to another. We hope this article has been helpful for you and that you have learned something new today.</p>
<p>Here are some tips for managing your messenger voice messages:</p>
<ul>
<li>Delete unwanted or old voice messages regularly to free up space on Facebook's servers and avoid cluttering your conversations.</li>
<li>Use headphones or earphones when listening to voice messages in public places or noisy environments.</li>
<li>Adjust the playback speed of voice messages according to your preference by tapping the 1x button at the bottom-right corner of the audio player.</li>
<li>Use text-to-speech or speech-to-text features if you prefer reading or writing over listening or speaking.</li>
<li>Be respectful and mindful of the privacy and preferences of your chat partners when sending or receiving voice messages.</li>
</ul>
<h2>FAQs</h2>
<p>Here are some frequently asked questions and their answers related to messenger voice messages:</p>
<h3>Q: How long can a messenger voice message be?</h3>
<p>A: The maximum length of a messenger voice message is 60 seconds. If you want to send a longer audio clip, you will need to use another app, such as Voice Recorder or Audacity, and then share it as a file on Messenger.</p>
<h3>Q: How can I delete a messenger voice message?</h3>
<p>A: To delete a messenger voice message, you need to tap and hold the voice message until a menu pops up. Then, you need to select "Remove" and then choose whether you want to remove it for yourself or for everyone. Note that you can only remove a voice message for everyone within 10 minutes of sending it.</p>
<h3>Q: How can I mute or unmute a messenger voice message?</h3>
<p>A: To mute or unmute a messenger voice message, you need to tap the speaker icon at the bottom-left corner of the audio player. This will toggle the sound on or off for the voice message.</p>
<h3>Q: How can I pause or resume a messenger voice message?</h3>
<p>A: To pause or resume a messenger voice message, you need to tap the play or pause button at the bottom-center of the audio player. This will pause or resume the playback of the voice message.</p>
<h3>Q: How can I rewind or fast-forward a messenger voice message?</h3>
<p>A: To rewind or fast-forward a messenger voice message, you need to drag the slider at the bottom of the audio player. This will move the playback position of the voice message backward or forward.</p> 401be4b1e0<br />
<br />
<br />
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_mega.py
DELETED
@@ -1,193 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import PIL.Image

from ...utils import logging
from .pipeline_fastdeploy_stable_diffusion import FastDeployStableDiffusionPipeline
from .pipeline_fastdeploy_stable_diffusion_img2img import (
    FastDeployStableDiffusionImg2ImgPipeline,
)
from .pipeline_fastdeploy_stable_diffusion_inpaint_legacy import (
    FastDeployStableDiffusionInpaintPipelineLegacy,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class FastDeployStableDiffusionMegaPipeline(FastDeployStableDiffusionPipeline):
    r"""
    Pipeline for generation using FastDeployStableDiffusion.

    This model inherits from [`FastDeployStableDiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving etc.)

    Args:
        vae_encoder ([`FastDeployRuntimeModel`]):
            Variational Auto-Encoder (VAE) Model to encode images to latent representations.
        vae_decoder ([`FastDeployRuntimeModel`]):
            Variational Auto-Encoder (VAE) Model to decode images from latent representations.
        text_encoder ([`FastDeployRuntimeModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`FastDeployRuntimeModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
            or [`DPMSolverMultistepScheduler`].
        safety_checker ([`FastDeployRuntimeModel`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __call__(self, *args, **kwargs):
        return self.text2img(*args, **kwargs)

    def text2img(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        latents: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
    ):

        expected_components = inspect.signature(FastDeployStableDiffusionPipeline.__init__).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        temp_pipeline = FastDeployStableDiffusionPipeline(
            **components, requires_safety_checker=self.config.requires_safety_checker
        )
        output = temp_pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
        return output

    def img2img(
        self,
        prompt: Union[str, List[str]],
        image: Union[np.ndarray, PIL.Image.Image],
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        noise: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        expected_components = inspect.signature(FastDeployStableDiffusionImg2ImgPipeline.__init__).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        temp_pipeline = FastDeployStableDiffusionImg2ImgPipeline(
            **components, requires_safety_checker=self.config.requires_safety_checker
        )
        output = temp_pipeline(
            prompt=prompt,
            image=image,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            noise=noise,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )

        return output

    def inpaint_legacy(
        self,
        prompt: Union[str, List[str]],
        image: Union[np.ndarray, PIL.Image.Image],
        mask_image: Union[np.ndarray, PIL.Image.Image],
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        noise: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
    ):
        expected_components = inspect.signature(
            FastDeployStableDiffusionInpaintPipelineLegacy.__init__
        ).parameters.keys()
        components = {name: component for name, component in self.components.items() if name in expected_components}
        temp_pipeline = FastDeployStableDiffusionInpaintPipelineLegacy(
            **components, requires_safety_checker=self.config.requires_safety_checker
        )
        output = temp_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_image,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            noise=noise,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )

        return output
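A minimal usage sketch for the mega pipeline above; the model id, the inherited `from_pretrained` loader, and the output handling are illustrative assumptions rather than anything taken from this repository.

# Hedged usage sketch for FastDeployStableDiffusionMegaPipeline.
# Assumptions: the class is exported from ppdiffusers, `from_pretrained` is
# inherited from the standard pipeline loader, and the model id below is a
# placeholder for a FastDeploy-exported Stable Diffusion checkpoint.
from ppdiffusers import FastDeployStableDiffusionMegaPipeline

pipe = FastDeployStableDiffusionMegaPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5"  # placeholder model id
)
# __call__ routes to text2img; img2img/inpaint_legacy additionally take images.
result = pipe.text2img("a watercolor painting of a lighthouse", num_inference_steps=50)
result.images[0].save("lighthouse.png")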
spaces/2023Liu2023/bingo/src/components/theme-toggle.tsx
DELETED
@@ -1,31 +0,0 @@
'use client'

import * as React from 'react'
import { useTheme } from 'next-themes'

import { Button } from '@/components/ui/button'
import { IconMoon, IconSun } from '@/components/ui/icons'

export function ThemeToggle() {
  const { setTheme, theme } = useTheme()
  const [_, startTransition] = React.useTransition()

  return (
    <Button
      variant="ghost"
      size="icon"
      onClick={() => {
        startTransition(() => {
          setTheme(theme === 'light' ? 'dark' : 'light')
        })
      }}
    >
      {!theme ? null : theme === 'dark' ? (
        <IconMoon className="transition-all" />
      ) : (
        <IconSun className="transition-all" />
      )}
      <span className="sr-only">Toggle theme</span>
    </Button>
  )
}
spaces/7eu7d7/anime-ai-detect-fucker/attack.py
DELETED
@@ -1,113 +0,0 @@
import torch
import os
from transformers import BeitFeatureExtractor, BeitForImageClassification
from PIL import Image

from torchvision.utils import save_image
import torch.nn.functional as F
from torchvision import transforms

from attacker import *
from torch.nn import CrossEntropyLoss

import argparse

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def make_args():
    parser = argparse.ArgumentParser(description='Adversarial attack against the anime-ai-detect classifier')

    parser.add_argument('inputs', type=str)
    parser.add_argument('--out_dir', type=str, default='./output')
    parser.add_argument('--target', type=str, default='auto', help='[auto, ai, human]')
    parser.add_argument('--eps', type=float, default=8/8, help='Noise intensity')
    parser.add_argument('--step_size', type=float, default=1.087313/8, help='Attack step size')
    parser.add_argument('--steps', type=int, default=20, help='Attack step count')

    parser.add_argument('--test_atk', action='store_true')

    return parser.parse_args()

class Attacker:
    def __init__(self, args, pgd_callback=None):  # fix: default to None so the CLI entry point Attacker(args) works
        self.args = args
        os.makedirs(args.out_dir, exist_ok=True)

        print('Loading model...')
        self.feature_extractor = BeitFeatureExtractor.from_pretrained('saltacc/anime-ai-detect')
        self.model = BeitForImageClassification.from_pretrained('saltacc/anime-ai-detect').to(device)
        print('Model loaded')

        if args.target == 'ai':  # attack so that the image is classified as AI-generated
            self.target = torch.tensor([1]).to(device)
        elif args.target == 'human':
            self.target = torch.tensor([0]).to(device)

        dataset_mean_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1).to(device)
        dataset_std_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1).to(device)
        self.pgd = PGD(self.model, img_transform=(lambda x: (x - dataset_mean_t) / dataset_std_t, lambda x: x * dataset_std_t + dataset_mean_t))
        self.pgd.set_para(eps=(args.eps * 2) / 255, alpha=lambda: (args.step_size * 2) / 255, iters=args.steps)
        self.pgd.set_loss(CrossEntropyLoss())
        if pgd_callback is not None:
            self.pgd.set_call_back(pgd_callback)

    def save_image(self, image, noise, img_name):
        # when resizing, scale only the noise, not the image itself
        W, H = image.size
        noise = F.interpolate(noise, size=(H, W), mode='bicubic')
        img_save = transforms.ToTensor()(image) + noise
        save_image(img_save, os.path.join(self.args.out_dir, f'{img_name[:img_name.rfind(".")]}_atk.png'))

    def attack_(self, image):
        inputs = self.feature_extractor(images=image, return_tensors="pt")['pixel_values'].to(device)

        if self.args.target == 'auto':
            with torch.no_grad():
                outputs = self.model(inputs)
                logits = outputs.logits
                cls = logits.argmax(-1).item()
                target = torch.tensor([cls]).to(device)
        else:
            target = self.target

        if self.args.test_atk:
            self.test_image(inputs, 'before attack')

        atk_img = self.pgd.attack(inputs, target)

        noise = self.pgd.img_transform[1](atk_img).detach().cpu() - self.pgd.img_transform[1](inputs).detach().cpu()

        if self.args.test_atk:
            self.test_image(atk_img, 'after attack')

        return atk_img, noise

    def attack_one(self, path):
        image = Image.open(path).convert('RGB')
        atk_img, noise = self.attack_(image)
        self.save_image(image, noise, os.path.basename(path))

    def attack(self, path):
        count = 0
        if os.path.isdir(path):
            img_list = [os.path.join(path, x) for x in os.listdir(path)]
            for img in img_list:
                if (img.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))):
                    self.attack_one(img)
                    count += 1
        else:
            if (path.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))):
                self.attack_one(path)
                count += 1
        print(f'Attacked {count} images in total')

    @torch.no_grad()
    def test_image(self, img, pre_fix):
        outputs = self.model(img)
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print(pre_fix, "class:", self.model.config.id2label[predicted_class_idx], 'logits:', logits)

if __name__ == '__main__':
    args = make_args()
    attacker = Attacker(args)
    attacker.attack(args.inputs)
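A short usage sketch for the script above, driving it programmatically instead of from the shell (equivalent to `python attack.py input.png --target ai`); the flag values and file name are illustrative only.

# Hedged usage sketch: patch sys.argv so make_args() can parse flags, then run.
import sys
sys.argv = ["attack.py", "input.png", "--target", "ai", "--steps", "20"]  # placeholder values
args = make_args()
attacker = Attacker(args)     # pgd_callback defaults to None after the fix above
attacker.attack(args.inputs)  # writes input_atk.png under --out_dir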
spaces/A00001/bingothoo/Dockerfile
DELETED
@@ -1,36 +0,0 @@
FROM node:18


ARG DEBIAN_FRONTEND=noninteractive

ENV BING_HEADER ""

# Set home to the user's home directory
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set up a new user named "user" with user ID 1000
RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME

# Switch to the "user" user
USER user

# Set the working directory to the user's home directory
WORKDIR $HOME/app

# Install app dependencies
# A wildcard is used to ensure both package.json AND package-lock.json are copied
# where available (npm@5+)
COPY --chown=user package*.json $HOME/app/

RUN npm install

# Copy the current directory contents into the container at $HOME/app setting the owner to the user
COPY --chown=user . $HOME/app/

RUN npm run build

ENV PORT 7860
EXPOSE 7860

CMD npm start
spaces/AI-Hobbyist/Hoyo-RVC/infer_uvr5.py
DELETED
@@ -1,363 +0,0 @@
import os, sys, torch, warnings, pdb

now_dir = os.getcwd()
sys.path.append(now_dir)
from json import load as ll

warnings.filterwarnings("ignore")
import librosa
import importlib
import numpy as np
import hashlib, math
from tqdm import tqdm
from uvr5_pack.lib_v5 import spec_utils
from uvr5_pack.utils import _get_name_params, inference
from uvr5_pack.lib_v5.model_param_init import ModelParameters
import soundfile as sf
from uvr5_pack.lib_v5.nets_new import CascadedNet
from uvr5_pack.lib_v5 import nets_61968KB as nets


class _audio_pre_:
    def __init__(self, agg, model_path, device, is_half):
        self.model_path = model_path
        self.device = device
        self.data = {
            # Processing Options
            "postprocess": False,
            "tta": False,
            # Constants
            "window_size": 512,
            "agg": agg,
            "high_end_process": "mirroring",
        }
        mp = ModelParameters("uvr5_pack/lib_v5/modelparams/4band_v2.json")
        model = nets.CascadedASPPNet(mp.param["bins"] * 2)
        cpk = torch.load(model_path, map_location="cpu")
        model.load_state_dict(cpk)
        model.eval()
        if is_half:
            model = model.half().to(device)
        else:
            model = model.to(device)

        self.mp = mp
        self.model = model

    def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
        if ins_root is None and vocal_root is None:
            return "No save root."
        name = os.path.basename(music_file)
        if ins_root is not None:
            os.makedirs(ins_root, exist_ok=True)
        if vocal_root is not None:
            os.makedirs(vocal_root, exist_ok=True)
        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
        bands_n = len(self.mp.param["band"])
        # print(bands_n)
        for d in range(bands_n, 0, -1):
            bp = self.mp.param["band"][d]
            if d == bands_n:  # high-end band
                (
                    X_wave[d],
                    _,
                ) = librosa.core.load(  # librosa can in theory misread some audio; ffmpeg would be safer, but that was too much hassle and was dropped
                    music_file,
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )
                if X_wave[d].ndim == 1:
                    X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
            else:  # lower bands
                X_wave[d] = librosa.core.resample(
                    X_wave[d + 1],
                    self.mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )
            # Stft of wave source
            X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
                X_wave[d],
                bp["hl"],
                bp["n_fft"],
                self.mp.param["mid_side"],
                self.mp.param["mid_side_b2"],
                self.mp.param["reverse"],
            )
            # pdb.set_trace()
            if d == bands_n and self.data["high_end_process"] != "none":
                input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
                    self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
                )
                input_high_end = X_spec_s[d][
                    :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
                ]

        X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
        aggresive_set = float(self.data["agg"] / 100)
        aggressiveness = {
            "value": aggresive_set,
            "split_bin": self.mp.param["band"][1]["crop_stop"],
        }
        with torch.no_grad():
            pred, X_mag, X_phase = inference(
                X_spec_m, self.device, self.model, aggressiveness, self.data
            )
        # Postprocess
        if self.data["postprocess"]:
            pred_inv = np.clip(X_mag - pred, 0, np.inf)
            pred = spec_utils.mask_silence(pred, pred_inv)
        y_spec_m = pred * X_phase
        v_spec_m = X_spec_m - y_spec_m

        if ins_root is not None:
            if self.data["high_end_process"].startswith("mirroring"):
                input_high_end_ = spec_utils.mirroring(
                    self.data["high_end_process"], y_spec_m, input_high_end, self.mp
                )
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(
                    y_spec_m, self.mp, input_high_end_h, input_high_end_
                )
            else:
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
            print("%s instruments done" % name)
            if format in ["wav", "flac"]:
                sf.write(
                    os.path.join(
                        ins_root,
                        "instrument_{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_instrument) * 32768).astype("int16"),
                    self.mp.param["sr"],
                )  #
            else:
                path = os.path.join(
                    ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
                    (np.array(wav_instrument) * 32768).astype("int16"),
                    self.mp.param["sr"],
                )
                if os.path.exists(path):
                    os.system(
                        "ffmpeg -i %s -vn %s -q:a 2 -y"
                        % (path, path[:-4] + ".%s" % format)
                    )
        if vocal_root is not None:
            if self.data["high_end_process"].startswith("mirroring"):
                input_high_end_ = spec_utils.mirroring(
                    self.data["high_end_process"], v_spec_m, input_high_end, self.mp
                )
                wav_vocals = spec_utils.cmb_spectrogram_to_wave(
                    v_spec_m, self.mp, input_high_end_h, input_high_end_
                )
            else:
                wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
            print("%s vocals done" % name)
            if format in ["wav", "flac"]:
                sf.write(
                    os.path.join(
                        vocal_root,
                        "vocal_{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_vocals) * 32768).astype("int16"),
                    self.mp.param["sr"],
                )
            else:
                path = os.path.join(
                    vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
                    (np.array(wav_vocals) * 32768).astype("int16"),
                    self.mp.param["sr"],
                )
                if os.path.exists(path):
                    os.system(
                        "ffmpeg -i %s -vn %s -q:a 2 -y"
                        % (path, path[:-4] + ".%s" % format)
                    )


class _audio_pre_new:
    def __init__(self, agg, model_path, device, is_half):
        self.model_path = model_path
        self.device = device
        self.data = {
            # Processing Options
            "postprocess": False,
            "tta": False,
            # Constants
            "window_size": 512,
            "agg": agg,
            "high_end_process": "mirroring",
        }
        mp = ModelParameters("uvr5_pack/lib_v5/modelparams/4band_v3.json")
        nout = 64 if "DeReverb" in model_path else 48
        model = CascadedNet(mp.param["bins"] * 2, nout)
        cpk = torch.load(model_path, map_location="cpu")
        model.load_state_dict(cpk)
        model.eval()
        if is_half:
            model = model.half().to(device)
        else:
            model = model.to(device)

        self.mp = mp
        self.model = model

    def _path_audio_(
        self, music_file, vocal_root=None, ins_root=None, format="flac"
    ):  # for these three VR models, "vocal" and "ins" are swapped
        if ins_root is None and vocal_root is None:
            return "No save root."
        name = os.path.basename(music_file)
        if ins_root is not None:
            os.makedirs(ins_root, exist_ok=True)
        if vocal_root is not None:
            os.makedirs(vocal_root, exist_ok=True)
        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
        bands_n = len(self.mp.param["band"])
        # print(bands_n)
        for d in range(bands_n, 0, -1):
            bp = self.mp.param["band"][d]
            if d == bands_n:  # high-end band
                (
                    X_wave[d],
                    _,
                ) = librosa.core.load(  # librosa can in theory misread some audio; ffmpeg would be safer, but that was too much hassle and was dropped
                    music_file,
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )
                if X_wave[d].ndim == 1:
                    X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
            else:  # lower bands
                X_wave[d] = librosa.core.resample(
                    X_wave[d + 1],
                    self.mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )
            # Stft of wave source
            X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
                X_wave[d],
                bp["hl"],
                bp["n_fft"],
                self.mp.param["mid_side"],
                self.mp.param["mid_side_b2"],
                self.mp.param["reverse"],
            )
            # pdb.set_trace()
            if d == bands_n and self.data["high_end_process"] != "none":
                input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
                    self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
                )
                input_high_end = X_spec_s[d][
                    :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
                ]

        X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
        aggresive_set = float(self.data["agg"] / 100)
        aggressiveness = {
            "value": aggresive_set,
            "split_bin": self.mp.param["band"][1]["crop_stop"],
        }
        with torch.no_grad():
            pred, X_mag, X_phase = inference(
                X_spec_m, self.device, self.model, aggressiveness, self.data
            )
        # Postprocess
        if self.data["postprocess"]:
            pred_inv = np.clip(X_mag - pred, 0, np.inf)
            pred = spec_utils.mask_silence(pred, pred_inv)
        y_spec_m = pred * X_phase
        v_spec_m = X_spec_m - y_spec_m

        if ins_root is not None:
            if self.data["high_end_process"].startswith("mirroring"):
                input_high_end_ = spec_utils.mirroring(
                    self.data["high_end_process"], y_spec_m, input_high_end, self.mp
                )
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(
                    y_spec_m, self.mp, input_high_end_h, input_high_end_
                )
            else:
                wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
            print("%s instruments done" % name)
            if format in ["wav", "flac"]:
                sf.write(
|
295 |
-
os.path.join(
|
296 |
-
ins_root,
|
297 |
-
"instrument_{}_{}.{}".format(name, self.data["agg"], format),
|
298 |
-
),
|
299 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
300 |
-
self.mp.param["sr"],
|
301 |
-
) #
|
302 |
-
else:
|
303 |
-
path = os.path.join(
|
304 |
-
ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
|
305 |
-
)
|
306 |
-
sf.write(
|
307 |
-
path,
|
308 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
309 |
-
self.mp.param["sr"],
|
310 |
-
)
|
311 |
-
if os.path.exists(path):
|
312 |
-
os.system(
|
313 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
314 |
-
% (path, path[:-4] + ".%s" % format)
|
315 |
-
)
|
316 |
-
if vocal_root is not None:
|
317 |
-
if self.data["high_end_process"].startswith("mirroring"):
|
318 |
-
input_high_end_ = spec_utils.mirroring(
|
319 |
-
self.data["high_end_process"], v_spec_m, input_high_end, self.mp
|
320 |
-
)
|
321 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(
|
322 |
-
v_spec_m, self.mp, input_high_end_h, input_high_end_
|
323 |
-
)
|
324 |
-
else:
|
325 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
|
326 |
-
print("%s vocals done" % name)
|
327 |
-
if format in ["wav", "flac"]:
|
328 |
-
sf.write(
|
329 |
-
os.path.join(
|
330 |
-
vocal_root,
|
331 |
-
"vocal_{}_{}.{}".format(name, self.data["agg"], format),
|
332 |
-
),
|
333 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
334 |
-
self.mp.param["sr"],
|
335 |
-
)
|
336 |
-
else:
|
337 |
-
path = os.path.join(
|
338 |
-
vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
|
339 |
-
)
|
340 |
-
sf.write(
|
341 |
-
path,
|
342 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
343 |
-
self.mp.param["sr"],
|
344 |
-
)
|
345 |
-
if os.path.exists(path):
|
346 |
-
os.system(
|
347 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
348 |
-
% (path, path[:-4] + ".%s" % format)
|
349 |
-
)
|
350 |
-
|
351 |
-
|
352 |
-
if __name__ == "__main__":
|
353 |
-
device = "cuda"
|
354 |
-
is_half = True
|
355 |
-
# model_path = "uvr5_weights/2_HP-UVR.pth"
|
356 |
-
# model_path = "uvr5_weights/VR-DeEchoDeReverb.pth"
|
357 |
-
# model_path = "uvr5_weights/VR-DeEchoNormal.pth"
|
358 |
-
model_path = "uvr5_weights/DeEchoNormal.pth"
|
359 |
-
# pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10)
|
360 |
-
pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10)
|
361 |
-
audio_path = "雪雪伴奏对消HP5.wav"
|
362 |
-
save_path = "opt"
|
363 |
-
pre_fun._path_audio_(audio_path, save_path, save_path)
|
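
A note on the separation math shared by both removed classes: the network predicts an instrumental magnitude, which is recombined with the mixture's phase, and the vocal spectrogram is simply the residual. A minimal NumPy sketch of that step (illustrative only; the shapes and random values here are invented, not taken from the model):

import numpy as np

# Toy complex mixture spectrogram (channels x freq bins x frames); values are arbitrary.
X_spec = np.random.randn(2, 4, 5) + 1j * np.random.randn(2, 4, 5)
X_mag = np.abs(X_spec)
X_phase = np.exp(1j * np.angle(X_spec))  # unit phasors, as returned by inference()

# Pretend the model predicted a magnitude mask in [0, 1] for the instrumental part.
pred = np.random.rand(*X_mag.shape) * X_mag

y_spec = pred * X_phase   # instrumental: masked magnitude + original phase
v_spec = X_spec - y_spec  # vocals: the residual, as in the deleted _path_audio_
print(np.allclose(X_spec, y_spec + v_spec))  # True: the two stems sum back to the mix
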
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual_dis.py
DELETED
@@ -1,137 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import sys
-
-sys.path.insert(0, '.')  # nopep8
-from ldm.modules.losses_audio.vqperceptual import *
-from ldm.modules.discriminator.multi_window_disc import Discriminator
-
-class LPAPSWithDiscriminator(nn.Module):  # compared with contperceptual.py, this adds a MultiWindowDiscriminator
-    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_loss="hinge"):
-
-        super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
-        self.kl_weight = kl_weight
-        self.pixel_weight = pixelloss_weight
-        self.perceptual_loss = LPAPS().eval()
-        self.perceptual_weight = perceptual_weight
-        # output log variance
-        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
-
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm,
-                                                 ).apply(weights_init)
-        self.discriminator_iter_start = disc_start
-        if disc_loss == "hinge":
-            self.disc_loss = hinge_d_loss
-        elif disc_loss == "vanilla":
-            self.disc_loss = vanilla_d_loss
-        else:
-            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
-        print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
-        self.disc_factor = disc_factor
-        self.discriminator_weight = disc_weight
-        self.disc_conditional = disc_conditional
-
-        disc_win_num = 3
-        mel_disc_hidden_size = 128
-        self.discriminator_multi = Discriminator(time_lengths=[32, 64, 128][:disc_win_num],
-                                                 freq_length=80, hidden_size=mel_disc_hidden_size, kernel=(3, 3),
-                                                 cond_size=0, norm_type="in", reduction="stack")
-
-    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
-        if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
-        else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
-
-        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
-        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
-        d_weight = d_weight * self.discriminator_weight
-        return d_weight
-
-    def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train", weights=None):
-        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
-        if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
-            rec_loss = rec_loss + self.perceptual_weight * p_loss
-        else:
-            p_loss = torch.tensor([0.0])
-
-        nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
-        weighted_nll_loss = nll_loss
-        if weights is not None:
-            weighted_nll_loss = weights*nll_loss
-        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
-        nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
-        kl_loss = posteriors.kl()
-        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
-
-        # now the GAN part
-        if optimizer_idx == 0:
-            # generator update
-            if cond is None:
-                assert not self.disc_conditional
-                logits_fake = self.discriminator(reconstructions.contiguous())
-            else:
-                assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
-
-            logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().squeeze(1).transpose(1, 2))
-
-            g_loss = -torch.mean(logits_fake)
-            g_loss_multi = -torch.mean(logits_fake_multi['y'])
-
-            try:
-                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
-                d_weight_multi = self.calculate_adaptive_weight(nll_loss, g_loss_multi, last_layer=last_layer)
-            except RuntimeError:
-                assert not self.training
-                d_weight = d_weight_multi = torch.tensor(0.0)
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + d_weight_multi * disc_factor * g_loss_multi
-
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
-                   "{}/logvar".format(split): self.logvar.detach(),
-                   "{}/kl_loss".format(split): kl_loss.detach().mean(),
-                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   "{}/g_loss_multi".format(split): g_loss_multi.detach().mean(),
-                   }
-            return loss, log
-
-        if optimizer_idx == 1:
-            # second pass for discriminator update
-            if cond is None:
-                logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
-            else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
-
-            logits_real_multi = self.discriminator_multi(inputs.contiguous().detach().squeeze(1).transpose(1, 2))
-            logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().detach().squeeze(1).transpose(1, 2))
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
-            d_loss_multi = disc_factor * self.disc_loss(logits_real_multi['y'], logits_fake_multi['y'])
-
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/disc_loss_multi".format(split): d_loss_multi.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
-            return d_loss+d_loss_multi, log
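
For reference, `calculate_adaptive_weight` above balances the adversarial term against the reconstruction term by comparing gradient norms at a shared last layer. A self-contained sketch of that computation (the toy losses and the 8-element parameter are invented purely for illustration):

import torch

last_layer = torch.randn(8, requires_grad=True)
nll_loss = (last_layer ** 2).sum()   # stand-in for the reconstruction/NLL loss
g_loss = last_layer.sin().sum()      # stand-in for the generator (adversarial) loss

nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]

# Same formula as the deleted method: ratio of gradient norms, clamped and detached.
d_weight = torch.clamp(torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4), 0.0, 1e4).detach()
print(d_weight)  # scales the adversarial term so it never dominates the reconstruction term
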
spaces/AIKey/TestStatic/README.md
DELETED
@@ -1,10 +0,0 @@
----
-title: TestStatic
-emoji: 📉
-colorFrom: yellow
-colorTo: red
-sdk: static
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ASJMO/freegpt/g4f/__init__.py
DELETED
@@ -1,39 +0,0 @@
-import sys
-from . import Provider
-from g4f.models import Model, ModelUtils
-
-
-class ChatCompletion:
-    @staticmethod
-    def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
-        kwargs['auth'] = auth
-
-        if provider and provider.needs_auth and not auth:
-            print(
-                f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-            sys.exit(1)
-
-        try:
-            if isinstance(model, str):
-                try:
-                    model = ModelUtils.convert[model]
-                except KeyError:
-                    raise Exception(f'The model: {model} does not exist')
-
-            engine = model.best_provider if not provider else provider
-
-            if not engine.supports_stream and stream == True:
-                print(
-                    f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
-                sys.exit(1)
-
-            print(f'Using {engine.__name__} provider')
-
-            return (engine._create_completion(model.name, messages, stream, **kwargs)
-                    if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
-        except TypeError as e:
-            print(e)
-            arg: str = str(e).split("'")[1]
-            print(
-                f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/__init__.py
DELETED
File without changes
spaces/AchyuthGamer/OpenGPT/server/backend.py
DELETED
@@ -1,188 +0,0 @@
-import re
-from datetime import datetime
-import asyncio
-
-import sys
-sys.path.insert(0, '../g4f')
-from g4f import __init__, ChatCompletion
-
-from flask import request, Response, stream_with_context
-from requests import get
-from server.config import special_instructions
-import json
-import subprocess
-import platform
-
-class Backend_Api:
-    def __init__(self, bp, config: dict) -> None:
-        """
-        Initialize the Backend_Api class.
-        :param app: Flask application instance
-        :param config: Configuration dictionary
-        """
-        self.bp = bp
-        self.routes = {
-            '/backend-api/v2/conversation': {
-                'function': self._conversation,
-                'methods': ['POST']
-            }
-        }
-
-    def _conversation(self):
-        """
-        Handles the conversation route.
-
-        :return: Response object containing the generated conversation stream
-        """
-        conversation_id = request.json['conversation_id']
-
-        try:
-            jailbreak = request.json['jailbreak']
-            model = request.json['model']
-            messages = build_messages(jailbreak)
-
-            # The error "There is no current event loop in thread" was fixed in 0.1.4.3
-            # (Windows-specific fix):
-            # if platform.system() == "Windows":
-            #     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
-            response = ChatCompletion.create(
-                model=model,
-                chatId=conversation_id,
-                messages=messages
-            )
-
-            return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')
-
-        except Exception as e:
-            print(e)
-            print(e.__traceback__.tb_next)
-
-            return {
-                '_action': '_ask',
-                'success': False,
-                "error": f"an error occurred {str(e)}"
-            }, 400
-
-
-def build_messages(jailbreak):
-    """
-    Build the messages for the conversation.
-
-    :param jailbreak: Jailbreak instruction string
-    :return: List of messages for the conversation
-    """
-    _conversation = request.json['meta']['content']['conversation']
-    internet_access = request.json['meta']['content']['internet_access']
-    prompt = request.json['meta']['content']['parts'][0]
-
-    # Add the existing conversation
-    conversation = _conversation
-
-    # This API doesn't work!
-    # Add web results if enabled
-    # if internet_access:
-    #     current_date = datetime.now().strftime("%Y-%m-%d")
-    #     query = f'Current date: {current_date}. ' + prompt["content"]
-    #     search_results = fetch_search_results(query)
-    #     conversation.extend(search_results)
-
-    # Add jailbreak instructions if enabled
-    if jailbreak_instructions := getJailbreak(jailbreak):
-        conversation.extend(jailbreak_instructions)
-
-    # Add the prompt
-    conversation.append(prompt)
-
-    # Reduce conversation size to avoid API Token quantity error
-    if len(conversation) > 3:
-        conversation = conversation[-4:]
-
-    return conversation
-
-
-def fetch_search_results(query):
-    """
-    Fetch search results for a given query.
-
-    :param query: Search query string
-    :return: List of search results
-    """
-    search = get('https://ddg-api.herokuapp.com/search',
-                 params={
-                     'query': query,
-                     'limit': 3,
-                 })
-
-    snippets = ""
-    for index, result in enumerate(search.json()):
-        snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.'
-        snippets += snippet
-
-    response = "Here are some updated web searches. Use this to improve user response:"
-    response += snippets
-
-    return [{'role': 'system', 'content': response}]
-
-
-def generate_stream(response, jailbreak):
-    """
-    Generate the conversation stream.
-
-    :param response: Response object from ChatCompletion.create
-    :param jailbreak: Jailbreak instruction string
-    :return: Generator object yielding messages in the conversation
-    """
-    if getJailbreak(jailbreak):
-        response_jailbreak = ''
-        jailbroken_checked = False
-        for message in response:
-            response_jailbreak += message
-            if jailbroken_checked:
-                yield message
-            else:
-                if response_jailbroken_success(response_jailbreak):
-                    jailbroken_checked = True
-                if response_jailbroken_failed(response_jailbreak):
-                    yield response_jailbreak
-                    jailbroken_checked = True
-    else:
-        yield from response
-
-
-def response_jailbroken_success(response: str) -> bool:
-    """Check if the response has been jailbroken.
-
-    :param response: Response string
-    :return: Boolean indicating if the response has been jailbroken
-    """
-    act_match = re.search(r'ACT:', response, flags=re.DOTALL)
-    return bool(act_match)
-
-
-def response_jailbroken_failed(response):
-    """
-    Check if the response has not been jailbroken.
-
-    :param response: Response string
-    :return: Boolean indicating if the response has not been jailbroken
-    """
-    return False if len(response) < 4 else not (response.startswith("GPT:") or response.startswith("ACT:"))
-
-
-def getJailbreak(jailbreak):
-    """
-    Check if jailbreak instructions are provided.
-
-    :param jailbreak: Jailbreak instruction string
-    :return: Jailbreak instructions if provided, otherwise None
-    """
-    if jailbreak != "default":
-        special_instructions[jailbreak][0]['content'] += special_instructions['two_responses_instruction']
-        if jailbreak in special_instructions:
-            special_instructions[jailbreak]
-            return special_instructions[jailbreak]
-        else:
-            return None
-    else:
-        return None
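
A side note on `generate_stream` above: when a jailbreak is active it buffers the stream until an "ACT:" marker confirms the persona switched, then passes chunks through. A simplified, self-contained sketch of that buffer-until-marker pattern (the chunk strings are made up, and the failure branch of the original is omitted):

def stream_after_marker(chunks, marker="ACT:"):
    # Hold tokens back until the marker appears, then stream the rest verbatim.
    buffer, confirmed = "", False
    for chunk in chunks:
        buffer += chunk
        if confirmed:
            yield chunk
        elif marker in buffer:
            confirmed = True

print(list(stream_after_marker(["GPT: no. ", "ACT:", " sure", "!"])))  # [' sure', '!']
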
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Factory.js
DELETED
@@ -1,16 +0,0 @@
-import Swipe from './Swipe.js';
-import ObjectFactory from '../ObjectFactory.js';
-import IsGameObject from '../../../plugins/utils/system/IsGameObject.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('swipe', function (gameObject, config) {
-    if (!IsGameObject(gameObject)) {
-        config = gameObject;
-        gameObject = this.scene;
-    }
-    return new Swipe(gameObject, config);
-});
-
-SetValue(window, 'RexPlugins.UI.Swipe', Swipe);
-
-export default Swipe;
spaces/AkitoP/umamusume_bert_vits2/text/english.py
DELETED
@@ -1,214 +0,0 @@
-import pickle
-import os
-import re
-from g2p_en import G2p
-
-from text import symbols
-
-current_file_path = os.path.dirname(__file__)
-CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
-CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
-_g2p = G2p()
-
-arpa = {
-    "AH0", "S", "AH1", "EY2", "AE2", "EH0", "OW2", "UH0", "NG", "B", "G", "AY0",
-    "M", "AA0", "F", "AO0", "ER2", "UH1", "IY1", "AH2", "DH", "IY0", "EY1", "IH0",
-    "K", "N", "W", "IY2", "T", "AA1", "ER1", "EH2", "OY0", "UH2", "UW1", "Z",
-    "AW2", "AW1", "V", "UW2", "AA2", "ER", "AW0", "UW0", "R", "OW1", "EH1", "ZH",
-    "AE0", "IH2", "IH", "Y", "JH", "P", "AY1", "EY0", "OY2", "TH", "HH", "D",
-    "ER0", "CH", "AO1", "AE1", "AO2", "OY1", "AY2", "IH1", "OW0", "L", "SH",
-}
-
-
-def post_replace_ph(ph):
-    rep_map = {
-        ":": ",",
-        ";": ",",
-        ",": ",",
-        "。": ".",
-        "!": "!",
-        "?": "?",
-        "\n": ".",
-        "·": ",",
-        "、": ",",
-        "...": "…",
-        "v": "V",
-    }
-    if ph in rep_map.keys():
-        ph = rep_map[ph]
-    if ph in symbols:
-        return ph
-    if ph not in symbols:
-        ph = "UNK"
-    return ph
-
-
-def read_dict():
-    g2p_dict = {}
-    start_line = 49
-    with open(CMU_DICT_PATH) as f:
-        line = f.readline()
-        line_index = 1
-        while line:
-            if line_index >= start_line:
-                line = line.strip()
-                word_split = line.split("  ")
-                word = word_split[0]
-
-                syllable_split = word_split[1].split(" - ")
-                g2p_dict[word] = []
-                for syllable in syllable_split:
-                    phone_split = syllable.split(" ")
-                    g2p_dict[word].append(phone_split)
-
-            line_index = line_index + 1
-            line = f.readline()
-
-    return g2p_dict
-
-
-def cache_dict(g2p_dict, file_path):
-    with open(file_path, "wb") as pickle_file:
-        pickle.dump(g2p_dict, pickle_file)
-
-
-def get_dict():
-    if os.path.exists(CACHE_PATH):
-        with open(CACHE_PATH, "rb") as pickle_file:
-            g2p_dict = pickle.load(pickle_file)
-    else:
-        g2p_dict = read_dict()
-        cache_dict(g2p_dict, CACHE_PATH)
-
-    return g2p_dict
-
-
-eng_dict = get_dict()
-
-
-def refine_ph(phn):
-    tone = 0
-    if re.search(r"\d$", phn):
-        tone = int(phn[-1]) + 1
-        phn = phn[:-1]
-    return phn.lower(), tone
-
-
-def refine_syllables(syllables):
-    tones = []
-    phonemes = []
-    for phn_list in syllables:
-        for i in range(len(phn_list)):
-            phn = phn_list[i]
-            phn, tone = refine_ph(phn)
-            phonemes.append(phn)
-            tones.append(tone)
-    return phonemes, tones
-
-
-def text_normalize(text):
-    # todo: eng text normalize
-    return text
-
-
-def g2p(text):
-    phones = []
-    tones = []
-    words = re.split(r"([,;.\-\?\!\s+])", text)
-    for w in words:
-        if w.upper() in eng_dict:
-            phns, tns = refine_syllables(eng_dict[w.upper()])
-            phones += phns
-            tones += tns
-        else:
-            phone_list = list(filter(lambda p: p != " ", _g2p(w)))
-            for ph in phone_list:
-                if ph in arpa:
-                    ph, tn = refine_ph(ph)
-                    phones.append(ph)
-                    tones.append(tn)
-                else:
-                    phones.append(ph)
-                    tones.append(0)
-    # todo: implement word2ph
-    word2ph = [1 for i in phones]
-
-    phones = [post_replace_ph(i) for i in phones]
-    return phones, tones, word2ph
-
-
-if __name__ == "__main__":
-    # print(get_dict())
-    # print(eng_word_to_phoneme("hello"))
-    print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
-    # all_phones = set()
-    # for k, syllables in eng_dict.items():
-    #     for group in syllables:
-    #         for ph in group:
-    #             all_phones.add(ph)
-    # print(all_phones)
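
For reference, `refine_ph` above strips the trailing ARPABET stress digit and shifts it by one to form a tone index, reserving tone 0 for symbols that carry no stress mark. A standalone sketch of that mapping (the function name here is hypothetical):

import re

def split_stress(phn):
    # "AH0" -> ("ah", 1): lexical stress 0 becomes tone 1; tone 0 = no stress digit.
    if re.search(r"\d$", phn):
        return phn[:-1].lower(), int(phn[-1]) + 1
    return phn.lower(), 0

print(split_stress("AH0"))  # ('ah', 1)
print(split_stress("EY1"))  # ('ey', 2)
print(split_stress("NG"))   # ('ng', 0)
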
spaces/Alex89912/ai-code-v1/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: CodeGenerator-v1
-emoji: 💻
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlexZou/Deploy_Restoration/Dehazing.py
DELETED
@@ -1,45 +0,0 @@
-import os
-import torch
-import numpy as np
-from torchvision import transforms
-from PIL import Image
-import time
-import torchvision
-import cv2
-import torchvision.utils as tvu
-import torch.functional as F
-import argparse
-
-def inference_img(haze_path, Net):
-
-    haze_image = Image.open(haze_path).convert('RGB')
-    enhance_transforms = transforms.Compose([
-        transforms.Resize((400, 400)),
-        transforms.ToTensor()
-    ])
-
-    print(haze_image.size)
-    with torch.no_grad():
-        haze_image = enhance_transforms(haze_image)
-        # print(haze_image)
-        haze_image = haze_image.unsqueeze(0)
-        start = time.time()
-        restored2 = Net(haze_image)
-        end = time.time()
-
-    return restored2, end - start
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--test_path', type=str, required=True, help='Path to test')
-    parser.add_argument('--save_path', type=str, required=True, help='Path to save')
-    parser.add_argument('--pk_path', type=str, default='model_zoo/Haze4k.tjm', help='Path of the checkpoint')
-    opt = parser.parse_args()
-    if not os.path.isdir(opt.save_path):
-        os.mkdir(opt.save_path)
-    Net = torch.jit.load(opt.pk_path, map_location=torch.device('cpu')).eval()
-    image = opt.test_path
-    print(image)
-    restored2, time_num = inference_img(image, Net)
-    torchvision.utils.save_image(restored2, opt.save_path + 'output.png')
spaces/Alfasign/remove-background-on-image/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Remove Background
-emoji: 🌖
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
-duplicated_from: openskyml/remove-background-on-image
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amitesh007/elevenlabs-stt/app.py
DELETED
@@ -1,59 +0,0 @@
-import streamlit as st
-import numpy as np
-from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError
-
-def pad_buffer(audio):
-    # Pad buffer to multiple of 2 bytes
-    buffer_size = len(audio)
-    element_size = np.dtype(np.int16).itemsize
-    if buffer_size % element_size != 0:
-        audio = audio + b'\0' * (element_size - (buffer_size % element_size))
-    return audio
-
-def generate_voice(text, voice_name, model_name):
-    audio = generate(
-        text[:250],  # Limit to 250 characters
-        voice=voice_name,
-        model=model_name
-    )
-    audio_data = np.frombuffer(pad_buffer(audio), dtype=np.int16)
-    audio_bytes = audio_data.tobytes()
-    return audio_bytes
-
-st.title("🎤 World's most advanced Text-to-Speech")
-
-description = """
-A demo of the world's most advanced TTS systems, made by [ElevenLabs](https://elevenlabs.io). Eleven Monolingual is designed to generate highly realistic voices in English, where Eleven Multilingual is a single model supporting multiple languages including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi. Sign up on [ElevenLabs](https://elevenlabs.io) to get fast access, long-form generation, voice cloning, API keys, and more!
-credit goes to "1little coder"
-"""
-
-st.markdown(description)
-
-input_text = st.text_area(
-    "Input Text (250 characters max)",
-    value="Hahaha OHH MY GOD! This is SOOO funny, I-I am Eleven a text-to-speech system!",
-    max_chars=250
-)
-
-all_voices = voices()
-input_voice = st.selectbox(
-    "Voice",
-    options=[voice.name for voice in all_voices],
-    index=0
-)
-
-input_model = st.radio(
-    "Model",
-    options=["eleven_monolingual_v1", "eleven_multilingual_v1"],
-    index=0
-)
-
-if st.button("Generate Voice"):
-    try:
-        audio = generate_voice(input_text, input_voice, input_model)
-        st.audio(audio, format='audio/wav')
-    except UnauthenticatedRateLimitError:
-        st.error("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.")
-    except Exception as e:
-        st.error(str(e))
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
DELETED
@@ -1,1185 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import hashlib
|
3 |
-
import itertools
|
4 |
-
import json
|
5 |
-
import logging
|
6 |
-
import math
|
7 |
-
import uuid
|
8 |
-
import warnings
|
9 |
-
from os import environ, listdir, makedirs
|
10 |
-
from os.path import basename, join
|
11 |
-
from pathlib import Path
|
12 |
-
from typing import List
|
13 |
-
|
14 |
-
import datasets
|
15 |
-
import numpy as np
|
16 |
-
import torch
|
17 |
-
import torch.nn.functional as F
|
18 |
-
import torch.utils.checkpoint
|
19 |
-
import transformers
|
20 |
-
from accelerate import Accelerator
|
21 |
-
from accelerate.logging import get_logger
|
22 |
-
from accelerate.utils import ProjectConfiguration, set_seed
|
23 |
-
from huggingface_hub import create_repo, upload_folder
|
24 |
-
from PIL import Image
|
25 |
-
from torch import dtype
|
26 |
-
from torch.nn import Module
|
27 |
-
from torch.utils.data import Dataset
|
28 |
-
from torchvision import transforms
|
29 |
-
from tqdm.auto import tqdm
|
30 |
-
from transformers import AutoTokenizer, PretrainedConfig
|
31 |
-
|
32 |
-
import diffusers
|
33 |
-
from diffusers import (
|
34 |
-
AutoencoderKL,
|
35 |
-
DDPMScheduler,
|
36 |
-
DiffusionPipeline,
|
37 |
-
DPMSolverMultistepScheduler,
|
38 |
-
UNet2DConditionModel,
|
39 |
-
)
|
40 |
-
from diffusers.optimization import get_scheduler
|
41 |
-
from diffusers.utils import check_min_version, is_wandb_available
|
42 |
-
from diffusers.utils.import_utils import is_xformers_available
|
43 |
-
|
44 |
-
|
45 |
-
if is_wandb_available():
|
46 |
-
import wandb
|
47 |
-
|
48 |
-
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
49 |
-
check_min_version("0.13.0.dev0")
|
50 |
-
|
51 |
-
logger = get_logger(__name__)
|
52 |
-
|
53 |
-
|
54 |
-
def log_validation_images_to_tracker(
|
55 |
-
images: List[np.array], label: str, validation_prompt: str, accelerator: Accelerator, epoch: int
|
56 |
-
):
|
57 |
-
logger.info(f"Logging images to tracker for validation prompt: {validation_prompt}.")
|
58 |
-
|
59 |
-
for tracker in accelerator.trackers:
|
60 |
-
if tracker.name == "tensorboard":
|
61 |
-
np_images = np.stack([np.asarray(img) for img in images])
|
62 |
-
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
|
63 |
-
if tracker.name == "wandb":
|
64 |
-
tracker.log(
|
65 |
-
{
|
66 |
-
"validation": [
|
67 |
-
wandb.Image(image, caption=f"{label}_{epoch}_{i}: {validation_prompt}")
|
68 |
-
for i, image in enumerate(images)
|
69 |
-
]
|
70 |
-
}
|
71 |
-
)
|
72 |
-
|
73 |
-
|
74 |
-
# TODO: Add `prompt_embeds` and `negative_prompt_embeds` parameters to the function when `pre_compute_text_embeddings`
|
75 |
-
# argument is implemented.
|
76 |
-
def generate_validation_images(
|
77 |
-
text_encoder: Module,
|
78 |
-
tokenizer: Module,
|
79 |
-
unet: Module,
|
80 |
-
vae: Module,
|
81 |
-
arguments: argparse.Namespace,
|
82 |
-
accelerator: Accelerator,
|
83 |
-
weight_dtype: dtype,
|
84 |
-
):
|
85 |
-
logger.info("Running validation images.")
|
86 |
-
|
87 |
-
pipeline_args = {}
|
88 |
-
|
89 |
-
if text_encoder is not None:
|
90 |
-
pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder)
|
91 |
-
|
92 |
-
if vae is not None:
|
93 |
-
pipeline_args["vae"] = vae
|
94 |
-
|
95 |
-
# create pipeline (note: unet and vae are loaded again in float32)
|
96 |
-
pipeline = DiffusionPipeline.from_pretrained(
|
97 |
-
arguments.pretrained_model_name_or_path,
|
98 |
-
tokenizer=tokenizer,
|
99 |
-
unet=accelerator.unwrap_model(unet),
|
100 |
-
revision=arguments.revision,
|
101 |
-
torch_dtype=weight_dtype,
|
102 |
-
**pipeline_args,
|
103 |
-
)
|
104 |
-
|
105 |
-
# We train on the simplified learning objective. If we were previously predicting a variance, we need the
|
106 |
-
# scheduler to ignore it
|
107 |
-
scheduler_args = {}
|
108 |
-
|
109 |
-
if "variance_type" in pipeline.scheduler.config:
|
110 |
-
variance_type = pipeline.scheduler.config.variance_type
|
111 |
-
|
112 |
-
if variance_type in ["learned", "learned_range"]:
|
113 |
-
variance_type = "fixed_small"
|
114 |
-
|
115 |
-
scheduler_args["variance_type"] = variance_type
|
116 |
-
|
117 |
-
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
|
118 |
-
pipeline = pipeline.to(accelerator.device)
|
119 |
-
pipeline.set_progress_bar_config(disable=True)
|
120 |
-
|
121 |
-
generator = (
|
122 |
-
None if arguments.seed is None else torch.Generator(device=accelerator.device).manual_seed(arguments.seed)
|
123 |
-
)
|
124 |
-
|
125 |
-
images_sets = []
|
126 |
-
for vp, nvi, vnp, vis, vgs in zip(
|
127 |
-
arguments.validation_prompt,
|
128 |
-
arguments.validation_number_images,
|
129 |
-
arguments.validation_negative_prompt,
|
130 |
-
arguments.validation_inference_steps,
|
131 |
-
arguments.validation_guidance_scale,
|
132 |
-
):
|
133 |
-
images = []
|
134 |
-
if vp is not None:
|
135 |
-
logger.info(
|
136 |
-
f"Generating {nvi} images with prompt: '{vp}', negative prompt: '{vnp}', inference steps: {vis}, "
|
137 |
-
f"guidance scale: {vgs}."
|
138 |
-
)
|
139 |
-
|
140 |
-
pipeline_args = {"prompt": vp, "negative_prompt": vnp, "num_inference_steps": vis, "guidance_scale": vgs}
|
141 |
-
|
142 |
-
# run inference
|
143 |
-
# TODO: it would be good to measure whether it's faster to run inference on all images at once, one at a
|
144 |
-
# time or in small batches
|
145 |
-
for _ in range(nvi):
|
146 |
-
with torch.autocast("cuda"):
|
147 |
-
image = pipeline(**pipeline_args, num_images_per_prompt=1, generator=generator).images[0]
|
148 |
-
images.append(image)
|
149 |
-
|
150 |
-
images_sets.append(images)
|
151 |
-
|
152 |
-
del pipeline
|
153 |
-
if torch.cuda.is_available():
|
154 |
-
torch.cuda.empty_cache()
|
155 |
-
|
156 |
-
return images_sets
|
157 |
-
|
158 |
-
|
159 |
-
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
|
160 |
-
text_encoder_config = PretrainedConfig.from_pretrained(
|
161 |
-
pretrained_model_name_or_path,
|
162 |
-
subfolder="text_encoder",
|
163 |
-
revision=revision,
|
164 |
-
)
|
165 |
-
model_class = text_encoder_config.architectures[0]
|
166 |
-
|
167 |
-
if model_class == "CLIPTextModel":
|
168 |
-
from transformers import CLIPTextModel
|
169 |
-
|
170 |
-
return CLIPTextModel
|
171 |
-
elif model_class == "RobertaSeriesModelWithTransformation":
|
172 |
-
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
|
173 |
-
|
174 |
-
return RobertaSeriesModelWithTransformation
|
175 |
-
else:
|
176 |
-
raise ValueError(f"{model_class} is not supported.")
|
177 |
-
|
178 |
-
|
179 |
-
def parse_args(input_args=None):
|
180 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
181 |
-
parser.add_argument(
|
182 |
-
"--pretrained_model_name_or_path",
|
183 |
-
type=str,
|
184 |
-
default=None,
|
185 |
-
required=True,
|
186 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
187 |
-
)
|
188 |
-
parser.add_argument(
|
189 |
-
"--revision",
|
190 |
-
type=str,
|
191 |
-
default=None,
|
192 |
-
required=False,
|
193 |
-
help="Revision of pretrained model identifier from huggingface.co/models.",
|
194 |
-
)
|
195 |
-
parser.add_argument(
|
196 |
-
"--tokenizer_name",
|
197 |
-
type=str,
|
198 |
-
default=None,
|
199 |
-
help="Pretrained tokenizer name or path if not the same as model_name",
|
200 |
-
)
|
201 |
-
parser.add_argument(
|
202 |
-
"--instance_data_dir",
|
203 |
-
type=str,
|
204 |
-
default=None,
|
205 |
-
required=False,
|
206 |
-
help="A folder containing the training data of instance images.",
|
207 |
-
)
|
208 |
-
parser.add_argument(
|
209 |
-
"--class_data_dir",
|
210 |
-
type=str,
|
211 |
-
default=None,
|
212 |
-
required=False,
|
213 |
-
help="A folder containing the training data of class images.",
|
214 |
-
)
|
215 |
-
parser.add_argument(
|
216 |
-
"--instance_prompt",
|
217 |
-
type=str,
|
218 |
-
default=None,
|
219 |
-
required=False,
|
220 |
-
help="The prompt with identifier specifying the instance",
|
221 |
-
)
|
222 |
-
parser.add_argument(
|
223 |
-
"--class_prompt",
|
224 |
-
type=str,
|
225 |
-
default=None,
|
226 |
-
help="The prompt to specify images in the same class as provided instance images.",
|
227 |
-
)
|
228 |
-
parser.add_argument(
|
229 |
-
"--with_prior_preservation",
|
230 |
-
default=False,
|
231 |
-
action="store_true",
|
232 |
-
help="Flag to add prior preservation loss.",
|
233 |
-
)
|
234 |
-
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
235 |
-
parser.add_argument(
|
236 |
-
"--num_class_images",
|
237 |
-
type=int,
|
238 |
-
default=100,
|
239 |
-
help=(
|
240 |
-
"Minimal class images for prior preservation loss. If there are not enough images already present in"
|
241 |
-
" class_data_dir, additional images will be sampled with class_prompt."
|
242 |
-
),
|
243 |
-
)
|
244 |
-
parser.add_argument(
|
245 |
-
"--output_dir",
|
246 |
-
type=str,
|
247 |
-
default="text-inversion-model",
|
248 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
249 |
-
)
|
250 |
-
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
251 |
-
parser.add_argument(
|
252 |
-
"--resolution",
|
253 |
-
type=int,
|
254 |
-
default=512,
|
255 |
-
help=(
|
256 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
257 |
-
" resolution"
|
258 |
-
),
|
259 |
-
)
|
260 |
-
parser.add_argument(
|
261 |
-
"--center_crop",
|
262 |
-
default=False,
|
263 |
-
action="store_true",
|
264 |
-
help=(
|
265 |
-
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
|
266 |
-
" cropped. The images will be resized to the resolution first before cropping."
|
267 |
-
),
|
268 |
-
)
|
269 |
-
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
|
270 |
-
parser.add_argument(
|
271 |
-
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
272 |
-
)
|
273 |
-
parser.add_argument(
|
274 |
-
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
275 |
-
)
|
276 |
-
parser.add_argument("--num_train_epochs", type=int, default=1)
|
277 |
-
parser.add_argument(
|
278 |
-
"--max_train_steps",
|
279 |
-
type=int,
|
280 |
-
default=None,
|
281 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
282 |
-
)
|
283 |
-
parser.add_argument(
|
284 |
-
"--checkpointing_steps",
|
285 |
-
type=int,
|
286 |
-
default=500,
|
287 |
-
help=(
|
288 |
-
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
|
289 |
-
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
|
290 |
-
" training using `--resume_from_checkpoint`."
|
291 |
-
),
|
292 |
-
)
|
293 |
-
parser.add_argument(
|
294 |
-
"--checkpoints_total_limit",
|
295 |
-
type=int,
|
296 |
-
default=None,
|
297 |
-
help=(
|
298 |
-
"Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
|
299 |
-
" See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
|
300 |
-
" for more docs"
|
301 |
-
),
|
302 |
-
)
|
303 |
-
parser.add_argument(
|
304 |
-
"--resume_from_checkpoint",
|
305 |
-
type=str,
|
306 |
-
default=None,
|
307 |
-
help=(
|
308 |
-
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
|
309 |
-
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
|
310 |
-
),
|
311 |
-
)
|
312 |
-
parser.add_argument(
|
313 |
-
"--gradient_accumulation_steps",
|
314 |
-
type=int,
|
315 |
-
default=1,
|
316 |
-
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
317 |
-
)
|
318 |
-
parser.add_argument(
|
319 |
-
"--gradient_checkpointing",
|
320 |
-
action="store_true",
|
321 |
-
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
322 |
-
)
|
323 |
-
parser.add_argument(
|
324 |
-
"--learning_rate",
|
325 |
-
type=float,
|
326 |
-
default=5e-6,
|
327 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
328 |
-
)
|
329 |
-
parser.add_argument(
|
330 |
-
"--scale_lr",
|
331 |
-
action="store_true",
|
332 |
-
default=False,
|
333 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
334 |
-
)
|
335 |
-
parser.add_argument(
|
336 |
-
"--lr_scheduler",
|
337 |
-
type=str,
|
338 |
-
default="constant",
|
339 |
-
help=(
|
340 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
341 |
-
' "constant", "constant_with_warmup"]'
|
342 |
-
),
|
343 |
-
)
|
344 |
-
parser.add_argument(
|
345 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
346 |
-
)
|
347 |
-
parser.add_argument(
|
348 |
-
"--lr_num_cycles",
|
349 |
-
type=int,
|
350 |
-
default=1,
|
351 |
-
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
|
352 |
-
)
|
353 |
-
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
|
354 |
-
parser.add_argument(
|
355 |
-
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
356 |
-
)
|
357 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
358 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
359 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
360 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
361 |
-
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
362 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
363 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
364 |
-
parser.add_argument(
|
365 |
-
"--hub_model_id",
|
366 |
-
type=str,
|
367 |
-
default=None,
|
368 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
369 |
-
)
|
370 |
-
parser.add_argument(
|
371 |
-
"--logging_dir",
|
372 |
-
type=str,
|
373 |
-
default="logs",
|
374 |
-
help=(
|
375 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
376 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
377 |
-
),
|
378 |
-
)
|
379 |
-
parser.add_argument(
|
380 |
-
"--allow_tf32",
|
381 |
-
action="store_true",
|
382 |
-
help=(
|
383 |
-
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
|
384 |
-
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
|
385 |
-
),
|
386 |
-
)
|
387 |
-
parser.add_argument(
|
388 |
-
"--report_to",
|
389 |
-
type=str,
|
390 |
-
default="tensorboard",
|
391 |
-
help=(
|
392 |
-
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
|
393 |
-
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
|
394 |
-
),
|
395 |
-
)
|
396 |
-
parser.add_argument(
|
397 |
-
"--validation_steps",
|
398 |
-
type=int,
|
399 |
-
default=None,
|
400 |
-
        help=(
            "Run validation every X steps. Validation consists of running the prompt(s) `validation_prompt` "
            "multiple times (`validation_number_images`) and logging the images."
        ),
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during validation to verify that the model is learning. You can use commas to "
        "define multiple prompts. This parameter can also be defined within the file given by the "
        "`concepts_list` parameter for the respective subject.",
    )
    parser.add_argument(
        "--validation_number_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with the validation parameters given. This "
        "can be defined within the file given by the `concepts_list` parameter for the respective subject.",
    )
    parser.add_argument(
        "--validation_negative_prompt",
        type=str,
        default=None,
        help="A negative prompt that is used during validation to verify that the model is learning. You can use "
        "commas to define multiple negative prompts, each one corresponding to a validation prompt. This parameter "
        "can also be defined within the file given by the `concepts_list` parameter for the respective subject.",
    )
    parser.add_argument(
        "--validation_inference_steps",
        type=int,
        default=25,
        help="Number of inference steps (denoising steps) to run during validation. This can be defined within the "
        "file given by the `concepts_list` parameter for the respective subject.",
    )
    parser.add_argument(
        "--validation_guidance_scale",
        type=float,
        default=7.5,
        help="Controls how closely the image generation process follows the text prompt. This can be defined within "
        "the file given by the `concepts_list` parameter for the respective subject.",
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or"
            " the flag passed with the `accelerate.launch` command. Use this argument to override the accelerate"
            " config."
        ),
    )
    parser.add_argument(
        "--prior_generation_precision",
        type=str,
        default=None,
        choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--set_grads_to_none",
        action="store_true",
        help=(
            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
            " behaviors, so disable this argument if it causes any problems. More info:"
            " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
        ),
    )
    parser.add_argument(
        "--concepts_list",
        type=str,
        default=None,
        help="Path to a JSON file containing a list of multiple concepts; it will overwrite parameters like "
        "instance_prompt, class_prompt, etc.",
    )

    if input_args:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    if not args.concepts_list and (not args.instance_data_dir or not args.instance_prompt):
        raise ValueError(
            "You must specify either instance parameters (data directory, prompt, etc.) or use "
            "the `concepts_list` parameter and specify them within the file."
        )

    if args.concepts_list:
        if args.instance_prompt:
            raise ValueError("If you are using the `concepts_list` parameter, define the instance prompt within the file.")
        if args.instance_data_dir:
            raise ValueError(
                "If you are using the `concepts_list` parameter, define the instance data directory within the file."
            )
        if args.validation_steps and (args.validation_prompt or args.validation_negative_prompt):
            raise ValueError(
                "If you are using the `concepts_list` parameter, define the validation parameters for "
                "each subject within the file:\n - `validation_prompt`."
                "\n - `validation_negative_prompt`.\n - `validation_guidance_scale`."
                "\n - `validation_number_images`."
                "\n - `validation_inference_steps`.\nThe `validation_steps` parameter is the only one "
                "that needs to be defined outside the file."
            )

    env_local_rank = int(environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.with_prior_preservation:
        if not args.concepts_list:
            if not args.class_data_dir:
                raise ValueError("You must specify a data directory for class images.")
            if not args.class_prompt:
                raise ValueError("You must specify a prompt for class images.")
        else:
            if args.class_data_dir:
                raise ValueError(
                    "If you are using the `concepts_list` parameter, define the class data directory within the file."
                )
            if args.class_prompt:
                raise ValueError(
                    "If you are using the `concepts_list` parameter, define the class prompt within the file."
                )
    else:
        # logger is not available yet; warn only when the ignored parameters were actually provided
        if args.class_data_dir:
            warnings.warn(
                "Ignoring the `class_data_dir` parameter; you need to use it together with `with_prior_preservation`."
            )
        if args.class_prompt:
            warnings.warn(
                "Ignoring the `class_prompt` parameter; you need to use it together with `with_prior_preservation`."
            )

    return args
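For reference, a minimal `concepts_list` file consistent with the keys this script reads in `main()` below might look like the following sketch. The paths and prompts are illustrative placeholders, not values from the original repository:

# A minimal sketch of a `concepts_list` JSON file, based on the keys read
# below: `instance_data_dir` and `instance_prompt` (required), the class_*
# keys (only with --with_prior_preservation), and the validation_* keys
# (only with --validation_steps). All values here are placeholders.
import json

concepts = [
    {
        "instance_data_dir": "./data/subject_1",
        "instance_prompt": "a photo of sks subject one",
        "class_data_dir": "./class_images/subject_1",
        "class_prompt": "a photo of a person",
        "validation_prompt": "a photo of sks subject one on a beach",
        "validation_negative_prompt": "low quality",
        "validation_number_images": 4,
        "validation_inference_steps": 25,
        "validation_guidance_scale": 7.5,
    },
]

with open("concepts_list.json", "w") as f:
    json.dump(concepts, f, indent=2)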
class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and then tokenizes the prompts.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        tokenizer,
        class_data_root=None,
        class_prompt=None,
        size=512,
        center_crop=False,
    ):
        self.size = size
        self.center_crop = center_crop
        self.tokenizer = tokenizer

        self.instance_data_root = []
        self.instance_images_path = []
        self.num_instance_images = []
        self.instance_prompt = []
        self.class_data_root = [] if class_data_root is not None else None
        self.class_images_path = []
        self.num_class_images = []
        self.class_prompt = []
        self._length = 0

        for i in range(len(instance_data_root)):
            self.instance_data_root.append(Path(instance_data_root[i]))
            if not self.instance_data_root[i].exists():
                raise ValueError("Instance images root doesn't exist.")

            self.instance_images_path.append(list(Path(instance_data_root[i]).iterdir()))
            self.num_instance_images.append(len(self.instance_images_path[i]))
            self.instance_prompt.append(instance_prompt[i])
            self._length += self.num_instance_images[i]

            if class_data_root is not None:
                self.class_data_root.append(Path(class_data_root[i]))
                self.class_data_root[i].mkdir(parents=True, exist_ok=True)
                self.class_images_path.append(list(self.class_data_root[i].iterdir()))
                # Count the images for this subject (indexing with [i]), not the number of subjects.
                self.num_class_images.append(len(self.class_images_path[i]))
                if self.num_class_images[i] > self.num_instance_images[i]:
                    self._length -= self.num_instance_images[i]
                    self._length += self.num_class_images[i]
                self.class_prompt.append(class_prompt[i])

        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        example = {}
        for i in range(len(self.instance_images_path)):
            instance_image = Image.open(self.instance_images_path[i][index % self.num_instance_images[i]])
            if not instance_image.mode == "RGB":
                instance_image = instance_image.convert("RGB")
            example[f"instance_images_{i}"] = self.image_transforms(instance_image)
            example[f"instance_prompt_ids_{i}"] = self.tokenizer(
                self.instance_prompt[i],
                truncation=True,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                return_tensors="pt",
            ).input_ids

        if self.class_data_root:
            for i in range(len(self.class_data_root)):
                class_image = Image.open(self.class_images_path[i][index % self.num_class_images[i]])
                if not class_image.mode == "RGB":
                    class_image = class_image.convert("RGB")
                example[f"class_images_{i}"] = self.image_transforms(class_image)
                example[f"class_prompt_ids_{i}"] = self.tokenizer(
                    self.class_prompt[i],
                    truncation=True,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    return_tensors="pt",
                ).input_ids

        return example
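A minimal sketch of instantiating this dataset for two hypothetical subjects; the model id, paths, and prompts below are placeholders. Each `__getitem__` call returns one image/prompt pair per subject, cycling through shorter image folders via the modulo indexing above:

# Placeholder model id and data paths; not values from the original repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="tokenizer", use_fast=False
)
dataset = DreamBoothDataset(
    instance_data_root=["./data/subject_1", "./data/subject_2"],
    instance_prompt=["a photo of sks subject one", "a photo of sks subject two"],
    tokenizer=tokenizer,
    size=512,
)
example = dataset[0]
# Keys: instance_images_0, instance_prompt_ids_0, instance_images_1, instance_prompt_ids_1, ...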
def collate_fn(num_instances, examples, with_prior_preservation=False):
    input_ids = []
    pixel_values = []

    for i in range(num_instances):
        input_ids += [example[f"instance_prompt_ids_{i}"] for example in examples]
        pixel_values += [example[f"instance_images_{i}"] for example in examples]

    # Concat class and instance examples for prior preservation.
    # We do this to avoid doing two forward passes.
    if with_prior_preservation:
        for i in range(num_instances):
            input_ids += [example[f"class_prompt_ids_{i}"] for example in examples]
            pixel_values += [example[f"class_images_{i}"] for example in examples]

    pixel_values = torch.stack(pixel_values)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    input_ids = torch.cat(input_ids, dim=0)

    batch = {
        "input_ids": input_ids,
        "pixel_values": pixel_values,
    }
    return batch
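The layout this collate function produces (all instance rows first, then all class rows when prior preservation is on) is what lets the training loop later split the prediction with `torch.chunk(..., 2, dim=0)`. A minimal sanity-check sketch with dummy tensors, assuming two subjects and a batch of two examples:

# Dummy batch: 2 examples, 2 subjects (num_instances=2), prior preservation on.
# Uses the collate_fn defined above; all tensor contents are zeros.
import torch

num_instances, batch_size, seq_len = 2, 2, 77
examples = [
    {
        **{f"instance_prompt_ids_{i}": torch.zeros(1, seq_len, dtype=torch.long) for i in range(num_instances)},
        **{f"instance_images_{i}": torch.zeros(3, 512, 512) for i in range(num_instances)},
        **{f"class_prompt_ids_{i}": torch.zeros(1, seq_len, dtype=torch.long) for i in range(num_instances)},
        **{f"class_images_{i}": torch.zeros(3, 512, 512) for i in range(num_instances)},
    }
    for _ in range(batch_size)
]

batch = collate_fn(num_instances, examples, with_prior_preservation=True)
print(batch["pixel_values"].shape)  # torch.Size([8, 3, 512, 512]): first half instance, second half class
instance_rows, class_rows = torch.chunk(batch["pixel_values"], 2, dim=0)  # mirrors the loss computation below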
class PromptDataset(Dataset):
    """A simple dataset to prepare the prompts to generate class images on multiple GPUs."""

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        example = {}
        example["prompt"] = self.prompt
        example["index"] = index
        return example
def main(args):
    logging_dir = Path(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(
        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
    )
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")

    # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
    # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
    # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
    if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
        raise ValueError(
            "Gradient accumulation is not supported when training the text encoder in distributed training. "
            "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
        )

    instance_data_dir = []
    instance_prompt = []
    class_data_dir = [] if args.with_prior_preservation else None
    class_prompt = [] if args.with_prior_preservation else None
    if args.concepts_list:
        with open(args.concepts_list, "r") as f:
            concepts_list = json.load(f)

        if args.validation_steps:
            args.validation_prompt = []
            args.validation_number_images = []
            args.validation_negative_prompt = []
            args.validation_inference_steps = []
            args.validation_guidance_scale = []

        for concept in concepts_list:
            instance_data_dir.append(concept["instance_data_dir"])
            instance_prompt.append(concept["instance_prompt"])

            if args.with_prior_preservation:
                try:
                    class_data_dir.append(concept["class_data_dir"])
                    class_prompt.append(concept["class_prompt"])
                except KeyError:
                    raise KeyError(
                        "`class_data_dir` or `class_prompt` not found in concepts_list while using "
                        "`with_prior_preservation`."
                    )
            else:
                if "class_data_dir" in concept:
                    warnings.warn(
                        "Ignoring `class_data_dir` key; to use it you need to enable `with_prior_preservation`."
                    )
                if "class_prompt" in concept:
                    warnings.warn(
                        "Ignoring `class_prompt` key; to use it you need to enable `with_prior_preservation`."
                    )

            if args.validation_steps:
                args.validation_prompt.append(concept.get("validation_prompt", None))
                args.validation_number_images.append(concept.get("validation_number_images", 4))
                args.validation_negative_prompt.append(concept.get("validation_negative_prompt", None))
                args.validation_inference_steps.append(concept.get("validation_inference_steps", 25))
                args.validation_guidance_scale.append(concept.get("validation_guidance_scale", 7.5))
    else:
        # Parse instance and class inputs, and double-check that their lengths match
        instance_data_dir = args.instance_data_dir.split(",")
        instance_prompt = args.instance_prompt.split(",")
        assert all(
            x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)]
        ), "Instance data dir and prompt inputs are not of the same length."

        if args.with_prior_preservation:
            class_data_dir = args.class_data_dir.split(",")
            class_prompt = args.class_prompt.split(",")
            assert all(
                x == len(instance_data_dir)
                for x in [len(instance_data_dir), len(instance_prompt), len(class_data_dir), len(class_prompt)]
            ), "Instance & class data dir or prompt inputs are not of the same length."

        if args.validation_steps:
            validation_prompts = args.validation_prompt.split(",")
            num_of_validation_prompts = len(validation_prompts)
            args.validation_prompt = validation_prompts
            args.validation_number_images = [args.validation_number_images] * num_of_validation_prompts

            negative_validation_prompts = [None] * num_of_validation_prompts
            if args.validation_negative_prompt:
                negative_validation_prompts = args.validation_negative_prompt.split(",")
                while len(negative_validation_prompts) < num_of_validation_prompts:
                    negative_validation_prompts.append(None)
            args.validation_negative_prompt = negative_validation_prompts

            assert num_of_validation_prompts == len(
                negative_validation_prompts
            ), "The number of negative prompts for validation is greater than the number of validation prompts."
            args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts
            args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Generate class images if prior preservation is enabled.
    if args.with_prior_preservation:
        for i in range(len(class_data_dir)):
            class_images_dir = Path(class_data_dir[i])
            if not class_images_dir.exists():
                class_images_dir.mkdir(parents=True)
            cur_class_images = len(list(class_images_dir.iterdir()))

            if cur_class_images < args.num_class_images:
                torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
                if args.prior_generation_precision == "fp32":
                    torch_dtype = torch.float32
                elif args.prior_generation_precision == "fp16":
                    torch_dtype = torch.float16
                elif args.prior_generation_precision == "bf16":
                    torch_dtype = torch.bfloat16
                pipeline = DiffusionPipeline.from_pretrained(
                    args.pretrained_model_name_or_path,
                    torch_dtype=torch_dtype,
                    safety_checker=None,
                    revision=args.revision,
                )
                pipeline.set_progress_bar_config(disable=True)

                num_new_images = args.num_class_images - cur_class_images
                logger.info(f"Number of class images to sample: {num_new_images}.")

                sample_dataset = PromptDataset(class_prompt[i], num_new_images)
                sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)

                sample_dataloader = accelerator.prepare(sample_dataloader)
                pipeline.to(accelerator.device)

                for example in tqdm(
                    sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
                ):
                    images = pipeline(example["prompt"]).images

                    for ii, image in enumerate(images):
                        hash_image = hashlib.sha1(image.tobytes()).hexdigest()
                        image_filename = (
                            class_images_dir / f"{example['index'][ii] + cur_class_images}-{hash_image}.jpg"
                        )
                        image.save(image_filename)

                # Clean up the memory by deleting one-time-use variables.
                del pipeline
                del sample_dataloader
                del sample_dataset
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the tokenizer
    tokenizer = None
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
    elif args.pretrained_model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            args.pretrained_model_name_or_path,
            subfolder="tokenizer",
            revision=args.revision,
            use_fast=False,
        )

    # Import the correct text encoder class
    text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)

    # Load scheduler and models
    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    text_encoder = text_encoder_cls.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
    )
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
    )

    vae.requires_grad_(False)
    if not args.train_text_encoder:
        text_encoder.requires_grad_(False)

    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly.")

    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
        if args.train_text_encoder:
            text_encoder.gradient_checkpointing_enable()

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )

        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW

    # Optimizer creation
    params_to_optimize = (
        itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
    )
    optimizer = optimizer_class(
        params_to_optimize,
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    # Dataset and DataLoaders creation:
    train_dataset = DreamBoothDataset(
        instance_data_root=instance_data_dir,
        instance_prompt=instance_prompt,
        class_data_root=class_data_dir,
        class_prompt=class_prompt,
        tokenizer=tokenizer,
        size=args.resolution,
        center_crop=args.center_crop,
    )

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch_size,
        shuffle=True,
        collate_fn=lambda examples: collate_fn(len(instance_data_dir), examples, args.with_prior_preservation),
        num_workers=1,
    )

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
        num_cycles=args.lr_num_cycles,
        power=args.lr_power,
    )

    # Prepare everything with our `accelerator`.
    if args.train_text_encoder:
        unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, text_encoder, optimizer, train_dataloader, lr_scheduler
        )
    else:
        unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, optimizer, train_dataloader, lr_scheduler
        )

    # For mixed precision training we cast the text_encoder and vae weights to half-precision
    # as these models are only used for inference; keeping weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Move vae and text_encoder to device and cast to weight_dtype
    vae.to(accelerator.device, dtype=weight_dtype)
    if not args.train_text_encoder:
        text_encoder.to(accelerator.device, dtype=weight_dtype)

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        accelerator.init_trackers("dreambooth", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            resume_global_step = global_step * args.gradient_accumulation_steps
            first_epoch = global_step // num_update_steps_per_epoch
            resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
    progress_bar.set_description("Steps")

    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        if args.train_text_encoder:
            text_encoder.train()
        for step, batch in enumerate(train_dataloader):
            # Skip steps until we reach the resumed step
            if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
                if step % args.gradient_accumulation_steps == 0:
                    progress_bar.update(1)
                continue

            with accelerator.accumulate(unet):
                # Convert images to latent space
                latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
                latents = latents * vae.config.scaling_factor

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                # Sample a random timestep for each image
                time_steps = torch.randint(
                    0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
                )
                time_steps = time_steps.long()

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = noise_scheduler.add_noise(latents, noise, time_steps)

                # Get the text embedding for conditioning
                encoder_hidden_states = text_encoder(batch["input_ids"])[0]

                # Predict the noise residual
                model_pred = unet(noisy_latents, time_steps, encoder_hidden_states).sample

                # Get the target for the loss depending on the prediction type
                if noise_scheduler.config.prediction_type == "epsilon":
                    target = noise
                elif noise_scheduler.config.prediction_type == "v_prediction":
                    target = noise_scheduler.get_velocity(latents, noise, time_steps)
                else:
                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                if args.with_prior_preservation:
                    # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
                    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
                    target, target_prior = torch.chunk(target, 2, dim=0)

                    # Compute instance loss
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

                    # Compute prior loss
                    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")

                    # Add the prior loss to the instance loss.
                    loss = loss + args.prior_loss_weight * prior_loss
                else:
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    params_to_clip = (
                        itertools.chain(unet.parameters(), text_encoder.parameters())
                        if args.train_text_encoder
                        else unet.parameters()
                    )
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=args.set_grads_to_none)

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                if accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        save_path = join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

                    if (
                        args.validation_steps
                        and any(args.validation_prompt)
                        and global_step % args.validation_steps == 0
                    ):
                        images_set = generate_validation_images(
                            text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype
                        )
                        for images, validation_prompt in zip(images_set, args.validation_prompt):
                            if len(images) > 0:
                                label = str(uuid.uuid1())[:8]  # generate an id for a different set of images
                                log_validation_images_to_tracker(
                                    images, label, validation_prompt, accelerator, global_step
                                )

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

    # Create the pipeline using the trained modules and save it.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        pipeline = DiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            unet=accelerator.unwrap_model(unet),
            text_encoder=accelerator.unwrap_model(text_encoder),
            revision=args.revision,
        )
        pipeline.save_pretrained(args.output_dir)

        if args.push_to_hub:
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    accelerator.end_training()


if __name__ == "__main__":
    args = parse_args()
    main(args)
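Because `parse_args` accepts an explicit argument list, the script can also be driven programmatically. A minimal sketch, assuming the flag names defined earlier in the file match their `args.*` attribute names used above; the model id, paths, and values are illustrative placeholders:

args = parse_args(
    [
        "--pretrained_model_name_or_path", "runwayml/stable-diffusion-v1-5",  # placeholder model id
        "--output_dir", "./dreambooth-out",
        "--concepts_list", "concepts_list.json",  # e.g. the file sketched after parse_args()
        "--validation_steps", "100",
        "--resolution", "512",
        "--train_batch_size", "1",
        "--max_train_steps", "800",
    ]
)
main(args)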
spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py
DELETED
@@ -1,42 +0,0 @@
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='PhotoMetricDistortion'),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 2)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(416, 416),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py
DELETED
@@ -1,10 +0,0 @@
_base_ = './fcn_hr18_512x512_20k_voc12aug.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py
DELETED
@@ -1,39 +0,0 @@
_base_ = './ocrnet_hr18_512x512_160k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=[
        dict(
            type='FCNHead',
            in_channels=[48, 96, 192, 384],
            channels=sum([48, 96, 192, 384]),
            input_transform='resize_concat',
            in_index=(0, 1, 2, 3),
            kernel_size=1,
            num_convs=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            dropout_ratio=-1,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='OCRHead',
            in_channels=[48, 96, 192, 384],
            channels=512,
            ocr_channels=256,
            input_transform='resize_concat',
            in_index=(0, 1, 2, 3),
            norm_cfg=norm_cfg,
            dropout_ratio=-1,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    ])
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
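Configs like this one rely on `_base_` inheritance: values set here override the inherited base values. A minimal sketch of how such a file resolves, assuming an mmcv 1.x install and that the relative `_base_` files exist on disk; the config path is illustrative:

from mmcv import Config

# Loads the file above and merges it over its _base_ configs.
cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py')
print(cfg.model.decode_head.num_classes)  # 150, the ADE20K class count set here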
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
DELETED
@@ -1,15 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .base import LoggerHook
from .dvclive import DvcliveLoggerHook
from .mlflow import MlflowLoggerHook
from .neptune import NeptuneLoggerHook
from .pavi import PaviLoggerHook
from .tensorboard import TensorboardLoggerHook
from .text import TextLoggerHook
from .wandb import WandbLoggerHook

__all__ = [
    'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
    'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
    'NeptuneLoggerHook', 'DvcliveLoggerHook'
]
spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image_interactive.py
DELETED
@@ -1,102 +0,0 @@
from share import *
import config

import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random

from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler


model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_scribble.pth', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)


def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    with torch.no_grad():
        img = resize_image(HWC3(input_image['mask'][:, :, 0]), image_resolution)
        H, W, C = img.shape

        detected_map = np.zeros_like(img, dtype=np.uint8)
        detected_map[np.min(img, axis=2) > 127] = 255

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)  # Magic number. IDK why. Perhaps because 0.825**12 < 0.01 but 0.826**12 > 0.01
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
    return [255 - detected_map] + results


def create_canvas(w, h):
    return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255


block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Interactive Scribbles")
    with gr.Row():
        with gr.Column():
            canvas_width = gr.Slider(label="Canvas Width", minimum=256, maximum=1024, value=512, step=1)
            canvas_height = gr.Slider(label="Canvas Height", minimum=256, maximum=1024, value=512, step=1)
            create_button = gr.Button(label="Start", value='Open drawing canvas!')
            input_image = gr.Image(source='upload', type='numpy', tool='sketch')
            gr.Markdown(value='Do not forget to change your brush width to make it thinner. (Gradio does not allow developers to set the brush width, so you need to do it manually.) '
                              'Just click on the small pencil icon in the upper right corner of the above block.')
            create_button.click(fn=create_canvas, inputs=[canvas_width, canvas_height], outputs=[input_image])
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            with gr.Accordion("Advanced options", open=False):
                num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
                eta = gr.Number(label="eta (DDIM)", value=0.0)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
                n_prompt = gr.Textbox(label="Negative Prompt",
                                      value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
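The key preprocessing step in `process()` above is the scribble binarization: pixels whose darkest channel is brighter than 127 become control signal. A standalone numpy sketch of the same rule, with a toy random array standing in for the drawn mask:

import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)  # toy stand-in for the resized mask image
detected_map = np.zeros_like(img, dtype=np.uint8)
detected_map[np.min(img, axis=2) > 127] = 255  # same thresholding rule as in process()
print(detected_map[:, :, 0])  # binary scribble map, 0 or 255 per pixel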
spaces/ArtyomKhyan/Detection/utils/utils.py
DELETED
@@ -1,1200 +0,0 @@
|
|
1 |
-
import glob
|
2 |
-
import math
|
3 |
-
import os
|
4 |
-
import random
|
5 |
-
import shutil
|
6 |
-
import subprocess
|
7 |
-
import time
|
8 |
-
from copy import copy
|
9 |
-
from pathlib import Path
|
10 |
-
from sys import platform
|
11 |
-
|
12 |
-
import cv2
|
13 |
-
import matplotlib
|
14 |
-
import matplotlib.pyplot as plt
|
15 |
-
import numpy as np
|
16 |
-
import torch
|
17 |
-
import torch.nn as nn
|
18 |
-
import torchvision
|
19 |
-
import yaml
|
20 |
-
from scipy.signal import butter, filtfilt
|
21 |
-
from tqdm import tqdm
|
22 |
-
|
23 |
-
from . import torch_utils # torch_utils, google_utils
|
24 |
-
|
25 |
-
# Set printoptions
|
26 |
-
torch.set_printoptions(linewidth=320, precision=5, profile='long')
|
27 |
-
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
|
28 |
-
matplotlib.rc('font', **{'size': 11})
|
29 |
-
|
30 |
-
# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
|
31 |
-
cv2.setNumThreads(0)
|
32 |
-
|
33 |
-
|
34 |
-
def init_seeds(seed=0):
|
35 |
-
random.seed(seed)
|
36 |
-
np.random.seed(seed)
|
37 |
-
torch_utils.init_seeds(seed=seed)
|
38 |
-
|
39 |
-
|
40 |
-
def check_git_status():
|
41 |
-
# Suggest 'git pull' if repo is out of date
|
42 |
-
if platform in ['linux', 'darwin']:
|
43 |
-
s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
|
44 |
-
if 'Your branch is behind' in s:
|
45 |
-
print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
|
46 |
-
|
47 |
-
|
48 |
-
def check_img_size(img_size, s=32):
|
49 |
-
# Verify img_size is a multiple of stride s
|
50 |
-
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
|
51 |
-
if new_size != img_size:
|
52 |
-
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
|
53 |
-
return new_size
|
54 |
-
|
55 |
-
|
56 |
-
def check_anchors(dataset, model, thr=4.0, imgsz=640):
|
57 |
-
# Check anchor fit to data, recompute if necessary
|
58 |
-
print('\nAnalyzing anchors... ', end='')
|
59 |
-
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
|
60 |
-
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
|
61 |
-
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
|
62 |
-
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
|
63 |
-
|
64 |
-
def metric(k): # compute metric
|
65 |
-
r = wh[:, None] / k[None]
|
66 |
-
x = torch.min(r, 1. / r).min(2)[0] # ratio metric
|
67 |
-
best = x.max(1)[0] # best_x
|
68 |
-
return (best > 1. / thr).float().mean() # best possible recall
|
69 |
-
|
70 |
-
bpr = metric(m.anchor_grid.clone().cpu().view(-1, 2))
|
71 |
-
print('Best Possible Recall (BPR) = %.4f' % bpr, end='')
|
72 |
-
if bpr < 0.99: # threshold to recompute
|
73 |
-
print('. Attempting to generate improved anchors, please wait...' % bpr)
|
74 |
-
na = m.anchor_grid.numel() // 2 # number of anchors
|
75 |
-
new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
|
76 |
-
new_bpr = metric(new_anchors.reshape(-1, 2))
|
77 |
-
if new_bpr > bpr: # replace anchors
|
78 |
-
new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
|
79 |
-
m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
|
80 |
-
m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
|
81 |
-
check_anchor_order(m)
|
82 |
-
print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
|
83 |
-
else:
|
84 |
-
print('Original anchors better than new anchors. Proceeding with original anchors.')
|
85 |
-
print('') # newline
|
86 |
-
|
87 |
-
|
88 |
-
def check_anchor_order(m):
|
89 |
-
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
|
90 |
-
a = m.anchor_grid.prod(-1).view(-1) # anchor area
|
91 |
-
da = a[-1] - a[0] # delta a
|
92 |
-
ds = m.stride[-1] - m.stride[0] # delta s
|
93 |
-
if da.sign() != ds.sign(): # same order
|
94 |
-
m.anchors[:] = m.anchors.flip(0)
|
95 |
-
m.anchor_grid[:] = m.anchor_grid.flip(0)
|
96 |
-
|
97 |
-
|
98 |
-
def check_file(file):
|
99 |
-
# Searches for file if not found locally
|
100 |
-
if os.path.isfile(file):
|
101 |
-
return file
|
102 |
-
else:
|
103 |
-
files = glob.glob('./**/' + file, recursive=True) # find file
|
104 |
-
assert len(files), 'File Not Found: %s' % file # assert file was found
|
105 |
-
return files[0] # return first file if multiple found
|
106 |
-
|
107 |
-
|
108 |
-
def make_divisible(x, divisor):
|
109 |
-
# Returns x evenly divisble by divisor
|
110 |
-
return math.ceil(x / divisor) * divisor
|
111 |
-
|
112 |
-
|
113 |
-
def labels_to_class_weights(labels, nc=80):
|
114 |
-
# Get class weights (inverse frequency) from training labels
|
115 |
-
if labels[0] is None: # no labels loaded
|
116 |
-
return torch.Tensor()
|
117 |
-
|
118 |
-
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
|
119 |
-
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
|
120 |
-
weights = np.bincount(classes, minlength=nc) # occurences per class
|
121 |
-
|
122 |
-
# Prepend gridpoint count (for uCE trianing)
|
123 |
-
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
|
124 |
-
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
|
125 |
-
|
126 |
-
weights[weights == 0] = 1 # replace empty bins with 1
|
127 |
-
weights = 1 / weights # number of targets per class
|
128 |
-
weights /= weights.sum() # normalize
|
129 |
-
return torch.from_numpy(weights)
|
130 |
-
|
131 |
-
|
132 |
-
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
|
133 |
-
# Produces image weights based on class mAPs
|
134 |
-
n = len(labels)
|
135 |
-
class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
|
136 |
-
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
|
137 |
-
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
|
138 |
-
return image_weights
|
139 |
-
|
140 |
-
|
141 |
-
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
|
142 |
-
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
|
143 |
-
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
|
144 |
-
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
|
145 |
-
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
|
146 |
-
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
|
147 |
-
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
|
148 |
-
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
|
149 |
-
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
|
150 |
-
return x
|
151 |
-
|
152 |
-
|
153 |
-
def xyxy2xywh(x):
|
154 |
-
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
|
155 |
-
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
|
156 |
-
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
|
157 |
-
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
|
158 |
-
y[:, 2] = x[:, 2] - x[:, 0] # width
|
159 |
-
y[:, 3] = x[:, 3] - x[:, 1] # height
|
160 |
-
return y


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2
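# A minimal sketch of the two coordinate helpers above: mapping detections from
# the letterboxed network input back to the original image. Shapes and boxes
# below are hypothetical:
#
#   det = torch.tensor([[100., 80., 300., 240., 0.9, 0.]])  # xyxy, conf, cls at 640x640 input
#   scale_coords((640, 640), det[:, :4], (480, 640))        # undo padding and gain in place
#   # clip_coords is called internally, so boxes end up inside the 640x480 image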


def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number of classes, number of IoU thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects

        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])

            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)

    return p, r, ap, f1, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap
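# A minimal sketch of compute_ap on a hypothetical three-point PR curve:
#
#   recall    = np.array([0.1, 0.4, 0.8])
#   precision = np.array([1.0, 0.7, 0.5])
#   ap = compute_ap(recall, precision)  # 101-point interpolated area under the precision envelope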


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.t()

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter

    iou = inter / union  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + 1e-16  # convex area
            return iou - (c_area - union) / c_area  # GIoU
        if DIoU or CIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            # convex diagonal squared
            c2 = cw ** 2 + ch ** 2 + 1e-16
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (1 - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU

    return iou
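# A minimal sketch of bbox_iou with hypothetical xyxy boxes; GIoU=True returns
# the generalized IoU used as the box-regression loss in compute_loss() below:
#
#   box1 = torch.tensor([0., 0., 10., 10.])
#   box2 = torch.tensor([[5., 5., 15., 15.]])
#   bbox_iou(box1, box2)             # plain IoU = 25 / 175 ~= 0.143
#   bbox_iou(box1, box2, GIoU=True)  # <= IoU, penalized by the empty enclosing-box area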


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
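# A minimal sketch of the two pairwise IoU helpers above, on hypothetical inputs:
#
#   a = torch.tensor([[0., 0., 10., 10.], [2., 2., 6., 6.]])  # N=2 xyxy boxes
#   b = torch.tensor([[0., 0., 10., 10.]])                    # M=1 xyxy box
#   box_iou(a, b)                                             # -> 2x1 IoU matrix
#   wh_iou(a[:, 2:] - a[:, :2], b[:, 2:] - b[:, :2])          # width-height-only IoU, ignores position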


class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
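# A minimal sketch of wrapping a BCE criterion in FocalLoss, as the class
# comment above suggests; shapes and values below are hypothetical:
#
#   criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
#   logits = torch.randn(8, 80)
#   targets = torch.randint(0, 2, (8, 80)).float()
#   loss = criterion(logits, targets)  # scalar, since the wrapped reduction was 'mean'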


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
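# A minimal sketch of label smoothing with smooth_BCE; eps is hypothetical:
#
#   cp, cn = smooth_BCE(eps=0.1)  # cp=0.95 (positive target), cn=0.05 (negative target)
#   # compute_loss() below fills its class-target matrix with cn and sets cp at the true class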


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing-label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()


def compute_loss(p, targets, model):  # predictions, targets, model
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchors = build_targets(p, targets, model)  # targets
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # per output
    nt = 0  # number of targets
    np = len(p)  # number of outputs
    balance = [1.0, 1.0, 1.0]
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj

        nb = b.shape[0]  # number of targets
        if nb:
            nt += nb  # cumulative targets
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

            # GIoU
            pxy = ps[:, :2].sigmoid() * 2. - 0.5
            pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou(prediction, target)
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss

            # Obj
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            # Class
            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss

    s = 3 / np  # output count scaling
    lbox *= h['giou'] * s
    lobj *= h['obj'] * s
    lcls *= h['cls'] * s
    bs = tobj.shape[0]  # batch size
    if red == 'sum':
        g = 3.0  # loss gain
        lobj *= g / bs
        if nt:
            lcls *= g / nt / model.nc
            lbox *= g / nt

    loss = lbox + lobj + lcls
    return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()


def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
        else model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float()  # overlap offsets
    at = torch.arange(na).view(na, 1).repeat(1, nt)  # anchor tensor, same as .repeat_interleave(nt)

    style = 'rect4'
    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

        # Match targets to anchors
        a, t, offsets = [], targets * gain, 0
        if nt:
            r = t[None, :, 4:6] / anchors[:, None]  # wh ratio
            j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            a, t = at[j], t.repeat(na, 1, 1)[j]  # filter

            # overlaps
            g = 0.5  # offset
            gxy = t[:, 2:4]  # grid xy
            z = torch.zeros_like(gxy)
            if style == 'rect2':
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
            elif style == 'rect4':
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
                a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
                offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g

        # Define
        b, c = t[:, :2].long().T  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).long()
        gi, gj = gij.T  # grid xy indices

        # Append
        indices.append((b, a, gj, gi))  # image, anchor, grid indices
        tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """
    if prediction.dtype is torch.float16:
        prediction = prediction.float()  # to FP32

    nc = prediction[0].shape[1] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    output = [None] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # If none remain, process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # If none remain, process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Sort by confidence
        # x = x[x[:, 4].argsort(descending=True)]

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
                weights = iou * scores[None]  # box weights
                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                if redundant:
                    i = i[iou.sum(1) > 1]  # require redundancy
            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                print(x, i, x.shape, i.shape)
                pass

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output
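# A minimal sketch of running NMS on raw model output; `model` and `img` are
# hypothetical, and the tensor is in the (batch, boxes, 5 + nc) layout this
# function expects:
#
#   pred = model(img)[0]
#   dets = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)
#   for det in dets:                       # one (n, 6) tensor or None per image
#       if det is not None:
#           boxes, confs, cls = det[:, :4], det[:, 4], det[:, 5]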


def strip_optimizer(f='weights/best.pt'):  # from utils.utils import *; strip_optimizer()
    # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None
    x['model'].half()  # to FP16
    torch.save(x, f)
    print('Optimizer stripped from %s' % f)


def create_pretrained(f='weights/best.pt', s='weights/pretrained.pt'):  # from utils.utils import *; create_pretrained()
    # create pretrained checkpoint 's' from 'f' (create_pretrained(x, x) for x in glob.glob('./*.pt'))
    device = torch.device('cpu')
    x = torch.load(f, map_location=device)  # load source checkpoint 'f'

    x['optimizer'] = None
    x['training_results'] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = True
    torch.save(x, s)
    print('%s saved as pretrained checkpoint %s' % (f, s))


def coco_class_count(path='../coco/labels/train2014/'):
    # Histogram of occurrences per class
    nc = 80  # number of classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))


def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):
            print(labels.shape[0], file)


def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]

            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # crop and overwrite the original image
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])


def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

    Arguments:
        path: path to dataset *.yaml, or a loaded dataset
        n: number of anchors
        img_size: image size used for training
        thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
        gen: generations to evolve anchors using genetic algorithm

    Return:
        k: kmeans evolved anchors

    Usage:
        from utils.utils import *; _ = kmean_anchors()
    """
    thr = 1. / thr

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
              (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.load(f, Loader=yaml.FullLoader)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print('WARNING: Extremely small objects found. '
              '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels

    # Kmeans calculation
    from scipy.cluster.vq import kmeans
    print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
            if verbose:
                print_results(k)

    return print_results(k)


def print_mutation(hyp, results, bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sorted by fitness

    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt


def apply_classifier(x, model, img, im0):
    # applies a second-stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def fitness(x):
    # Returns fitness (for use with results.txt or evolve.txt)
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
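# A minimal sketch of the fitness metric on a hypothetical one-row results
# array ordered [P, R, mAP@0.5, mAP@0.5:0.95]:
#
#   x = np.array([[0.7, 0.6, 0.5, 0.3]])
#   fitness(x)  # -> 0.1 * 0.5 + 0.9 * 0.3 = 0.32; mAP@0.5:0.95 dominates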


def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    targets = []
    for i, o in enumerate(output):
        if o is not None:
            for pred in o:
                box = pred[:4]
                w = (box[2] - box[0]) / width
                h = (box[3] - box[1]) / height
                x = box[0] / width + w / 2
                y = box[1] / height + h / 2
                conf = pred[4]
                cls = int(pred[5])

                targets.append([i, cls, x, y, w, h, conf])

    return np.array(targets)


# Plotting functions ---------------------------------------------------------------------------------------------------
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return b, a

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if os.path.isfile(fname):  # do not overwrite
        return None

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()

    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)

    # Fixed class-colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]

    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)

            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname is not None:
        mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))

    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig('LR.png', dpi=200)


def plot_test_txt():  # from utils.utils import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        for i in range(7):
            ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
            ax[i].set_title(s[i])

        j = y[3].argmax() + 1
        ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid()
    ax2.set_xlim(0, 30)
    ax2.set_ylim(28, 50)
    ax2.set_yticks(np.arange(30, 55, 5))
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig('study_mAP_latency.png', dpi=300)
    plt.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_labels(labels):
    # plot dataset labels
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes

    def hist2d(x, y, n=100):
        xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
        hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
        xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
        yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
        return np.log(hist[xidx, yidx])

    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    ax[0].hist(c, bins=int(c.max() + 1))
    ax[0].set_xlabel('classes')
    ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('y')
    ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
    ax[2].set_xlabel('width')
    ax[2].set_ylabel('height')
    plt.savefig('labels.png', dpi=200)
    plt.close()


def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    plt.figure(figsize=(12, 10), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=()):  # from utils.utils import *; plot_results()
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else Path(f).stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)
spaces/Audio-AGI/AudioSep/models/CLAP/training/__init__.py
DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/deployment.md
DELETED
@@ -1,137 +0,0 @@
# Deployment

Models written in Python need to go through an export process to become a deployable artifact.
A few basic concepts about this process:

__"Export method"__ is how a Python model is fully serialized to a deployable format.
We support the following export methods:

* `tracing`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it
* `scripting`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it
* `caffe2_tracing`: replace parts of the model by caffe2 operators, then use tracing.

__"Format"__ is how a serialized model is described in a file, e.g.
TorchScript, Caffe2 protobuf, ONNX format.
__"Runtime"__ is an engine that loads a serialized model and executes it,
e.g., PyTorch, Caffe2, TensorFlow, onnxruntime, TensorRT, etc.
A runtime is often tied to a specific format
(e.g. PyTorch needs TorchScript format, Caffe2 needs protobuf format).
We currently support the following combinations, each with some limitations:

```eval_rst
+----------------------------+-------------+-------------+-----------------------------+
| Export Method              | tracing     | scripting   | caffe2_tracing              |
+============================+=============+=============+=============================+
| **Formats**                | TorchScript | TorchScript | Caffe2, TorchScript, ONNX   |
+----------------------------+-------------+-------------+-----------------------------+
| **Runtime**                | PyTorch     | PyTorch     | Caffe2, PyTorch             |
+----------------------------+-------------+-------------+-----------------------------+
| C++/Python inference       | ✅          | ✅          | ✅                          |
+----------------------------+-------------+-------------+-----------------------------+
| Dynamic resolution         | ✅          | ✅          | ✅                          |
+----------------------------+-------------+-------------+-----------------------------+
| Batch size requirement     | Constant    | Dynamic     | Batch inference unsupported |
+----------------------------+-------------+-------------+-----------------------------+
| Extra runtime deps         | torchvision | torchvision | Caffe2 ops (usually already |
|                            |             |             | included in PyTorch)        |
+----------------------------+-------------+-------------+-----------------------------+
| Faster/Mask/Keypoint R-CNN | ✅          | ✅          | ✅                          |
+----------------------------+-------------+-------------+-----------------------------+
| RetinaNet                  | ✅          | ✅          | ✅                          |
+----------------------------+-------------+-------------+-----------------------------+
| PointRend R-CNN            | ✅          | ❌          | ❌                          |
+----------------------------+-------------+-------------+-----------------------------+
| Cascade R-CNN              | ✅          | ❌          | ❌                          |
+----------------------------+-------------+-------------+-----------------------------+
```

`caffe2_tracing` is going to be deprecated.
We don't plan to work on additional support for other formats/runtimes, but contributions are welcome.


## Deployment with Tracing or Scripting

Models can be exported to TorchScript format, by either
[tracing or scripting](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html).
The output model file can be loaded without detectron2 dependency in either Python or C++.
The exported model often requires torchvision (or its C++ library) dependency for some custom ops.

This feature requires PyTorch ≥ 1.8.

### Coverage
Most official models under the meta architectures `GeneralizedRCNN` and `RetinaNet`
are supported in both tracing and scripting mode.
Cascade R-CNN and PointRend are currently supported in tracing.
Users' custom extensions are supported if they are also scriptable or traceable.

For models exported with tracing, dynamic input resolution is allowed, but batch size
(number of input images) must be fixed.
Scripting can support dynamic batch size.

### Usage

The main export APIs for tracing and scripting are [TracingAdapter](../modules/export.html#detectron2.export.TracingAdapter)
and [scripting_with_instances](../modules/export.html#detectron2.export.scripting_with_instances).
Their usage is currently demonstrated in [test_export_torchscript.py](../../tests/test_export_torchscript.py)
(see `TestScripting` and `TestTracing`)
as well as the [deployment example](../../tools/deploy).
Please check that these examples can run, and then modify them for your use cases.
Using these APIs currently requires some user effort and model-specific knowledge to work around the limitations of scripting and tracing.
In the future we plan to wrap them under simpler APIs to lower the bar to use them.
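
For orientation, the following is a minimal tracing sketch, under the assumption that a built detectron2 model and one sample image tensor are already at hand (`model` and `sample_image` are placeholder names; see the deployment example above for a complete version):

```python
import torch
from detectron2.export import TracingAdapter

model.eval()
inputs = [{"image": sample_image}]        # standard detectron2 input format
adapter = TracingAdapter(model, inputs)   # flattens dict inputs/outputs into tensors
traced = torch.jit.trace(adapter, adapter.flattened_inputs)
traced.save("model.ts")                   # later: torch.jit.load("model.ts"), no detectron2 needed
```

The adapter exists because `torch.jit.trace` only accepts tensor inputs, while detectron2 models consume lists of dicts.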
|
83 |
-
|
84 |
-
## Deployment with Caffe2-tracing
We provide [Caffe2Tracer](../modules/export.html#detectron2.export.Caffe2Tracer)
that performs the export logic.
It replaces parts of the model with Caffe2 operators,
and then exports the model to Caffe2, TorchScript, or ONNX format.

The converted model is able to run in either Python or C++ without a detectron2/torchvision dependency, on CPU or GPUs.
It has a runtime optimized for CPU & mobile inference, but not optimized for GPU inference.

This feature requires ONNX ≥ 1.6 and < 1.9.

### Coverage

Most official models under the 3 common meta architectures `GeneralizedRCNN`, `RetinaNet`, and `PanopticFPN`
are supported. Cascade R-CNN is not supported. Batch inference is not supported.

Users' custom extensions under these architectures (added through registration) are supported
as long as they do not contain control flow or operators not available in Caffe2 (e.g. deformable convolution).
For example, custom backbones and heads are often supported out of the box.

### Usage

The APIs are listed at [the API documentation](../modules/export).
We provide [export_model.py](../../tools/deploy/) as an example that uses
these APIs to convert a standard model. For custom models/datasets, you can add them to this script.
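As a rough sketch of the export flow those APIs provide (the config name, sample-input shape, and output path below are placeholders, not the script's exact code):

```python
import torch
from detectron2 import model_zoo
from detectron2.export import Caffe2Tracer

cfg = model_zoo.get_config("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")  # assumed config
model = model_zoo.get("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml", trained=True).eval()
inputs = [{"image": torch.rand(3, 480, 640)}]  # sample input used to trace the model

tracer = Caffe2Tracer(cfg, model, inputs)
caffe2_model = tracer.export_caffe2()   # alternatively export_onnx() or export_torchscript()
caffe2_model.save_protobuf("./output")  # writes the protobuf files used for deployment
```
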
### Use the model in C++/Python

The model can be loaded in C++ and deployed with
either the Caffe2 or PyTorch runtime. [C++ examples](../../tools/deploy/) for Mask R-CNN
are given as a reference. Note that:

* Models exported with the `caffe2_tracing` method take a special input format
  described in the [documentation](../modules/export.html#detectron2.export.Caffe2Tracer).
  This is taken care of in the C++ example.

* The converted models do not contain post-processing operations that
  transform raw layer outputs into formatted predictions.
  For example, the C++ examples only produce raw outputs (28x28 masks) from the final
  layers that are not post-processed, because in actual deployment an application often needs
  its own lightweight post-processing, so this step is left to users.

To help use the Caffe2-format model in Python,
we provide a python wrapper around the converted model, in the
[Caffe2Model.\_\_call\_\_](../modules/export.html#detectron2.export.Caffe2Model.__call__) method.
This method has an interface that's identical to the [PyTorch versions of models](./models.md),
and it internally applies pre/post-processing code to match the formats.
This wrapper can serve as a reference for how to use Caffe2's python API,
or for how to implement pre/post-processing in actual deployment.
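A minimal Python-side sketch, assuming the model was saved to `./output` as in the export sketch above:

```python
import torch
from detectron2.export import Caffe2Model

model = Caffe2Model.load_protobuf("./output")  # directory written by save_protobuf (assumed path)
inputs = [{"image": torch.rand(3, 480, 640)}]  # same input format as the PyTorch models
outputs = model(inputs)  # the wrapper applies pre/post-processing internally
```
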
## Conversion to TensorFlow
[tensorpack Faster R-CNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2)
provides scripts to convert a few standard detectron2 R-CNN models to TensorFlow's pb format.
It works by translating configs and weights, and therefore supports only a few models.
spaces/Benson/text-generation/Examples/Apk Xplore File Manager.md
DELETED
@@ -1,83 +0,0 @@
<br />
<h1>APK Xplore File Manager: A powerful and versatile tool for Android users</h1>
<p>If you are looking for a file manager app that can do more than just browse and organize your files, you may want to check out APK Xplore File Manager. This app is a powerful and versatile tool that lets you explore your phone's contents, view images and videos, play music, connect to remote locations, open archives, view PDF files, and much more. In this article, we will show you what APK Xplore File Manager can do, how to download and install it, how to use it, and what its pros and cons are.</p>
<h2>Features of APK Xplore File Manager</h2>
<h3>Explore your phone's contents</h3>
<p>APK Xplore File Manager lets you access all the folders and files on your device, including internal storage, external storage, the root directory, system folders, and hidden files. You can also view the details of each file, such as its size, date, permissions, and checksum, and sort files by name, size, date, or type.</p>
<h2>apk xplore file manager</h2><br /><p><b><b>Download File</b> ► <a href="https://bltlly.com/2v6JOr">https://bltlly.com/2v6JOr</a></b></p><br /><br />
<h3>View images and videos</h3>
<p>APK Xplore File Manager has a built-in image viewer that can show thumbnails and full-screen images in various formats such as JPG, PNG, GIF, BMP, WEBP, etc. You can also zoom in and out, rotate, crop, share, or delete images. The app also has a video player that can play videos in various formats such as MP4, MKV, AVI, FLV, etc., with controls for playback speed, volume, brightness, aspect ratio, subtitles, and more.</p>
<h3>Play music</h3>
<p>APK Xplore File Manager has a built-in music player that can play audio files in various formats such as MP3, WAV, OGG, FLAC, etc. You can also create playlists, shuffle or repeat songs, and change the equalizer settings. The app also supports notification controls and background playback.</p>
<h3>Connect to remote locations</h3>
<h3>Open archives</h3>
<p>APK Xplore File Manager can open archives in formats such as ZIP, RAR, 7ZIP, TAR, GZIP, BZIP2, etc. You can also create, extract, or modify archives with password protection or encryption, split or join files, and compress or decompress files.</p>
<h3>View PDF files</h3>
<p>APK Xplore File Manager has a built-in PDF viewer that can display PDF files of various sizes and orientations. You can also zoom, scroll, search, bookmark, or share PDF files.</p>
<h3>Much, much more</h3>
<p>APK Xplore File Manager has many more features that make it a powerful and versatile tool for Android users. Some of them are:</p>
<ul>
<li>Text editor: edit text files in formats such as TXT, HTML, XML, JSON, etc., and change the font size, color, style, or encoding.</li>
<li>Hex editor: edit binary files in hex mode, and search, replace, or insert bytes.</li>
<li>SQLite editor: view and edit SQLite databases, run SQL queries, or export data.</li>
<li>Root explorer: access your device's root directory if you have root privileges, and change file permissions or ownership.</li>
<li>Recycle bin: restore deleted files from the recycle bin, empty it, or exclude certain folders from it.</li>
<li>Search: find files or folders by name, size, date, or content, with support for regular expressions and filters.</li>
<li>Batch operations: perform operations on multiple files or folders at once, and use the clipboard to copy, cut, paste, or move them.</li>
<li>Themes: customize the app's appearance by changing the theme, color, icons, font, or layout.</li>
</ul>
<h2>How to download and install APK Xplore File Manager</h2>
<h3>Download the APK file from a trusted source</h3>
<h3>Enable unknown sources on your device</h3>
<p>To install APK Xplore File Manager, you need to enable unknown sources on your device. This is because APK Xplore File Manager is not available on the Google Play Store, and your device must be allowed to install apps from sources other than the Play Store. To enable unknown sources, follow these steps:</p>
<p></p>
<ol>
<li>Go to Settings > Security > Unknown sources and turn it on.</li>
<li>A warning message will appear. Tap OK to confirm.</li>
</ol>
<h3>Install the APK file and launch the app</h3>
<p>To install APK Xplore File Manager, locate the APK file on your device and tap on it. A prompt will appear asking you to install the app. Tap Install and wait for the installation to complete. Once installed, you can launch the app by tapping Open or by finding it in the app drawer.</p>
<h2>How to use APK Xplore File Manager</h2>
<h3>Navigate through folders and files</h3>
<p>To use APK Xplore File Manager, you navigate through the folders and files on your device. The app has a simple, intuitive interface that shows folders and files in a grid or list view; you can switch between views by tapping the icons in the top-right corner of the screen. You can also swipe left or right to reach tabs such as Home, Favorites, History, Recycle Bin, etc. To open a folder or file, simply tap on it. To go back to the previous folder, tap the back button in the top-left corner of the screen or swipe right from the left edge of the screen.</p>
<h3>Perform various actions on files</h3>
<h3>Access settings and preferences</h3>
<p>To access the settings and preferences of APK Xplore File Manager, tap the menu icon in the top-left corner of the screen and then tap Settings. Here you can customize various aspects of the app, such as the theme, color, icons, font, layout, and language. You can also enable or disable features such as the root explorer, recycle bin, hidden files, checksums, and encryption, back up or restore your settings, and clear the cache or history.</p>
<h2>Pros and cons of APK Xplore File Manager</h2>
<h3>Pros</h3>
<p>APK Xplore File Manager has many advantages that make it a powerful and versatile tool for Android users. Some of them are:</p>
<ul>
<li>It is free and ad-free.</li>
<li>It supports a wide range of file formats and protocols.</li>
<li>It has a simple, intuitive interface that is easy to use.</li>
<li>It has many features that enhance the app's functionality and usability.</li>
<li>It is customizable and flexible enough to suit your preferences and needs.</li>
</ul>
<h3>Cons</h3>
<p>APK Xplore File Manager also has some drawbacks that may limit its performance or compatibility. Some of them are:</p>
<ul>
<li>It requires Android 4.0 or higher to run.</li>
<li>It may not work well on some devices or systems that have different file structures or permissions.</li>
<li>It may consume a lot of memory or battery if you use it for a long time or with large files.</li>
<li>It may have some bugs or errors that can affect its functionality or stability.</li>
</ul>
<h2>Conclusion and FAQs</h2>
<p>Here are some frequently asked questions about APK Xplore File Manager:</p>
<ol>
<li><b>Q: How can I update APK Xplore File Manager?</b></li>
<li>A: You can update APK Xplore File Manager by downloading the latest version of the APK file from its official website or from other reputable sources such as APKPure or APKMirror. You can then install it over the existing app without losing your data or settings.</li>
<li><b>Q: How can I uninstall APK Xplore File Manager?</b></li>
<li>A: You can uninstall APK Xplore File Manager by going to Settings > Apps > APK Xplore File Manager and tapping Uninstall. Alternatively, you can long-press the app icon in the app drawer and drag it to the Uninstall option.</li>
<li><b>Q: How can I contact the developer of APK Xplore File Manager?</b></li>
<li>A: You can contact the developer of APK Xplore File Manager by sending an email to [email protected]. You can also visit their website at https://apkxplorefilemanager.com/ for more information.</li>
<li><b>Q: How can I support the development of APK Xplore File Manager?</b></li>
<li>A: You can support the development of APK Xplore File Manager by rating and reviewing it on its official website or on other platforms such as APKPure or APKMirror. You can also share it with friends and family who might find it useful, or donate to the developer via PayPal at https://paypal.me/apkxplorefilemanager.</li>
<li><b>Q: How can I report a bug or suggest a feature for APK Xplore File Manager?</b></li>
<li>A: You can report a bug or suggest a feature for APK Xplore File Manager by sending an email to apkxplorefilemanager@gmail.com. You can also leave a comment on their website or on other platforms such as APKPure or APKMirror. The developer appreciates your feedback and will try to fix bugs or implement features as soon as possible.</li>
</ol></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Cmo Descargar La Tarjeta Aadhar En Lnea.md
DELETED
@@ -1,51 +0,0 @@
<br />
<h1>How to download your Aadhaar card online</h1>
<p>The Aadhaar card is a unique identification number issued by the Indian government to every resident of India. It is a 12-digit number linked to your biometric and demographic information, such as your name, date of birth, address, gender, photo, fingerprints, and iris scan. The Aadhaar card is used as proof of identity and address for various purposes, such as opening a bank account, applying for a passport, getting a mobile connection, receiving government subsidies, and more.</p>
<p>If you have enrolled for an Aadhaar card or updated your details at an enrollment center, you can download an electronic version of your Aadhaar card online. This is also known as e-Aadhaar or digital Aadhaar, and it is just as valid as the original Aadhaar letter you receive by mail. You can also order a PVC (polyvinyl chloride) card, which is more durable and convenient to carry.</p>
<h2>How to download the aadhar card online</h2><br /><p><b><b>Download Zip</b> · <a href="https://bltlly.com/2v6L2o">https://bltlly.com/2v6L2o</a></b></p><br /><br />
<p>In this article, we will show you how to download your Aadhaar card online in simple steps. We will also cover the benefits of the online Aadhaar card and answer some frequently asked questions about it.</p>
<h2>Steps to download your Aadhaar card online</h2>
<p>To download your Aadhaar card online, you need your Aadhaar number or your enrollment ID (EID). You also need access to your registered mobile number or email address to receive a one-time password (OTP), or you can use biometric authentication. Follow these steps:</p>
<h3>Step 1: Visit the official UIDAI website</h3>
<p>UIDAI (Unique Identification Authority of India) is the government agency that issues and manages Aadhaar cards. You can visit its official website at <a href="https://myaadhaar.uidai.gov.in/">https://myaadhaar.uidai.gov.in/</a>. This is the portal for all online Aadhaar services.</p>
<h3>Step 2: Select the option to download Aadhaar</h3>
<h3>Step 3: Enter your Aadhaar number or enrollment ID</h3>
<p>You can download your Aadhaar card using any of these three methods:</p>
<ul>
<li>Aadhaar number (UID): the 12-digit number printed on your Aadhaar card or letter.</li>
<li>Enrollment ID (EID): the 14-digit number and the 14-digit date-and-time stamp printed on the enrollment slip you received when you enrolled for the Aadhaar card.</li>
<li>Virtual ID (VID): a temporary 16-digit number that you can generate from the UIDAI website. It can be used in place of your Aadhaar number for privacy and security reasons.</li>
</ul>
<p>Select the method you prefer and enter the corresponding number in the box. You also need to enter the captcha code for verification. Then click "Send OTP" or "Enter TOTP" depending on whether you want to receive an OTP on your registered mobile number or email address, or use a time-based OTP generated by an app such as mAadhaar or Google Authenticator.</p>
<h3>Step 4: Verify your identity with OTP or biometrics</h3>
<p>If you chose to receive an OTP, you will get a six-digit code on your registered mobile number or email address. Enter this code in the box and click "Verify and Download". If you chose to use a TOTP, enter the eight-digit code generated by the app and click "Verify and Download". If you chose to use biometric authentication, you need a registered biometric device that can scan your fingerprint or iris. Connect the device to your computer, follow the on-screen instructions to scan your biometrics, and click "Verify and Download".</p>
<p></p>
<h3>Step 5: Download and print your Aadhaar card</h3>
<h2>Benefits of the online Aadhaar card</h2>
<p>Downloading your Aadhaar card online has many benefits over waiting for the physical Aadhaar letter to arrive by mail. Here are some of them:</p>
<h3>Convenience and accessibility</h3>
<p>You can download the online Aadhaar card anytime and anywhere from the UIDAI website. You don't need to visit an enrollment center or a post office to get your Aadhaar card, and you can download additional copies if you lose or damage the original.</p>
<h3>Security and validity</h3>
<p>The online Aadhaar card is as valid as the physical Aadhaar letter issued by UIDAI. It carries a digital signature that verifies its authenticity, and a QR code containing your demographic and biometric information that authorized agencies can scan. You can also use the online Aadhaar card as an e-KYC document for various services.</p>
<h3>Eco-friendly and cost-effective</h3>
<p>By downloading your Aadhaar card online, you save paper and reduce environmental impact. You also save money on postage and printing: the online Aadhaar card is free and can be downloaded an unlimited number of times.</p>
<h2>Frequently asked questions about the online Aadhaar card</h2>
<p>Here are some common questions and answers about the online Aadhaar card:</p>
<ul>
<li><b>Q: Is the online Aadhaar card valid for travel?</b></li>
<li>A: Yes, the online Aadhaar card is accepted as valid ID proof for domestic travel on airlines, railways, and buses. For international travel, however, you need a passport or other documents as required by the destination country.</li>
<li><b>Q: How can I update my details on the online Aadhaar card?</b></li>
<li>A: You can update details such as your name, address, mobile number, email address, gender, date of birth, photo, fingerprints, and iris scan by visiting an enrollment center or by using the online Self-Service Update Portal (SSUP) on the UIDAI website.</li>
<li><b>Q: How can I check the status of my online Aadhaar card?</b></li>
<li>A: You can check the status of your online Aadhaar card using the "Check Aadhaar Status" option on the UIDAI website. You need to enter your enrollment ID or Aadhaar number and the captcha code to check the status.</li>
<li><b>Q: How can I lock or unlock my online Aadhaar card?</b></li>
<li>A: You can lock or unlock your online Aadhaar card using the "Lock/Unlock Biometrics" option on the UIDAI website. You need to enter your Aadhaar number or Virtual ID and an OTP or TOTP to lock or unlock your biometric data. This prevents unauthorized access to your biometric information.</li>
<li><b>Q: How can I share my online Aadhaar card with others?</b></li>
<li>A: You can share your online Aadhaar card using the "Share QR Code" option on the UIDAI website. You need to enter your Aadhaar number or Virtual ID and an OTP or TOTP to generate a QR code containing your demographic and biometric information. You can then download or print the QR code and share it with others, who can scan it using a QR code reader app.</li>
</ul>
<h2>Conclusion</h2>
<p>The online Aadhaar card is a convenient and secure way to obtain your unique identification number from the Indian government. It is just as valid as the physical Aadhaar letter and can be used for various purposes. You can download it in simple steps from the UIDAI website using your Aadhaar number or enrollment ID and verifying your identity with an OTP or biometrics. You can also request a PVC card or generate a QR code to share your Aadhaar card with others. The online Aadhaar card offers convenience, accessibility, security, validity, eco-friendliness, and cost savings. We hope this article has helped you understand how to download your Aadhaar card online and answered your questions. If you have further questions, feel free to contact us or visit the UIDAI website for more information.</p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/bazaar.py
DELETED
@@ -1,112 +0,0 @@
import logging
from typing import List, Optional, Tuple

from pip._internal.utils.misc import HiddenText, display_path
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs.versioncontrol import (
    AuthInfo,
    RemoteNotFoundError,
    RevOptions,
    VersionControl,
    vcs,
)

logger = logging.getLogger(__name__)


class Bazaar(VersionControl):
    name = "bzr"
    dirname = ".bzr"
    repo_name = "branch"
    schemes = (
        "bzr+http",
        "bzr+https",
        "bzr+ssh",
        "bzr+sftp",
        "bzr+ftp",
        "bzr+lp",
        "bzr+file",
    )

    @staticmethod
    def get_base_rev_args(rev: str) -> List[str]:
        return ["-r", rev]

    def fetch_new(
        self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
    ) -> None:
        rev_display = rev_options.to_display()
        logger.info(
            "Checking out %s%s to %s",
            url,
            rev_display,
            display_path(dest),
        )
        if verbosity <= 0:
            flag = "--quiet"
        elif verbosity == 1:
            flag = ""
        else:
            flag = f"-{'v'*verbosity}"
        cmd_args = make_command(
            "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
        )
        self.run_command(cmd_args)

    def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
        self.run_command(make_command("switch", url), cwd=dest)

    def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
        output = self.run_command(
            make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
        )
        if output.startswith("Standalone "):
            # Older versions of pip used to create standalone branches.
            # Convert the standalone branch to a checkout by calling "bzr bind".
            cmd_args = make_command("bind", "-q", url)
            self.run_command(cmd_args, cwd=dest)

        cmd_args = make_command("update", "-q", rev_options.to_args())
        self.run_command(cmd_args, cwd=dest)

    @classmethod
    def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
        # hotfix the URL scheme after removing bzr+ from bzr+ssh:// re-add it
        url, rev, user_pass = super().get_url_rev_and_auth(url)
        if url.startswith("ssh://"):
            url = "bzr+" + url
        return url, rev, user_pass

    @classmethod
    def get_remote_url(cls, location: str) -> str:
        urls = cls.run_command(
            ["info"], show_stdout=False, stdout_only=True, cwd=location
        )
        for line in urls.splitlines():
            line = line.strip()
            for x in ("checkout of branch: ", "parent branch: "):
                if line.startswith(x):
                    repo = line.split(x)[1]
                    if cls._is_local_repository(repo):
                        return path_to_url(repo)
                    return repo
        raise RemoteNotFoundError

    @classmethod
    def get_revision(cls, location: str) -> str:
        revision = cls.run_command(
            ["revno"],
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        return revision.splitlines()[-1]

    @classmethod
    def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
        """Always assume the versions don't match"""
        return False


vcs.register(Bazaar)
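
Every scheme in `Bazaar.schemes` carries the `bzr+` prefix, which is how pip routes `bzr+https://...` requirements to this backend. As a quick illustration of the class above (a hypothetical snippet, not part of pip itself), the revision option mapping is the only Bazaar-specific piece of the command line:

```python
# get_base_rev_args is a staticmethod, so it can be called on the class directly;
# pip splices the result into the "bzr checkout" command it builds in fetch_new.
assert Bazaar.get_base_rev_args("42") == ["-r", "42"]
```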
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py
DELETED
@@ -1,608 +0,0 @@
# Copyright 2016-2018 Julien Danjou
# Copyright 2017 Elisey Zanko
# Copyright 2016 Étienne Bersac
# Copyright 2016 Joshua Harlow
# Copyright 2013-2014 Ray Holder
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import functools
import sys
import threading
import time
import typing as t
import warnings
from abc import ABC, abstractmethod
from concurrent import futures
from inspect import iscoroutinefunction

# Import all built-in retry strategies for easier usage.
from .retry import retry_base  # noqa
from .retry import retry_all  # noqa
from .retry import retry_always  # noqa
from .retry import retry_any  # noqa
from .retry import retry_if_exception  # noqa
from .retry import retry_if_exception_type  # noqa
from .retry import retry_if_exception_cause_type  # noqa
from .retry import retry_if_not_exception_type  # noqa
from .retry import retry_if_not_result  # noqa
from .retry import retry_if_result  # noqa
from .retry import retry_never  # noqa
from .retry import retry_unless_exception_type  # noqa
from .retry import retry_if_exception_message  # noqa
from .retry import retry_if_not_exception_message  # noqa

# Import all nap strategies for easier usage.
from .nap import sleep  # noqa
from .nap import sleep_using_event  # noqa

# Import all built-in stop strategies for easier usage.
from .stop import stop_after_attempt  # noqa
from .stop import stop_after_delay  # noqa
from .stop import stop_all  # noqa
from .stop import stop_any  # noqa
from .stop import stop_never  # noqa
from .stop import stop_when_event_set  # noqa

# Import all built-in wait strategies for easier usage.
from .wait import wait_chain  # noqa
from .wait import wait_combine  # noqa
from .wait import wait_exponential  # noqa
from .wait import wait_fixed  # noqa
from .wait import wait_incrementing  # noqa
from .wait import wait_none  # noqa
from .wait import wait_random  # noqa
from .wait import wait_random_exponential  # noqa
from .wait import wait_random_exponential as wait_full_jitter  # noqa
from .wait import wait_exponential_jitter  # noqa

# Import all built-in before strategies for easier usage.
from .before import before_log  # noqa
from .before import before_nothing  # noqa

# Import all built-in after strategies for easier usage.
from .after import after_log  # noqa
from .after import after_nothing  # noqa

# Import all built-in after strategies for easier usage.
from .before_sleep import before_sleep_log  # noqa
from .before_sleep import before_sleep_nothing  # noqa

# Replace a conditional import with a hard-coded None so that pip does
# not attempt to use tornado even if it is present in the environment.
# If tornado is non-None, tenacity will attempt to execute some code
# that is sensitive to the version of tornado, which could break pip
# if an old version is found.
tornado = None  # type: ignore

if t.TYPE_CHECKING:
    import types

    from .retry import RetryBaseT
    from .stop import StopBaseT
    from .wait import WaitBaseT


WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Any])


class TryAgain(Exception):
    """Always retry the executed function when raised."""


NO_RESULT = object()


class DoAttempt:
    pass


class DoSleep(float):
    pass


class BaseAction:
    """Base class for representing actions to take by retry object.

    Concrete implementations must define:
    - __init__: to initialize all necessary fields
    - REPR_FIELDS: class variable specifying attributes to include in repr(self)
    - NAME: for identification in retry object methods and callbacks
    """

    REPR_FIELDS: t.Sequence[str] = ()
    NAME: t.Optional[str] = None

    def __repr__(self) -> str:
        state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS)
        return f"{self.__class__.__name__}({state_str})"

    def __str__(self) -> str:
        return repr(self)


class RetryAction(BaseAction):
    REPR_FIELDS = ("sleep",)
    NAME = "retry"

    def __init__(self, sleep: t.SupportsFloat) -> None:
        self.sleep = float(sleep)


_unset = object()


def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any:
    return second if first is _unset else first


class RetryError(Exception):
    """Encapsulates the last attempt instance right before giving up."""

    def __init__(self, last_attempt: "Future") -> None:
        self.last_attempt = last_attempt
        super().__init__(last_attempt)

    def reraise(self) -> "t.NoReturn":
        if self.last_attempt.failed:
            raise self.last_attempt.result()
        raise self

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.last_attempt}]"


class AttemptManager:
    """Manage attempt context."""

    def __init__(self, retry_state: "RetryCallState"):
        self.retry_state = retry_state

    def __enter__(self) -> None:
        pass

    def __exit__(
        self,
        exc_type: t.Optional[t.Type[BaseException]],
        exc_value: t.Optional[BaseException],
        traceback: t.Optional["types.TracebackType"],
    ) -> t.Optional[bool]:
        if exc_type is not None and exc_value is not None:
            self.retry_state.set_exception((exc_type, exc_value, traceback))
            return True  # Swallow exception.
        else:
            # We don't have the result, actually.
            self.retry_state.set_result(None)
            return None


class BaseRetrying(ABC):
    def __init__(
        self,
        sleep: t.Callable[[t.Union[int, float]], None] = sleep,
        stop: "StopBaseT" = stop_never,
        wait: "WaitBaseT" = wait_none(),
        retry: "RetryBaseT" = retry_if_exception_type(),
        before: t.Callable[["RetryCallState"], None] = before_nothing,
        after: t.Callable[["RetryCallState"], None] = after_nothing,
        before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
        reraise: bool = False,
        retry_error_cls: t.Type[RetryError] = RetryError,
        retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
    ):
        self.sleep = sleep
        self.stop = stop
        self.wait = wait
        self.retry = retry
        self.before = before
        self.after = after
        self.before_sleep = before_sleep
        self.reraise = reraise
        self._local = threading.local()
        self.retry_error_cls = retry_error_cls
        self.retry_error_callback = retry_error_callback

    def copy(
        self,
        sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset,
        stop: t.Union["StopBaseT", object] = _unset,
        wait: t.Union["WaitBaseT", object] = _unset,
        retry: t.Union[retry_base, object] = _unset,
        before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
        after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
        before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset,
        reraise: t.Union[bool, object] = _unset,
        retry_error_cls: t.Union[t.Type[RetryError], object] = _unset,
        retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset,
    ) -> "BaseRetrying":
        """Copy this object with some parameters changed if needed."""
        return self.__class__(
            sleep=_first_set(sleep, self.sleep),
            stop=_first_set(stop, self.stop),
            wait=_first_set(wait, self.wait),
            retry=_first_set(retry, self.retry),
            before=_first_set(before, self.before),
            after=_first_set(after, self.after),
            before_sleep=_first_set(before_sleep, self.before_sleep),
            reraise=_first_set(reraise, self.reraise),
            retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
            retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback),
        )

    def __repr__(self) -> str:
        return (
            f"<{self.__class__.__name__} object at 0x{id(self):x} ("
            f"stop={self.stop}, "
            f"wait={self.wait}, "
            f"sleep={self.sleep}, "
            f"retry={self.retry}, "
            f"before={self.before}, "
            f"after={self.after})>"
        )

    @property
    def statistics(self) -> t.Dict[str, t.Any]:
        """Return a dictionary of runtime statistics.

        This dictionary will be empty when the controller has never been
        run. When it is running or has run previously it should have (but
        may not) have useful and/or informational keys and values when
        running is underway and/or completed.

        .. warning:: The keys in this dictionary **should** be somewhat
                     stable (not changing), but their existence **may**
                     change between major releases as new statistics are
                     gathered or removed so before accessing keys ensure that
                     they actually exist and handle when they do not.

        .. note:: The values in this dictionary are local to the thread
                  running the call (so if multiple threads share the same retrying
                  object - either directly or indirectly - they will each have
                  their own view of statistics they have collected; in the
                  future we may provide a way to aggregate the various
                  statistics from each thread).
        """
        try:
            return self._local.statistics  # type: ignore[no-any-return]
        except AttributeError:
            self._local.statistics = t.cast(t.Dict[str, t.Any], {})
            return self._local.statistics

    def wraps(self, f: WrappedFn) -> WrappedFn:
        """Wrap a function for retrying.

        :param f: A function to wrap for retrying.
        """

        @functools.wraps(f)
        def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
            return self(f, *args, **kw)

        def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn:
            return self.copy(*args, **kwargs).wraps(f)

        wrapped_f.retry = self  # type: ignore[attr-defined]
        wrapped_f.retry_with = retry_with  # type: ignore[attr-defined]

        return wrapped_f  # type: ignore[return-value]

    def begin(self) -> None:
        self.statistics.clear()
        self.statistics["start_time"] = time.monotonic()
        self.statistics["attempt_number"] = 1
        self.statistics["idle_for"] = 0

    def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]:  # noqa
        fut = retry_state.outcome
        if fut is None:
            if self.before is not None:
                self.before(retry_state)
            return DoAttempt()

        is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
        if not (is_explicit_retry or self.retry(retry_state)):
            return fut.result()

        if self.after is not None:
            self.after(retry_state)

        self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
        if self.stop(retry_state):
            if self.retry_error_callback:
                return self.retry_error_callback(retry_state)
            retry_exc = self.retry_error_cls(fut)
            if self.reraise:
                raise retry_exc.reraise()
            raise retry_exc from fut.exception()

        if self.wait:
            sleep = self.wait(retry_state)
        else:
            sleep = 0.0
        retry_state.next_action = RetryAction(sleep)
        retry_state.idle_for += sleep
        self.statistics["idle_for"] += sleep
        self.statistics["attempt_number"] += 1

        if self.before_sleep is not None:
            self.before_sleep(retry_state)

        return DoSleep(sleep)

    def __iter__(self) -> t.Generator[AttemptManager, None, None]:
        self.begin()

        retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
        while True:
            do = self.iter(retry_state=retry_state)
            if isinstance(do, DoAttempt):
                yield AttemptManager(retry_state=retry_state)
            elif isinstance(do, DoSleep):
                retry_state.prepare_for_next_attempt()
                self.sleep(do)
            else:
                break

    @abstractmethod
    def __call__(
        self,
        fn: t.Callable[..., WrappedFnReturnT],
        *args: t.Any,
        **kwargs: t.Any,
    ) -> WrappedFnReturnT:
        pass


class Retrying(BaseRetrying):
    """Retrying controller."""

    def __call__(
        self,
        fn: t.Callable[..., WrappedFnReturnT],
        *args: t.Any,
        **kwargs: t.Any,
    ) -> WrappedFnReturnT:
        self.begin()

        retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
        while True:
            do = self.iter(retry_state=retry_state)
            if isinstance(do, DoAttempt):
                try:
                    result = fn(*args, **kwargs)
                except BaseException:  # noqa: B902
                    retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]
                else:
                    retry_state.set_result(result)
            elif isinstance(do, DoSleep):
                retry_state.prepare_for_next_attempt()
                self.sleep(do)
            else:
                return do  # type: ignore[no-any-return]


if sys.version_info[1] >= 9:
    FutureGenericT = futures.Future[t.Any]
else:
    FutureGenericT = futures.Future


class Future(FutureGenericT):
    """Encapsulates a (future or past) attempted call to a target function."""

    def __init__(self, attempt_number: int) -> None:
        super().__init__()
        self.attempt_number = attempt_number

    @property
    def failed(self) -> bool:
        """Return whether an exception is being held in this future."""
        return self.exception() is not None

    @classmethod
    def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future":
        """Construct a new Future object."""
        fut = cls(attempt_number)
        if has_exception:
            fut.set_exception(value)
        else:
            fut.set_result(value)
        return fut


class RetryCallState:
    """State related to a single call wrapped with Retrying."""

    def __init__(
        self,
        retry_object: BaseRetrying,
        fn: t.Optional[WrappedFn],
        args: t.Any,
        kwargs: t.Any,
    ) -> None:
        #: Retry call start timestamp
        self.start_time = time.monotonic()
        #: Retry manager object
        self.retry_object = retry_object
        #: Function wrapped by this retry call
        self.fn = fn
        #: Arguments of the function wrapped by this retry call
        self.args = args
        #: Keyword arguments of the function wrapped by this retry call
        self.kwargs = kwargs

        #: The number of the current attempt
        self.attempt_number: int = 1
        #: Last outcome (result or exception) produced by the function
        self.outcome: t.Optional[Future] = None
        #: Timestamp of the last outcome
        self.outcome_timestamp: t.Optional[float] = None
        #: Time spent sleeping in retries
        self.idle_for: float = 0.0
        #: Next action as decided by the retry manager
        self.next_action: t.Optional[RetryAction] = None

    @property
    def seconds_since_start(self) -> t.Optional[float]:
        if self.outcome_timestamp is None:
            return None
        return self.outcome_timestamp - self.start_time

    def prepare_for_next_attempt(self) -> None:
        self.outcome = None
        self.outcome_timestamp = None
        self.attempt_number += 1
        self.next_action = None

    def set_result(self, val: t.Any) -> None:
        ts = time.monotonic()
        fut = Future(self.attempt_number)
        fut.set_result(val)
        self.outcome, self.outcome_timestamp = fut, ts

    def set_exception(
        self, exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType | None"]
    ) -> None:
        ts = time.monotonic()
        fut = Future(self.attempt_number)
        fut.set_exception(exc_info[1])
        self.outcome, self.outcome_timestamp = fut, ts

    def __repr__(self) -> str:
        if self.outcome is None:
            result = "none yet"
        elif self.outcome.failed:
            exception = self.outcome.exception()
            result = f"failed ({exception.__class__.__name__} {exception})"
        else:
            result = f"returned {self.outcome.result()}"

        slept = float(round(self.idle_for, 2))
        clsname = self.__class__.__name__
        return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>"


@t.overload
def retry(func: WrappedFn) -> WrappedFn:
    ...


@t.overload
def retry(
    sleep: t.Callable[[t.Union[int, float]], None] = sleep,
    stop: "StopBaseT" = stop_never,
    wait: "WaitBaseT" = wait_none(),
    retry: "RetryBaseT" = retry_if_exception_type(),
    before: t.Callable[["RetryCallState"], None] = before_nothing,
    after: t.Callable[["RetryCallState"], None] = after_nothing,
    before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
    reraise: bool = False,
    retry_error_cls: t.Type["RetryError"] = RetryError,
    retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
) -> t.Callable[[WrappedFn], WrappedFn]:
    ...


def retry(*dargs: t.Any, **dkw: t.Any) -> t.Any:
    """Wrap a function with a new `Retrying` object.

    :param dargs: positional arguments passed to Retrying object
    :param dkw: keyword arguments passed to the Retrying object
    """
    # support both @retry and @retry() as valid syntax
    if len(dargs) == 1 and callable(dargs[0]):
        return retry()(dargs[0])
    else:

        def wrap(f: WrappedFn) -> WrappedFn:
            if isinstance(f, retry_base):
                warnings.warn(
                    f"Got retry_base instance ({f.__class__.__name__}) as callable argument, "
                    f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)"
                )
            r: "BaseRetrying"
            if iscoroutinefunction(f):
                r = AsyncRetrying(*dargs, **dkw)
            elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f):
                r = TornadoRetrying(*dargs, **dkw)
            else:
                r = Retrying(*dargs, **dkw)

            return r.wraps(f)

        return wrap


from pip._vendor.tenacity._asyncio import AsyncRetrying  # noqa:E402,I100

if tornado:
    from pip._vendor.tenacity.tornadoweb import TornadoRetrying


__all__ = [
    "retry_base",
    "retry_all",
    "retry_always",
    "retry_any",
    "retry_if_exception",
    "retry_if_exception_type",
    "retry_if_exception_cause_type",
    "retry_if_not_exception_type",
    "retry_if_not_result",
    "retry_if_result",
    "retry_never",
    "retry_unless_exception_type",
    "retry_if_exception_message",
    "retry_if_not_exception_message",
    "sleep",
    "sleep_using_event",
    "stop_after_attempt",
    "stop_after_delay",
    "stop_all",
    "stop_any",
    "stop_never",
    "stop_when_event_set",
    "wait_chain",
    "wait_combine",
    "wait_exponential",
    "wait_fixed",
    "wait_incrementing",
    "wait_none",
    "wait_random",
    "wait_random_exponential",
    "wait_full_jitter",
    "wait_exponential_jitter",
    "before_log",
    "before_nothing",
    "after_log",
    "after_nothing",
    "before_sleep_log",
    "before_sleep_nothing",
    "retry",
    "WrappedFn",
    "TryAgain",
    "NO_RESULT",
    "DoAttempt",
    "DoSleep",
    "BaseAction",
    "RetryAction",
    "RetryError",
    "AttemptManager",
    "BaseRetrying",
    "Retrying",
    "Future",
    "RetryCallState",
    "AsyncRetrying",
]
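
For context, this file is pip's vendored copy of the tenacity library; it is not meant to be imported by user code. The `retry` decorator it defines behaves like the minimal sketch below against the standalone `tenacity` package (all names are taken from the module above):

```python
import random
from tenacity import retry, stop_after_attempt, wait_fixed

@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.5), reraise=True)
def flaky_call() -> str:
    # Retried up to 3 times with a 0.5 s pause between tries; with reraise=True
    # the original exception (rather than a RetryError) propagates if all fail.
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"

print(flaky_call())
```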
spaces/Borda90/Titanic_Esp/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Titanic_Esp
emoji: 📉
colorFrom: blue
colorTo: purple
sdk: gradio
sdk_version: 2.8.13
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/__init__.py
DELETED
@@ -1,10 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from .utils.env import setup_environment

setup_environment()


# This line will be programmatically read/written by setup.py.
# Leave it at the bottom of this file and don't touch it.
__version__ = "0.1.1"
DELETED
@@ -1,146 +0,0 @@
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import torch
from torchvision.ops import boxes as box_ops
from torchvision.ops import nms  # BC-compat


def batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Same as torchvision.ops.boxes.batched_nms, but safer.
    """
    assert boxes.shape[-1] == 4
    # TODO may need better strategy.
    # Investigate after having a fully-cuda NMS op.
    if len(boxes) < 40000:
        return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)

    result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
    for id in torch.unique(idxs).cpu().tolist():
        mask = (idxs == id).nonzero().view(-1)
        keep = nms(boxes[mask], scores[mask], iou_threshold)
        result_mask[mask[keep]] = True
    keep = result_mask.nonzero().view(-1)
    keep = keep[scores[keep].argsort(descending=True)]
    return keep


# Note: this function (nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def nms_rotated(boxes, scores, iou_threshold):
    """
    Performs non-maximum suppression (NMS) on the rotated boxes according
    to their intersection-over-union (IoU).

    Rotated NMS iteratively removes lower scoring rotated boxes which have an
    IoU greater than iou_threshold with another (higher scoring) rotated box.

    Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
    RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
    can be representing completely different objects in certain tasks, e.g., OCR.

    As for the question of whether rotated-NMS should treat them as faraway boxes
    even though their IOU is 1, it depends on the application and/or ground truth annotation.

    As an extreme example, consider a single character v and the square box around it.

    If the angle is 0 degrees, the object (text) would be read as 'v';

    If the angle is 90 degrees, the object (text) would become '>';

    If the angle is 180 degrees, the object (text) would become '^';

    If the angle is 270/-90 degrees, the object (text) would become '<'

    All of these cases have IoU of 1 to each other, and rotated NMS that only
    uses IoU as criterion would only keep one of them with the highest score -
    which, practically, still makes sense in most cases because typically
    only one of these orientations is the correct one. Also, it does not matter
    as much if the box is only used to classify the object (instead of transcribing
    them with a sequential OCR recognition model) later.

    On the other hand, when we use IoU to filter proposals that are close to the
    ground truth during training, we should definitely take the angle into account if
    we know the ground truth is labeled with the strictly correct orientation (as in,
    upside-down words are annotated with -180 degrees even though they can be covered
    with a 0/90/-90 degree box, etc.)

    The way the original dataset is annotated also matters. For example, if the dataset
    is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
    we can estimate a minimum rotated bounding box for this polygon, but there's no way
    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
    rotated boxes, with angles differing by 90 degrees from each other, covering exactly the
    same region). In that case we have to just use IoU to determine the box
    proximity (as many detection benchmarks (even for text) do) unless there are other
    assumptions we can make (like width is always larger than height, or the object is not
    rotated by more than 90 degrees CCW/CW, etc.)

    In summary, not considering angles in rotated NMS seems to be a good option for now,
    but we should be aware of its implications.

    Args:
        boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
            (x_center, y_center, width, height, angle_degrees) format.
        scores (Tensor[N]): Scores for each one of the rotated boxes
        iou_threshold (float): Discards all overlapping rotated boxes with IoU > iou_threshold

    Returns:
        keep (Tensor): int64 tensor with the indices of the elements that have been kept
        by Rotated NMS, sorted in decreasing order of scores
    """
    from detectron2 import _C

    return _C.nms_rotated(boxes, scores, iou_threshold)


# Note: this function (batched_nms_rotated) might be moved into
# Note: this function (batched_nms_rotated) might be moved into
|
98 |
-
# torchvision/ops/boxes.py in the future
|
99 |
-
def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
|
100 |
-
"""
|
101 |
-
Performs non-maximum suppression in a batched fashion.
|
102 |
-
|
103 |
-
Each index value correspond to a category, and NMS
|
104 |
-
will not be applied between elements of different categories.
|
105 |
-
|
106 |
-
Args:
|
107 |
-
boxes (Tensor[N, 5]):
|
108 |
-
boxes where NMS will be performed. They
|
109 |
-
are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
|
110 |
-
scores (Tensor[N]):
|
111 |
-
scores for each one of the boxes
|
112 |
-
idxs (Tensor[N]):
|
113 |
-
indices of the categories for each one of the boxes.
|
114 |
-
iou_threshold (float):
|
115 |
-
discards all overlapping boxes
|
116 |
-
with IoU < iou_threshold
|
117 |
-
|
118 |
-
Returns:
|
119 |
-
Tensor:
|
120 |
-
int64 tensor with the indices of the elements that have been kept
|
121 |
-
by NMS, sorted in decreasing order of scores
|
122 |
-
"""
|
123 |
-
assert boxes.shape[-1] == 5
|
124 |
-
|
125 |
-
if boxes.numel() == 0:
|
126 |
-
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
|
127 |
-
# Strategy: in order to perform NMS independently per class,
|
128 |
-
# we add an offset to all the boxes. The offset is dependent
|
129 |
-
# only on the class idx, and is large enough so that boxes
|
130 |
-
# from different classes do not overlap
|
131 |
-
|
132 |
-
# Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
|
133 |
-
# which won't handle negative coordinates correctly.
|
134 |
-
# Here by using min_coordinate we can make sure the negative coordinates are
|
135 |
-
# correctly handled.
|
136 |
-
max_coordinate = (
|
137 |
-
torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
|
138 |
-
).max()
|
139 |
-
min_coordinate = (
|
140 |
-
torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2
|
141 |
-
).min()
|
142 |
-
offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
|
143 |
-
boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes
|
144 |
-
boxes_for_nms[:, :2] += offsets[:, None]
|
145 |
-
keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
|
146 |
-
return keep
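
The per-class offset trick documented in batched_nms_rotated above is easier to see on ordinary axis-aligned boxes. A minimal, self-contained sketch with made-up values, using only stock torchvision (the deleted file additionally subtracts min_coordinate so negative coordinates also end up in disjoint regions):

import torch
from torchvision.ops import nms

boxes = torch.tensor([
    [0.0, 0.0, 10.0, 10.0],   # class 0
    [1.0, 1.0, 11.0, 11.0],   # class 0, heavily overlaps the box above
    [0.0, 0.0, 10.0, 10.0],   # class 1, same coordinates but a different class
], dtype=torch.float32)
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])

# Shift each class into its own disjoint region of the plane, so a single
# plain NMS call can never suppress a box from one class with a box from another.
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]

keep = nms(boxes_for_nms, scores, iou_threshold=0.5)
print(keep)  # tensor([0, 2]): box 1 is suppressed by box 0; box 2 survives in its own class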
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/inner_product.h
DELETED
@@ -1,59 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputType>
-__host__ __device__
-OutputType inner_product(thrust::execution_policy<DerivedPolicy> &exec,
-                         InputIterator1 first1,
-                         InputIterator1 last1,
-                         InputIterator2 first2,
-                         OutputType init);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputType, typename BinaryFunction1, typename BinaryFunction2>
-__host__ __device__
-OutputType inner_product(thrust::execution_policy<DerivedPolicy> &exec,
-                         InputIterator1 first1,
-                         InputIterator1 last1,
-                         InputIterator2 first2,
-                         OutputType init,
-                         BinaryFunction1 binary_op1,
-                         BinaryFunction2 binary_op2);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/inner_product.inl>
-
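
The two overloads above mirror std::inner_product: the second generalizes the first by letting the caller swap the reduction (binary_op1) and the pairwise combination (binary_op2). A Python model of the semantics only, assuming a sequential fold order (Thrust itself dispatches on the execution policy and may parallelize the reduction):

from functools import reduce
import operator

def inner_product(a, b, init, binary_op1=operator.add, binary_op2=operator.mul):
    # Pair up a[i] with b[i], combine each pair with binary_op2,
    # then fold the results into init with binary_op1.
    return reduce(binary_op1, map(binary_op2, a, b), init)

print(inner_product([1, 2, 3], [4, 5, 6], 0))       # 0 + 1*4 + 2*5 + 3*6 = 32
print(inner_product([1, 2, 3], [4, 5, 6], 0, max))  # max of the products = 18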
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/inner_product.h
DELETED
@@ -1,23 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits inner_product
-#include <thrust/system/cpp/detail/inner_product.h>
-
spaces/CVPR/MonoScene/monoscene/app.py
DELETED
@@ -1,138 +0,0 @@
-from pytorch_lightning import Trainer
-from monoscene.models.monoscene import MonoScene
-from monoscene.data.NYU.nyu_dm import NYUDataModule
-from monoscene.data.semantic_kitti.kitti_dm import KittiDataModule
-from monoscene.data.kitti_360.kitti_360_dm import Kitti360DataModule
-# import hydra
-from omegaconf import DictConfig
-import torch
-import numpy as np
-import os
-from hydra.utils import get_original_cwd
-import gradio as gr
-import numpy as np
-import plotly.express as px
-import pandas as pd
-
-
-# @hydra.main(config_name="../config/monoscene.yaml")
-def plot(input_img):
-    torch.set_grad_enabled(False)
-
-    # Setup dataloader
-    # if config.dataset == "kitti" or config.dataset == "kitti_360":
-    feature = 64
-    project_scale = 2
-    full_scene_size = (256, 256, 32)
-
-    # if config.dataset == "kitti":
-    #     data_module = KittiDataModule(
-    #         root=config.kitti_root,
-    #         preprocess_root=config.kitti_preprocess_root,
-    #         frustum_size=config.frustum_size,
-    #         batch_size=int(config.batch_size / config.n_gpus),
-    #         num_workers=int(config.num_workers_per_gpu * config.n_gpus),
-    #     )
-    #     data_module.setup()
-    #     data_loader = data_module.val_dataloader()
-    #     # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
-    # else:
-    #     data_module = Kitti360DataModule(
-    #         root=config.kitti_360_root,
-    #         sequences=[config.kitti_360_sequence],
-    #         n_scans=2000,
-    #         batch_size=1,
-    #         num_workers=3,
-    #     )
-    #     data_module.setup()
-    #     data_loader = data_module.dataloader()
-
-    # elif config.dataset == "NYU":
-    #     project_scale = 1
-    #     feature = 200
-    #     full_scene_size = (60, 36, 60)
-    #     data_module = NYUDataModule(
-    #         root=config.NYU_root,
-    #         preprocess_root=config.NYU_preprocess_root,
-    #         n_relations=config.n_relations,
-    #         frustum_size=config.frustum_size,
-    #         batch_size=int(config.batch_size / config.n_gpus),
-    #         num_workers=int(config.num_workers_per_gpu * config.n_gpus),
-    #     )
-    #     data_module.setup()
-    #     data_loader = data_module.val_dataloader()
-    #     # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
-    # else:
-    #     print("dataset not supported")
-
-    # Load pretrained models
-    # if config.dataset == "NYU":
-    #     model_path = os.path.join(
-    #         get_original_cwd(), "trained_models", "monoscene_nyu.ckpt"
-    #     )
-    # else:
-    #     model_path = os.path.join(
-    #         get_original_cwd(), "trained_models", "monoscene_kitti.ckpt"
-    #     )
-    model_path = "trained_models/monoscene_kitti.ckpt"
-
-    model = MonoScene.load_from_checkpoint(
-        model_path,
-        feature=feature,
-        project_scale=project_scale,
-        fp_loss=False,
-        full_scene_size=full_scene_size,
-    )
-    model.cuda()
-    model.eval()
-
-    print(input_img.shape)
-
-    x = np.arange(12).reshape(4, 3) / 12
-    data = pd.DataFrame(data=x, columns=['x', 'y', 'z'])
-    fig = px.scatter_3d(data, x="x", y="y", z="z")
-    return fig
-
-demo = gr.Interface(plot, gr.Image(shape=(200, 200)), gr.Plot())
-demo.launch()
-
-
-
-# Save prediction and additional data
-# to draw the viewing frustum and remove scene outside the room for NYUv2
-# output_path = os.path.join(config.output_path, config.dataset)
-# with torch.no_grad():
-#     for batch in tqdm(data_loader):
-#         batch["img"] = batch["img"].cuda()
-#         pred = model(batch)
-#         y_pred = torch.softmax(pred["ssc_logit"], dim=1).detach().cpu().numpy()
-#         y_pred = np.argmax(y_pred, axis=1)
-#         for i in range(config.batch_size):
-#             out_dict = {"y_pred": y_pred[i].astype(np.uint16)}
-#             if "target" in batch:
-#                 out_dict["target"] = (
-#                     batch["target"][i].detach().cpu().numpy().astype(np.uint16)
-#                 )
-
-#             if config.dataset == "NYU":
-#                 write_path = output_path
-#                 filepath = os.path.join(write_path, batch["name"][i] + ".pkl")
-#                 out_dict["cam_pose"] = batch["cam_pose"][i].detach().cpu().numpy()
-#                 out_dict["vox_origin"] = (
-#                     batch["vox_origin"][i].detach().cpu().numpy()
-#                 )
-#             else:
-#                 write_path = os.path.join(output_path, batch["sequence"][i])
-#                 filepath = os.path.join(write_path, batch["frame_id"][i] + ".pkl")
-#                 out_dict["fov_mask_1"] = (
-#                     batch["fov_mask_1"][i].detach().cpu().numpy()
-#                 )
-#                 out_dict["cam_k"] = batch["cam_k"][i].detach().cpu().numpy()
-#                 out_dict["T_velo_2_cam"] = (
-#                     batch["T_velo_2_cam"][i].detach().cpu().numpy()
-#                 )
-
-#             os.makedirs(write_path, exist_ok=True)
-#             with open(filepath, "wb") as handle:
-#                 pickle.dump(out_dict, handle)
-#             print("wrote to", filepath)