Commit
·
33e13ec
1
Parent(s):
457fc41
Update parquet files (step 37 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/testing/test_main.py +0 -27
- spaces/17TheWord/RealESRGAN/realesrgan/archs/discriminator_arch.py +0 -67
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles - The Unforgettable Saga of Love and War.md +0 -99
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Email Extractor 14 Serial Key.md +0 -142
- spaces/1gistliPinn/ChatGPT4/Examples/Alsat m live seriali me fal Nj serial q trajton tema si dashuria tradhtia hakmarrja dhe falja.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Ap Calculus Textbook Finney Pdf Download Master Calculus with Thomas and Finneys Classic Textbook.md +0 -5
- spaces/1gistliPinn/ChatGPT4/Examples/Candydoll.tv Laura-B Set Updated !FREE!.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Dpwh Blue Book Volume 2 Pdf Free Download HOT!.md +0 -6
- spaces/1toTree/lora_test/ppdiffusers/__init__.py +0 -162
- spaces/2023Liu2023/bingo/src/lib/isomorphic/index.ts +0 -17
- spaces/7hao/bingo/src/lib/bots/bing/tts.ts +0 -82
- spaces/801artistry/RVC801/demucs/parser.py +0 -244
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/utils.py +0 -132
- spaces/Afrihub/README/README.md +0 -10
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/reflection.py +0 -128
- spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/central.py +0 -56
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/anchor-plugin.d.ts +0 -9
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py +0 -39
- spaces/Amrrs/DragGan-Inversion/legacy.py +0 -369
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion_flax.py +0 -681
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet.py +0 -1009
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dynamic_modules_utils.py +0 -456
- spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/lvis_v1_instance.py +0 -23
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/env.py +0 -95
- spaces/Araloak/fz/chat_completion.py +0 -62
- spaces/Artrajz/vits-simple-api/templates/index.html +0 -535
- spaces/AsakuraMizu/moe-tts/app.py +0 -320
- spaces/Ash123/stable-diffusion-nano/README.md +0 -16
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/uts46data.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py +0 -304
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py +0 -232
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py +0 -207
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py +0 -186
- spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md +0 -37
- spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md +0 -75
- spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md +0 -74
- spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md +0 -64
- spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts +0 -29
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py +0 -1300
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py +0 -57
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py +0 -1110
- spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h +0 -172
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h +0 -44
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h +0 -78
- spaces/CVPR/time/README.md +0 -22
- spaces/Cecil8352/vits-models/transforms.py +0 -193
- spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp +0 -91
- spaces/CofAI/CalculatorUI/README.md +0 -10
- spaces/CofAI/picgen/README.md +0 -14
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py +0 -133
spaces/101-5/gpt4free/g4f/.v1/testing/test_main.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
import gpt4free
|
2 |
-
from gpt4free import Provider, quora, forefront
|
3 |
-
|
4 |
-
# usage You
|
5 |
-
response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
|
6 |
-
print(response)
|
7 |
-
|
8 |
-
# usage Poe
|
9 |
-
token = quora.Account.create(logging=False)
|
10 |
-
response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
|
11 |
-
print(response)
|
12 |
-
|
13 |
-
# usage forefront
|
14 |
-
token = forefront.Account.create(logging=False)
|
15 |
-
response = gpt4free.Completion.create(
|
16 |
-
Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
|
17 |
-
)
|
18 |
-
print(response)
|
19 |
-
print(f'END')
|
20 |
-
|
21 |
-
# usage theb
|
22 |
-
response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
|
23 |
-
print(response)
|
24 |
-
|
25 |
-
# usage cocalc
|
26 |
-
response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
|
27 |
-
print(response)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/17TheWord/RealESRGAN/realesrgan/archs/discriminator_arch.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
from basicsr.utils.registry import ARCH_REGISTRY
|
2 |
-
from torch import nn as nn
|
3 |
-
from torch.nn import functional as F
|
4 |
-
from torch.nn.utils import spectral_norm
|
5 |
-
|
6 |
-
|
7 |
-
@ARCH_REGISTRY.register()
|
8 |
-
class UNetDiscriminatorSN(nn.Module):
|
9 |
-
"""Defines a U-Net discriminator with spectral normalization (SN)
|
10 |
-
|
11 |
-
It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
|
12 |
-
|
13 |
-
Arg:
|
14 |
-
num_in_ch (int): Channel number of inputs. Default: 3.
|
15 |
-
num_feat (int): Channel number of base intermediate features. Default: 64.
|
16 |
-
skip_connection (bool): Whether to use skip connections between U-Net. Default: True.
|
17 |
-
"""
|
18 |
-
|
19 |
-
def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
|
20 |
-
super(UNetDiscriminatorSN, self).__init__()
|
21 |
-
self.skip_connection = skip_connection
|
22 |
-
norm = spectral_norm
|
23 |
-
# the first convolution
|
24 |
-
self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
|
25 |
-
# downsample
|
26 |
-
self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
|
27 |
-
self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
|
28 |
-
self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
|
29 |
-
# upsample
|
30 |
-
self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
|
31 |
-
self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
|
32 |
-
self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
|
33 |
-
# extra convolutions
|
34 |
-
self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
|
35 |
-
self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
|
36 |
-
self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
|
37 |
-
|
38 |
-
def forward(self, x):
|
39 |
-
# downsample
|
40 |
-
x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
|
41 |
-
x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
|
42 |
-
x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
|
43 |
-
x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
|
44 |
-
|
45 |
-
# upsample
|
46 |
-
x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)
|
47 |
-
x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
|
48 |
-
|
49 |
-
if self.skip_connection:
|
50 |
-
x4 = x4 + x2
|
51 |
-
x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)
|
52 |
-
x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
|
53 |
-
|
54 |
-
if self.skip_connection:
|
55 |
-
x5 = x5 + x1
|
56 |
-
x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)
|
57 |
-
x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
|
58 |
-
|
59 |
-
if self.skip_connection:
|
60 |
-
x6 = x6 + x0
|
61 |
-
|
62 |
-
# extra convolutions
|
63 |
-
out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
|
64 |
-
out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
|
65 |
-
out = self.conv9(out)
|
66 |
-
|
67 |
-
return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles - The Unforgettable Saga of Love and War.md
DELETED
@@ -1,99 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles</h1>
|
3 |
-
<p>If you are a fan of Tamil cinema, you might have heard of Aayirathil Oruvan, a 2010 epic fantasy adventure film directed by Selvaraghavan. The film was a commercial failure when it was released, but over the years, it has gained a cult following among the audience and critics alike. The film is praised for its ambitious vision, original story, stunning visuals, and haunting music. It is also known for its controversial and violent scenes, which were censored in the theatrical release. However, in 2021, the director released an uncut version of the film on an OTT platform, which restored the original footage and added more depth and clarity to the film. In this article, we will explore what makes Aayirathil Oruvan a masterpiece of Tamil cinema, and why you should watch the uncut version with English subtitles.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<h3>What is Aayirathil Oruvan?</h3>
|
6 |
-
<p>Aayirathil Oruvan (English: One Man in a Thousand) is a 2010 Tamil-language film written and directed by Selvaraghavan, starring Karthi, Reemma Sen, Andrea Jeremiah, Parthiban, and Pratap Pothan. The film is loosely based on the historical novel Ponniyin Selvan by Kalki Krishnamurthy, which narrates the story of the Chola dynasty in medieval India. However, Selvaraghavan adds his own twist to the tale by setting it in a fictional world where the Chola kingdom has been lost for centuries and is rediscovered by a group of explorers in the present day. The film blends elements of fantasy, adventure, horror, mystery, and romance to create a unique cinematic experience.</p>
|
7 |
-
<h2>aayirathil oruvan 2010 hd full movie uncut version with english subtitles</h2><br /><p><b><b>Download</b> <a href="https://byltly.com/2uKwsc">https://byltly.com/2uKwsc</a></b></p><br /><br />
|
8 |
-
<h3>Why is it a cult classic?</h3>
|
9 |
-
<p>Aayirathil Oruvan was one of the most expensive and ambitious films ever made in Tamil cinema at that time. It took more than three years to complete, with extensive research, pre-production, shooting, and post-production. The film was shot in various locations across India, Thailand, Vietnam, and Jordan. The film also featured elaborate sets, costumes, props, and special effects to create a realistic and immersive world. The film had a grand musical score composed by G.V. Prakash Kumar, with lyrics by Vairamuthu. The film also had some of the most talented actors in Tamil cinema who gave memorable performances.</p>
|
10 |
-
<p>and lack of commercial appeal. The film also faced several controversies and legal issues, such as plagiarism allegations, censorship cuts, and distribution problems. The film was a box office flop and was soon forgotten by the mainstream audience.</p>
|
11 |
-
<p>However, over the years, the film started to gain a loyal fan base who appreciated the film for its artistic vision, innovative storytelling, and technical brilliance. The film also received positive reviews from some critics who re-evaluated the film and recognized its merits. The film became a cult classic and a benchmark for Tamil cinema. The film also inspired many filmmakers and artists who were influenced by its style and themes. The film also generated a lot of curiosity and speculation among the fans who wanted to know more about the film's world and characters.</p>
|
12 |
-
<h3>What is the uncut version and why is it different?</h3>
|
13 |
-
<p>The uncut version of Aayirathil Oruvan is the director's original cut of the film, which was never released in theatres due to censorship issues. The uncut version has more than 30 minutes of additional footage that was either trimmed or deleted from the theatrical release. The uncut version also has improved sound design, color grading, and subtitles. The uncut version reveals more details and explanations about the film's plot, characters, and themes. The uncut version also has more graphic and disturbing scenes that showcase the brutality and horror of the film's world. The uncut version is considered to be the definitive version of the film by the director and the fans.</p>
|
14 |
-
<p>The uncut version of Aayirathil Oruvan was released on an OTT platform in January 2021, coinciding with the 11th anniversary of the film's release. The uncut version received an overwhelming response from the fans and critics who watched it for the first time or revisited it after a long time. The uncut version also attracted new viewers who were curious about the film's hype and reputation. The uncut version created a huge buzz on social media and online forums, where people discussed and debated about the film's various aspects. The uncut version also received appreciation from celebrities and industry insiders who praised the film's vision and quality.</p>
|
15 |
-
<h2>Plot Summary</h2>
|
16 |
-
<h3>The expedition to find the lost Chola kingdom</h3>
|
17 |
-
<p>The film begins with a prologue that shows how the Chola kingdom was attacked by the Pandya kingdom in 1279 CE. The Chola king and his people fled to a secret location to escape from their enemies. However, their whereabouts were never known to anyone.</p>
|
18 |
-
<p>watch aayirathil oruvan 2010 hd uncut movie online with subtitles<br />
|
19 |
-
aayirathil oruvan 2010 full hd movie download uncut version english subs<br />
|
20 |
-
how to stream aayirathil oruvan 2010 hd movie uncut version with subtitles<br />
|
21 |
-
aayirathil oruvan 2010 hd uncut movie review and ratings with english subtitles<br />
|
22 |
-
where to find aayirathil oruvan 2010 hd full movie uncut version with subs<br />
|
23 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles cast and crew<br />
|
24 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles plot and summary<br />
|
25 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs trailer and teaser<br />
|
26 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles songs and soundtrack<br />
|
27 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles trivia and facts<br />
|
28 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles awards and nominations<br />
|
29 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs box office and budget<br />
|
30 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles behind the scenes and making<br />
|
31 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles analysis and interpretation<br />
|
32 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles comparison and contrast<br />
|
33 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs fan theories and speculations<br />
|
34 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles memes and jokes<br />
|
35 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles quotes and dialogues<br />
|
36 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles references and easter eggs<br />
|
37 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs controversies and criticisms<br />
|
38 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles sequel and prequel<br />
|
39 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles remake and reboot<br />
|
40 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles adaptation and inspiration<br />
|
41 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs genre and theme<br />
|
42 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles symbolism and imagery<br />
|
43 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles style and tone<br />
|
44 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles message and moral<br />
|
45 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs history and background<br />
|
46 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles influence and impact<br />
|
47 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles best and worst scenes<br />
|
48 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles favorite and least favorite characters<br />
|
49 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs recommendations and suggestions<br />
|
50 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles opinions and feedbacks<br />
|
51 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles questions and answers<br />
|
52 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles challenges and quizzes<br />
|
53 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs facts and myths<br />
|
54 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles secrets and surprises<br />
|
55 |
-
aayirathil oruvan 2010 hd movie uncut version english subtitles mistakes and errors<br />
|
56 |
-
aayirathil oruvan 2010 hd full movie uncut version with subtitles tips and tricks<br />
|
57 |
-
aayirathil oruvan 2010 hd movie uncut version with english subs fun facts and trivia</p>
|
58 |
-
<p>the kingdom through a sea route. Anitha decides to follow the map and hires a team of mercenaries led by a man named Ravichandran (Parthiban) to escort her. She also recruits a local guide named Muthu (Karthi), who is a coolie and a smuggler. Muthu agrees to join the expedition for money and also to impress his love interest Lavanya (Andrea Jeremiah), who is a part of Anitha's team.</p>
|
59 |
-
<h3>The challenges and dangers faced by the team</h3>
|
60 |
-
<p>The team sets sail on a ship and follows the map. Along the way, they encounter various obstacles and threats, such as storms, pirates, sea monsters, and hostile tribes. The team also faces internal conflicts and mistrust, as some of the members have ulterior motives and hidden agendas. The team also learns that they are not the only ones looking for the kingdom, as there is another rival team led by a mysterious man named Chidambaram (Pratap Pothan), who claims to be a descendant of the Chola king.</p>
|
61 |
-
<h3>The secrets and mysteries of the ancient civilization</h3>
|
62 |
-
<p>After facing many hardships and losses, the team finally reaches the island where the kingdom is supposed to be located. However, they are shocked to find that the kingdom is not a normal human civilization, but a bizarre and twisted world where the Chola people have devolved into primitive and savage beings. The Chola people live in fear and worship a tyrannical king who rules over them with an iron fist. The king also has a secret weapon that can destroy anyone who opposes him.</p>
|
63 |
-
<p>The team realizes that they have entered a dangerous and deadly place, where they have to fight for their survival and sanity. They also discover many secrets and mysteries about the kingdom, such as its history, culture, religion, and technology. They also learn that the kingdom is connected to an ancient prophecy that involves Anitha and Muthu.</p>
|
64 |
-
<h3>The climax and the twist ending</h3>
|
65 |
-
<p>The film reaches its climax when the team confronts the king and his weapon in his palace. The king reveals his identity and his connection to Chidambaram. He also reveals his plan to use his weapon to destroy the Pandya kingdom and reclaim his glory. However, his plan is thwarted by Anitha and Muthu, who manage to stop him and his weapon with their courage and intelligence. The film ends with a twist that changes everything about the film's story and characters.</p>
|
66 |
-
<h2>Analysis and Review</h2>
|
67 |
-
<h3>The themes and messages of the film</h3>
|
68 |
-
<p>and how it is influenced by one's culture and heritage. The film also questions the notions of civilization and how it is defined by one's values and morals. The film also questions the notions of power and how it is used and abused by those who have it. The film also questions the notions of loyalty and love and how they are tested and proven by one's actions and choices. The film also questions the notions of destiny and how it is shaped by one's will and fate.</p>
|
69 |
-
<p>The film conveys these themes and messages through its story, characters, and visuals. The film shows how history is not always what it seems, and how it can be manipulated and distorted by those who want to control it. The film shows how identity is not always fixed, and how it can change and evolve over time and circumstances. The film shows how civilization is not always superior, and how it can degrade and decay over generations and environments. The film shows how power is not always righteous, and how it can corrupt and destroy those who possess it. The film shows how loyalty and love are not always easy, and how they can be challenged and betrayed by those who claim them. The film shows how destiny is not always predetermined, and how it can be altered and fulfilled by those who pursue it.</p>
|
70 |
-
<h3>The performances and characters of the film</h3>
|
71 |
-
<p>Aayirathil Oruvan features some of the finest performances and characters in Tamil cinema. The film has a diverse and dynamic cast of actors who bring their characters to life with their skills and expressions. The film has three main protagonists who have their own arcs and motivations. Anitha is a strong and determined woman who is passionate about her work and her mission. She is also a compassionate and caring person who values human life and dignity. Muthu is a witty and charming man who is loyal to his friends and his love. He is also a brave and clever person who uses his wit and humor to overcome his challenges. Ravichandran is a ruthless and cunning leader who is loyal to his team and his duty. He is also a pragmatic and realistic person who does not hesitate to make tough decisions.</p>
|
72 |
-
<p>and ambitious person who wants to restore his ancestral glory and power. The king is a cruel and tyrannical ruler who oppresses his people and enemies with his weapon. He is also a delusional and paranoid person who believes that he is a god and the chosen one.</p>
|
73 |
-
<p>The film also has some supporting characters who have their own roles and personalities. Lavanya is a sweet and innocent girl who loves Muthu and supports him in his journey. She is also a brave and loyal person who sacrifices herself for him. The Chola people are a group of primitive and savage beings who live in fear and misery under the king's rule. They are also a group of loyal and proud beings who follow their traditions and customs. The Pandya people are a group of civilized and cultured beings who live in peace and harmony in their kingdom. They are also a group of brave and noble beings who fight for their freedom and justice.</p>
|
74 |
-
<h3>The visuals and music of the film</h3>
|
75 |
-
<p>Aayirathil Oruvan is a film that showcases some of the most stunning visuals and music in Tamil cinema. The film has a rich and diverse visual style that creates a realistic and immersive world. The film has a variety of locations, such as forests, deserts, islands, caves, temples, palaces, and cities. The film also has a variety of sets, costumes, props, and special effects that create a authentic and impressive world. The film also has a variety of shots, angles, lighting, and colors that create a dynamic and expressive world.</p>
|
76 |
-
<p>The film also has a grand and haunting musical score that enhances the mood and emotion of the film. The film has a variety of songs, such as folk songs, rock songs, classical songs, and theme songs. The film also has a variety of instruments, such as drums, guitars, flutes, violins, and trumpets. The film also has a variety of vocals, such as male vocals, female vocals, chorus vocals, and tribal vocals. The film also has a variety of lyrics, such as Tamil lyrics, English lyrics, Sanskrit lyrics, and gibberish lyrics.</p>
|
77 |
-
<h3>The strengths and weaknesses of the film</h3>
|
78 |
-
<p>vision, and quality. The film's weaknesses are its complexity, confusion, and controversy. The film's strengths are that it is a film that dares to be different and innovative in a industry that is often dominated by formulaic and commercial films. The film's strengths are that it is a film that has a clear and strong vision of what it wants to say and show, and does not compromise on its artistic integrity and values. The film's strengths are that it is a film that has a high level of quality in terms of its technical aspects, such as its production design, cinematography, editing, sound design, music, and special effects.</p>
|
79 |
-
<p>The film's weaknesses are that it is a film that is too complex and confusing for the average viewer to understand and appreciate. The film's weaknesses are that it is a film that has a lot of plot holes, inconsistencies, and contradictions that make it hard to follow and believe. The film's weaknesses are that it is a film that has a lot of controversial and violent scenes that make it hard to watch and enjoy. The film's weaknesses are that it is a film that has a lot of negative reviews and feedback from the audience and critics who did not like or appreciate the film.</p>
|
80 |
-
<h2>Conclusion</h2>
|
81 |
-
<h3>Why you should watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles</h3>
|
82 |
-
<p>In conclusion, Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles is a film that you should watch if you are looking for a different and unique cinematic experience. It is a film that will challenge your mind and senses with its story, characters, visuals, and music. It is a film that will make you think and feel with its themes and messages. It is a film that will surprise and shock you with its twists and turns. It is a film that will inspire and impress you with its vision and quality. It is a film that will make you appreciate the art and craft of filmmaking.</p>
|
83 |
-
<p>Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles is not a perfect film, but it is a masterpiece of Tamil cinema. It is a film that deserves your attention and respect. It is a film that you should not miss.</p>
|
84 |
-
<h2>FAQs</h2>
|
85 |
-
<p>Here are some frequently asked questions about Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles:</p>
|
86 |
-
<ol>
|
87 |
-
<li>Where can I watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles?</li>
|
88 |
-
<p>You can watch Aayirathil Oruvan 2010 HD Full Movie Uncut Version with English Subtitles on the OTT platform Zee5, where it was released in January 2021.</p>
|
89 |
-
<li>What is the difference between the theatrical release and the uncut version of Aayirathil Oruvan?</li>
|
90 |
-
<p>and adds more clarity and depth to the film. The uncut version also has improved sound design, color grading, and subtitles.</p>
|
91 |
-
<li>What is the meaning of the title Aayirathil Oruvan?</li>
|
92 |
-
<p>The title Aayirathil Oruvan means One Man in a Thousand in Tamil. It refers to the protagonist Muthu, who is a common man who becomes a hero in his journey. It also refers to the antagonist Chidambaram, who is a rare man who claims to be a king in his lineage. It also refers to the director Selvaraghavan, who is a unique man who made a film like no other.</p>
|
93 |
-
<li>What is the genre of Aayirathil Oruvan?</li>
|
94 |
-
<p>Aayirathil Oruvan is a film that does not fit into one genre, but rather combines elements of various genres, such as fantasy, adventure, horror, mystery, and romance. The film can be considered as a historical fantasy adventure film with a touch of horror and mystery.</p>
|
95 |
-
<li>What is the message of Aayirathil Oruvan?</li>
|
96 |
-
<p>Aayirathil Oruvan is a film that has many messages and interpretations, depending on the viewer's perspective and understanding. Some of the possible messages are: - History is not always what it seems, and it can be rewritten by those who have the power and the will to do so. - Identity is not always fixed, and it can change and evolve over time and circumstances. - Civilization is not always superior, and it can degrade and decay over generations and environments. - Power is not always righteous, and it can corrupt and destroy those who possess it. - Loyalty and love are not always easy, and they can be challenged and betrayed by those who claim them. - Destiny is not always predetermined, and it can be altered and fulfilled by those who pursue it.</p>
|
97 |
-
</p> 0a6ba089eb<br />
|
98 |
-
<br />
|
99 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Email Extractor 14 Serial Key.md
DELETED
@@ -1,142 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Email Extractor 14 Serial Key: How to Get It and Why You Need It</h1>
|
3 |
-
<p>If you are looking for a powerful and easy-to-use tool to extract email addresses from various sources, such as websites, search engines, files, folders, and email accounts, then you might have heard of Email Extractor 14. This software is designed to help you build your own email list for marketing, sales, or communication purposes. But how can you get the most out of this software? And how can you get a valid serial key to activate it? In this article, we will answer these questions and more.</p>
|
4 |
-
<h2>email extractor 14 serial key</h2><br /><p><b><b>Download</b> ——— <a href="https://byltly.com/2uKznp">https://byltly.com/2uKznp</a></b></p><br /><br />
|
5 |
-
<h2>What is Email Extractor 14?</h2>
|
6 |
-
<p>Email Extractor 14 is a software program that allows you to extract email addresses from various sources in a fast and efficient way. You can use it to find email addresses of your potential customers, clients, partners, or competitors. You can also use it to verify and clean your existing email list, removing duplicates and invalid emails. With Email Extractor 14, you can create your own targeted and customized email list that suits your needs and goals.</p>
|
7 |
-
<h3>Features and Benefits of Email Extractor 14</h3>
|
8 |
-
<p>Some of the features and benefits of Email Extractor 14 are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It can extract email addresses from websites, search engines, files, folders, and email accounts.</li>
|
11 |
-
<li>It can extract email addresses based on keywords, domains, or specific criteria.</li>
|
12 |
-
<li>It can filter out unwanted email addresses, such as spam, junk, or disposable emails.</li>
|
13 |
-
<li>It can save the extracted email addresses in various formats, such as TXT, CSV, XLS, or XML.</li>
|
14 |
-
<li>It can export the extracted email addresses to other software programs, such as Outlook, Gmail, or MailChimp.</li>
|
15 |
-
<li>It can update itself automatically to ensure optimal performance and compatibility.</li>
|
16 |
-
<li>It has a user-friendly interface that is easy to navigate and operate.</li>
|
17 |
-
<li>It has a high speed and accuracy that can process thousands of email addresses per minute.</li>
|
18 |
-
<li>It has a low system requirement that can run on any Windows computer.</li>
|
19 |
-
<li>It has a lifetime license that allows you to use it forever without any recurring fees.</li>
|
20 |
-
</ul>
|
21 |
-
<h3>How to Use Email Extractor 14</h3>
|
22 |
-
<p>To use Email Extractor 14, you need to follow these simple steps:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Download and install the software from the official website.</li>
|
25 |
-
<li>Launch the software and enter your serial key to activate it.</li>
|
26 |
-
<li>Select the source from which you want to extract email addresses. You can choose from websites, search engines, files, folders, or email accounts.</li>
|
27 |
-
<li>Enter the parameters or criteria for your extraction. You can enter keywords, domains, filters, or other options.</li>
|
28 |
-
<li>Click on the "Start" button and wait for the extraction process to finish.</li>
|
29 |
-
<li>View the results and save them in your preferred format or export them to other software programs.</li>
|
30 |
-
</ol>
|
31 |
-
<h2>What is a Serial Key and Why Do You Need One?</h2>
|
32 |
-
<p>A serial key is a unique code that is used to activate a software program. It is also known as a license key, activation key, product key, or registration key. A serial key is usually composed of alphanumeric characters that are divided into groups by dashes or hyphens. For example: XXXX-XXXX-XXXX-XXXX.</p>
|
33 |
-
<p>You need a serial key to activate Email Extractor 14 because it is a paid software program that requires a valid license to use. Without a serial key, you will not be able to access all the features and functions of the software. You will also not be able to receive updates and support from the developer. Therefore, having a serial key is essential if you want to enjoy the full benefits of Email Extractor 14.</p>
|
34 |
-
<h3>How a Serial Key Works</h3>
|
35 |
-
<p>A serial key works by verifying the authenticity and legitimacy of the software program. When you enter your serial key into the software program, it will check if the key matches with its database. If the key is valid and genuine, it will grant you access to the software program. If the key is invalid or fake, it will deny you access to the software program. A serial key also helps prevent piracy and illegal distribution of the software program by ensuring that only authorized users can use it.</p>
|
36 |
-
<h3>Advantages of Having a Serial Key</h3>
|
37 |
-
<p>Some of the advantages of having a serial key are:</p>
|
38 |
-
<ul>
|
39 |
-
<li>You can use all the features and functions of Email Extractor 14 without any limitations or restrictions.</li>
|
40 |
-
<li>You can receive updates and support from the developer whenever there are new versions or issues with the software program.</li>
|
41 |
-
<li>You can protect your investment and avoid wasting money on buying fake or cracked versions of the software program.</li>
|
42 |
-
<li>You can avoid legal troubles and penalties that may arise from using pirated or unlicensed versions of the software program.</li>
|
43 |
-
</ul>
|
44 |
-
<h3>Risks of Using a Cracked or Fake Serial Key</h3>
|
45 |
-
<p>Some of the risks of using a cracked or fake serial key are:</p>
|
46 |
-
<ul>
|
47 |
-
<li>You may not be able to use all the features and functions of Email Extractor 14 properly or at all.</li>
|
48 |
-
<li>You may expose your computer to viruses, malware, spyware, or other harmful programs that may damage your system or steal your data.</li>
|
49 |
-
<li>You may compromise your security and privacy by allowing hackers or cybercriminals to access your information or accounts.</li>
|
50 |
-
<li>You may violate the terms and conditions of the software program and face legal actions or lawsuits from the developer or other parties.</li>
|
51 |
-
</ul>
|
52 |
-
<h2>How to Get a Genuine Email Extractor 14 Serial Key</h2>
|
53 |
-
<p>If you want to get a genuine Email Extractor 14 serial key, you have three options:</p>
|
54 |
-
<p>email extractor 14 activation code<br />
|
55 |
-
email extractor 14 crack download<br />
|
56 |
-
email extractor 14 license key free<br />
|
57 |
-
email extractor 14 full version<br />
|
58 |
-
email extractor 14 registration key<br />
|
59 |
-
email extractor 14 keygen generator<br />
|
60 |
-
email extractor 14 patch file<br />
|
61 |
-
email extractor 14 torrent link<br />
|
62 |
-
email extractor 14 product key online<br />
|
63 |
-
email extractor 14 serial number finder<br />
|
64 |
-
email extractor 14 cracked software<br />
|
65 |
-
email extractor 14 activation key generator<br />
|
66 |
-
email extractor 14 license code online<br />
|
67 |
-
email extractor 14 serial key free download<br />
|
68 |
-
email extractor 14 unlock code<br />
|
69 |
-
email extractor 14 crack file download<br />
|
70 |
-
email extractor 14 registration code free<br />
|
71 |
-
email extractor 14 keygen download<br />
|
72 |
-
email extractor 14 patch download<br />
|
73 |
-
email extractor 14 torrent download<br />
|
74 |
-
email extractor 14 product key generator<br />
|
75 |
-
email extractor 14 serial number generator<br />
|
76 |
-
email extractor 14 cracked version download<br />
|
77 |
-
email extractor 14 activation key free<br />
|
78 |
-
email extractor 14 license key generator<br />
|
79 |
-
email extractor 14 serial key online<br />
|
80 |
-
email extractor 14 unlock key<br />
|
81 |
-
email extractor 14 crack software download<br />
|
82 |
-
email extractor 14 registration key generator<br />
|
83 |
-
email extractor 14 keygen online<br />
|
84 |
-
email extractor 14 patch online<br />
|
85 |
-
email extractor 14 torrent file download<br />
|
86 |
-
email extractor 14 product key online free<br />
|
87 |
-
email extractor 14 serial number online<br />
|
88 |
-
email extractor 14 cracked version online<br />
|
89 |
-
email extractor 14 activation code generator<br />
|
90 |
-
email extractor 14 license code generator<br />
|
91 |
-
email extractor 14 serial key generator online<br />
|
92 |
-
email extractor 14 unlock code generator<br />
|
93 |
-
email extractor 14 crack software online<br />
|
94 |
-
email extractor 14 registration code generator online<br />
|
95 |
-
email extractor 14 keygen free download<br />
|
96 |
-
email extractor 14 patch free download<br />
|
97 |
-
email extractor 14 torrent file online<br />
|
98 |
-
email extractor 14 product key free download<br />
|
99 |
-
email extractor 14 serial number free download<br />
|
100 |
-
email extractor 14 cracked version free download<br />
|
101 |
-
email extractor 14 activation code free download<br />
|
102 |
-
email extractor 14 license code free download<br />
|
103 |
-
email extractor 14 serial key free online</p>
|
104 |
-
<h3>Buy from the Official Website</h3>
|
105 |
-
<p>The best and safest way to get a genuine Email Extractor 14 serial key is to buy it from the official website. The official website is https://www.emailxtractor.com/. Here you can find all the information about Email Extractor 14, such as its features, benefits, price, and testimonials. You can also download a free trial version of the software program to test it before buying it. To buy a genuine Email Extractor 14 serial key from the official website, you need to follow these steps:</p>
|
106 |
-
<ol>
|
107 |
-
<li>Select the license type that suits your needs. You can choose between Single User License ($69), Multi User License ($99), or Corporate License ($199).</li>
|
108 |
-
<li>Click on the "Buy Now" button and proceed to checkout. You will be redirected to a secure payment page where you can enter your billing details and payment method.</li>
|
109 |
-
<li>Complete your payment and wait for your confirmation email. You will receive an email with your receipt and your serial key within minutes after your payment is processed.</li>
|
110 |
-
<li>Enter your serial key into Email Extractor 14 and enjoy using it!</li>
|
111 |
-
</ol>
|
112 |
-
<h3>Contact the Customer Support</h3>
|
113 |
-
<p>If you have any questions, issues, or problems with your purchase, activation, or usage of Email Extractor 14, you can contact the customer support team for assistance. The customer support team is available 24/7 via email, phone, or live chat. You can find their contact details on their website https://www.emailxtractor.com/contact-us/. They will respond to your queries as soon as possible and help you resolve any issues you may have. They will also provide you with tips, tricks, and best practices on how to use Email Extractor 14 effectively and efficiently.</p>
|
114 |
-
<h3>Use a Coupon Code or a Discount Offer</h3>
|
115 |
-
<p>If you want to save some money on buying a genuine Email Extractor 14 serial key, you can use a coupon code or a discount offer that may be available from time to time. website https://www.emailxtractor.com/, their social media pages, their newsletters, or their affiliates. You can also search for them online using keywords such as "Email Extractor 14 coupon code" or "Email Extractor 14 discount offer". To use a coupon code or a discount offer, you need to follow these steps:</p>
|
116 |
-
<ol>
|
117 |
-
<li>Find a valid and working coupon code or discount offer that applies to Email Extractor 14.</li>
|
118 |
-
<li>Copy the coupon code or click on the discount offer link to activate it.</li>
|
119 |
-
<li>Go to the official website https://www.emailxtractor.com/ and select the license type that suits your needs.</li>
|
120 |
-
<li>Paste the coupon code in the designated box or apply the discount offer automatically at checkout.</li>
|
121 |
-
<li>Complete your payment and wait for your confirmation email. You will receive an email with your receipt and your serial key within minutes after your payment is processed.</li>
|
122 |
-
<li>Enter your serial key into Email Extractor 14 and enjoy using it!</li>
|
123 |
-
</ol>
|
124 |
-
<h2>Conclusion</h2>
|
125 |
-
<p>Email Extractor 14 is a powerful and easy-to-use tool that can help you extract email addresses from various sources in a fast and efficient way. You can use it to build your own email list for marketing, sales, or communication purposes. However, to use Email Extractor 14, you need a valid serial key to activate it. A serial key is a unique code that verifies the authenticity and legitimacy of the software program. Having a serial key allows you to access all the features and functions of Email Extractor 14 without any limitations or restrictions. It also allows you to receive updates and support from the developer whenever there are new versions or issues with the software program. Moreover, having a serial key protects your investment and avoids legal troubles that may arise from using pirated or unlicensed versions of the software program.</p>
|
126 |
-
<p>If you want to get a genuine Email Extractor 14 serial key, you have three options: buy from the official website, contact the customer support, or use a coupon code or a discount offer. Buying from the official website is the best and safest way to get a genuine Email Extractor 14 serial key. You can find all the information about Email Extractor 14 on their website https://www.emailxtractor.com/. You can also download a free trial version of the software program to test it before buying it. Contacting the customer support is another option if you have any questions, issues, or problems with your purchase, activation, or usage of Email Extractor 14. The customer support team is available 24/7 via email, phone, or live chat. They will respond to your queries as soon as possible and help you resolve any issues you may have. They will also provide you with tips, tricks, and best practices on how to use Email Extractor 14 effectively and efficiently. Using a coupon code or a discount offer is another option if you want to save some money on buying a genuine Email Extractor 14 serial key. You can find these coupon codes or discount offers on their website https://www.emailxtractor.com/, their social media pages, their newsletters, or their affiliates. You can also search for them online using keywords such as "Email Extractor 14 coupon code" or "Email Extractor 14 discount offer".</p>
|
127 |
-
<p>We hope this article has helped you understand what Email Extractor 14 is, what a serial key is, why you need one, and how to get one. If you are interested in using Email Extractor 14 for your email extraction needs, we recommend you to get a genuine Email Extractor 14 serial key from one of the options mentioned above. This way, you can enjoy the full benefits of Email Extractor 14 without any hassle or risk. Thank you for reading!</p>
|
128 |
-
<h2>FAQs</h2>
|
129 |
-
<p>Here are some frequently asked questions about Email Extractor 14 and its serial key:</p>
|
130 |
-
<h3>Q: Is Email Extractor 14 safe to use?</h3>
|
131 |
-
<p>A: Yes, Email Extractor 14 is safe to use as long as you download it from the official website https://www.emailxtractor.com/ and use a genuine serial key to activate it. The software program does not contain any viruses, malware, spyware, or other harmful programs that may damage your system or steal your data. The software program also respects your privacy and does not collect or share any of your personal information without your consent.</p>
|
132 |
-
<h3>Q: How long does it take to extract email addresses using Email Extractor 14?</h3>
|
133 |
-
<p>A: The time it takes to extract email addresses using Email Extractor 14 depends on several factors, such as the source, the parameters, the filters, and the speed of your internet connection. However, in general, Email Extractor 14 can process thousands of email addresses per minute. Therefore, it can extract email addresses from various sources in a fast and efficient way.</p>
|
134 |
-
<h3>Q: How many email addresses can I extract using Email Extractor 14?</h3>
|
135 |
-
<p>A: There is no limit on how many email addresses you can extract using Email Extractor 14. You can extract as many email addresses as you want from various sources using Email Extractor 14. However, you should be aware of the ethical and legal implications of extracting and using email addresses for your purposes. You should always respect the privacy and consent of the email owners and follow the rules and regulations of email marketing and communication.</p>
|
136 |
-
<h3>Q: Can I use Email Extractor 14 on multiple computers?</h3>
|
137 |
-
<p>A: Yes, you can use Email Extractor 14 on multiple computers as long as you have a valid serial key for each computer. You can buy multiple licenses for Email Extractor 14 from the official website https://www.emailxtractor.com/. You can choose between Single User License ($69), Multi User License ($99), or Corporate License ($199). Each license type allows you to use Email Extractor 14 on a different number of computers.</p>
|
138 |
-
<h3>Q: What if I lose my serial key?</h3>
|
139 |
-
<p>A: If you lose your serial key for Email Extractor 14, you can contact the customer support team for assistance. They will help you recover your serial key as soon as possible. You can find their contact details on their website https://www.emailxtractor.com/contact-us/. You can also check your confirmation email that contains your receipt and your serial key.</p>
|
140 |
-
</p> 0a6ba089eb<br />
|
141 |
-
<br />
|
142 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Alsat m live seriali me fal Nj serial q trajton tema si dashuria tradhtia hakmarrja dhe falja.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>alsat m live seriali me fal</h2><br /><p><b><b>Download File</b> ☆☆☆☆☆ <a href="https://imgfil.com/2uy1DW">https://imgfil.com/2uy1DW</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Ap Calculus Textbook Finney Pdf Download Master Calculus with Thomas and Finneys Classic Textbook.md
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>APEX Calculus is a calculus textbook written for traditional college/university calculus courses. It has the look and feel of the calculus book you likely use right now (Stewart, Thomas & Finney, etc.). The explanations of new concepts is clear, written for someone who does not yet know calculus. Each section ends with an exercise set with ample problems to practice & test skills (odd answers are in the back).</p>
|
3 |
-
<h2>Ap Calculus Textbook Finney Pdf Download</h2><br /><p><b><b>DOWNLOAD</b> → <a href="https://imgfil.com/2uxYrI">https://imgfil.com/2uxYrI</a></b></p><br /><br /> aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Candydoll.tv Laura-B Set Updated !FREE!.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>candydoll.tv Laura-B set updated</h2><br /><p><b><b>Download File</b> » <a href="https://imgfil.com/2uxZKK">https://imgfil.com/2uxZKK</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
d5da3c52bf<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Dpwh Blue Book Volume 2 Pdf Free Download HOT!.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>dpwh blue book volume 2 pdf free download</h2><br /><p><b><b>DOWNLOAD</b> ★ <a href="https://imgfil.com/2uy1Ws">https://imgfil.com/2uy1Ws</a></b></p><br /><br />
|
2 |
-
|
3 |
-
3cee63e6c2<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/__init__.py
DELETED
@@ -1,162 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
# flake8: noqa
|
16 |
-
|
17 |
-
from .configuration_utils import ConfigMixin
|
18 |
-
from .fastdeploy_utils import FastDeployRuntimeModel
|
19 |
-
from .ppnlp_patch_utils import *
|
20 |
-
from .utils import (
|
21 |
-
OptionalDependencyNotAvailable,
|
22 |
-
is_fastdeploy_available,
|
23 |
-
is_inflect_available,
|
24 |
-
is_k_diffusion_available,
|
25 |
-
is_librosa_available,
|
26 |
-
is_onnx_available,
|
27 |
-
is_paddle_available,
|
28 |
-
is_paddlenlp_available,
|
29 |
-
is_scipy_available,
|
30 |
-
is_unidecode_available,
|
31 |
-
logging,
|
32 |
-
)
|
33 |
-
from .version import VERSION as __version__
|
34 |
-
|
35 |
-
try:
|
36 |
-
if not is_paddle_available():
|
37 |
-
raise OptionalDependencyNotAvailable()
|
38 |
-
except OptionalDependencyNotAvailable:
|
39 |
-
from .utils.dummy_paddle_objects import * # noqa F403
|
40 |
-
else:
|
41 |
-
from .initializer import *
|
42 |
-
from .modeling_utils import ModelMixin
|
43 |
-
from .models import (
|
44 |
-
AutoencoderKL,
|
45 |
-
PriorTransformer,
|
46 |
-
Transformer2DModel,
|
47 |
-
UNet1DModel,
|
48 |
-
UNet2DConditionModel,
|
49 |
-
UNet2DModel,
|
50 |
-
VQModel,
|
51 |
-
)
|
52 |
-
from .optimization import (
|
53 |
-
get_constant_schedule,
|
54 |
-
get_constant_schedule_with_warmup,
|
55 |
-
get_cosine_schedule_with_warmup,
|
56 |
-
get_cosine_with_hard_restarts_schedule_with_warmup,
|
57 |
-
get_linear_schedule_with_warmup,
|
58 |
-
get_polynomial_decay_schedule_with_warmup,
|
59 |
-
get_scheduler,
|
60 |
-
)
|
61 |
-
from .pipeline_utils import DiffusionPipeline
|
62 |
-
from .pipelines import (
|
63 |
-
DanceDiffusionPipeline,
|
64 |
-
DDIMPipeline,
|
65 |
-
DDPMPipeline,
|
66 |
-
KarrasVePipeline,
|
67 |
-
LDMPipeline,
|
68 |
-
LDMSuperResolutionPipeline,
|
69 |
-
PNDMPipeline,
|
70 |
-
RePaintPipeline,
|
71 |
-
ScoreSdeVePipeline,
|
72 |
-
)
|
73 |
-
from .schedulers import (
|
74 |
-
DDIMScheduler,
|
75 |
-
DDPMScheduler,
|
76 |
-
DPMSolverMultistepScheduler,
|
77 |
-
DPMSolverSinglestepScheduler,
|
78 |
-
EulerAncestralDiscreteScheduler,
|
79 |
-
EulerDiscreteScheduler,
|
80 |
-
HeunDiscreteScheduler,
|
81 |
-
IPNDMScheduler,
|
82 |
-
KarrasVeScheduler,
|
83 |
-
KDPM2AncestralDiscreteScheduler,
|
84 |
-
KDPM2DiscreteScheduler,
|
85 |
-
PNDMScheduler,
|
86 |
-
RePaintScheduler,
|
87 |
-
SchedulerMixin,
|
88 |
-
ScoreSdeVeScheduler,
|
89 |
-
UnCLIPScheduler,
|
90 |
-
VQDiffusionScheduler,
|
91 |
-
)
|
92 |
-
from .schedulers.preconfig import PreconfigEulerAncestralDiscreteScheduler
|
93 |
-
from .training_utils import EMAModel
|
94 |
-
|
95 |
-
try:
|
96 |
-
if not (is_paddle_available() and is_scipy_available()):
|
97 |
-
raise OptionalDependencyNotAvailable()
|
98 |
-
except OptionalDependencyNotAvailable:
|
99 |
-
from .utils.dummy_paddle_and_scipy_objects import * # noqa F403
|
100 |
-
else:
|
101 |
-
from .schedulers import LMSDiscreteScheduler
|
102 |
-
from .schedulers.preconfig import PreconfigLMSDiscreteScheduler
|
103 |
-
|
104 |
-
try:
|
105 |
-
if not (is_paddle_available() and is_paddlenlp_available()):
|
106 |
-
raise OptionalDependencyNotAvailable()
|
107 |
-
except OptionalDependencyNotAvailable:
|
108 |
-
from .utils.dummy_paddle_and_paddlenlp_objects import * # noqa F403
|
109 |
-
else:
|
110 |
-
from .pipelines import (
|
111 |
-
AltDiffusionImg2ImgPipeline,
|
112 |
-
AltDiffusionPipeline,
|
113 |
-
CycleDiffusionPipeline,
|
114 |
-
LDMBertModel,
|
115 |
-
LDMTextToImagePipeline,
|
116 |
-
PaintByExamplePipeline,
|
117 |
-
StableDiffusionDepth2ImgPipeline,
|
118 |
-
StableDiffusionImageVariationPipeline,
|
119 |
-
StableDiffusionImg2ImgPipeline,
|
120 |
-
StableDiffusionInpaintPipeline,
|
121 |
-
StableDiffusionInpaintPipelineLegacy,
|
122 |
-
StableDiffusionMegaPipeline,
|
123 |
-
StableDiffusionPipeline,
|
124 |
-
StableDiffusionPipelineAllinOne,
|
125 |
-
StableDiffusionPipelineSafe,
|
126 |
-
StableDiffusionUpscalePipeline,
|
127 |
-
UnCLIPPipeline,
|
128 |
-
VersatileDiffusionDualGuidedPipeline,
|
129 |
-
VersatileDiffusionImageVariationPipeline,
|
130 |
-
VersatileDiffusionPipeline,
|
131 |
-
VersatileDiffusionTextToImagePipeline,
|
132 |
-
VQDiffusionPipeline,
|
133 |
-
)
|
134 |
-
|
135 |
-
try:
|
136 |
-
if not (is_paddle_available() and is_paddlenlp_available() and is_k_diffusion_available()):
|
137 |
-
raise OptionalDependencyNotAvailable()
|
138 |
-
except OptionalDependencyNotAvailable:
|
139 |
-
from .utils.dummy_paddle_and_paddlenlp_and_k_diffusion_objects import * # noqa F403
|
140 |
-
else:
|
141 |
-
from .pipelines import StableDiffusionKDiffusionPipeline
|
142 |
-
|
143 |
-
try:
|
144 |
-
if not (is_paddle_available() and is_paddlenlp_available() and is_fastdeploy_available()):
|
145 |
-
raise OptionalDependencyNotAvailable()
|
146 |
-
except OptionalDependencyNotAvailable:
|
147 |
-
from .utils.dummy_paddle_and_paddlenlp_and_fastdeploy_objects import * # noqa F403
|
148 |
-
else:
|
149 |
-
from .pipelines import (
|
150 |
-
FastDeployStableDiffusionImg2ImgPipeline,
|
151 |
-
FastDeployStableDiffusionInpaintPipeline,
|
152 |
-
FastDeployStableDiffusionInpaintPipelineLegacy,
|
153 |
-
FastDeployStableDiffusionMegaPipeline,
|
154 |
-
FastDeployStableDiffusionPipeline,
|
155 |
-
)
|
156 |
-
try:
|
157 |
-
if not (is_paddle_available() and is_librosa_available()):
|
158 |
-
raise OptionalDependencyNotAvailable()
|
159 |
-
except OptionalDependencyNotAvailable:
|
160 |
-
from .utils.dummy_paddle_and_librosa_objects import * # noqa F403
|
161 |
-
else:
|
162 |
-
from .pipelines import AudioDiffusionPipeline, Mel
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/lib/isomorphic/index.ts
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import Default from './browser'
|
4 |
-
|
5 |
-
let exportsModel: any = {}
|
6 |
-
|
7 |
-
if (process.browser) {
|
8 |
-
Object.assign(exportsModel, require('./browser').default)
|
9 |
-
} else {
|
10 |
-
Object.assign(exportsModel, require('./node').default)
|
11 |
-
}
|
12 |
-
|
13 |
-
export default exportsModel! as typeof Default
|
14 |
-
|
15 |
-
export const fetch: typeof Default.fetch = exportsModel!.fetch
|
16 |
-
export const WebSocket: typeof Default.WebSocket = exportsModel!.WebSocket
|
17 |
-
export const debug: typeof Default.debug = exportsModel!.debug
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/src/lib/bots/bing/tts.ts
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import { sleep } from './utils'
|
2 |
-
|
3 |
-
const synth = window.speechSynthesis
|
4 |
-
|
5 |
-
export class TTS {
|
6 |
-
currentText = ''
|
7 |
-
speakText = ''
|
8 |
-
private controller = new AbortController()
|
9 |
-
speaking = false
|
10 |
-
get isSpeaking() {
|
11 |
-
return this.speaking
|
12 |
-
}
|
13 |
-
finished = false
|
14 |
-
constructor() {}
|
15 |
-
abort = () => {
|
16 |
-
this.controller.abort()
|
17 |
-
}
|
18 |
-
|
19 |
-
reset = () => {
|
20 |
-
this.speaking = false
|
21 |
-
this.finished = true
|
22 |
-
this.currentText = ''
|
23 |
-
this.speakText = ''
|
24 |
-
this.abort()
|
25 |
-
}
|
26 |
-
|
27 |
-
speak = (text: string) => {
|
28 |
-
if (!synth || text?.trim()?.length < 2) {
|
29 |
-
return
|
30 |
-
}
|
31 |
-
this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '')
|
32 |
-
this.finished = false
|
33 |
-
this.loop()
|
34 |
-
}
|
35 |
-
|
36 |
-
private async doSpeek() {
|
37 |
-
return new Promise((resolve) => {
|
38 |
-
const endIndex = this.finished ? this.currentText.length :
|
39 |
-
Math.max(
|
40 |
-
this.currentText.lastIndexOf('。'),
|
41 |
-
this.currentText.lastIndexOf(';'),
|
42 |
-
this.currentText.lastIndexOf('、'),
|
43 |
-
this.currentText.lastIndexOf('?'),
|
44 |
-
this.currentText.lastIndexOf('\n')
|
45 |
-
)
|
46 |
-
const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0
|
47 |
-
|
48 |
-
if (startIndex >= endIndex) {
|
49 |
-
return resolve(true)
|
50 |
-
}
|
51 |
-
const text = this.currentText.slice(startIndex, endIndex)
|
52 |
-
this.speakText = text
|
53 |
-
const utterThis = new SpeechSynthesisUtterance(text)
|
54 |
-
this.controller.signal.onabort = () => {
|
55 |
-
synth.cancel()
|
56 |
-
this.finished = true
|
57 |
-
resolve(false)
|
58 |
-
}
|
59 |
-
|
60 |
-
utterThis.onend = function (event) {
|
61 |
-
resolve(true)
|
62 |
-
}
|
63 |
-
|
64 |
-
utterThis.onerror = function (event) {
|
65 |
-
resolve(false)
|
66 |
-
}
|
67 |
-
|
68 |
-
const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null
|
69 |
-
utterThis.voice = voice
|
70 |
-
synth.speak(utterThis)
|
71 |
-
})
|
72 |
-
}
|
73 |
-
|
74 |
-
private async loop() {
|
75 |
-
if (this.speaking) return
|
76 |
-
this.speaking = true
|
77 |
-
while(!this.finished) {
|
78 |
-
await Promise.all([sleep(1000), this.doSpeek()])
|
79 |
-
}
|
80 |
-
this.speaking = false
|
81 |
-
}
|
82 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/demucs/parser.py
DELETED
@@ -1,244 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import argparse
|
8 |
-
import os
|
9 |
-
from pathlib import Path
|
10 |
-
|
11 |
-
|
12 |
-
def get_parser():
|
13 |
-
parser = argparse.ArgumentParser("demucs", description="Train and evaluate Demucs.")
|
14 |
-
default_raw = None
|
15 |
-
default_musdb = None
|
16 |
-
if 'DEMUCS_RAW' in os.environ:
|
17 |
-
default_raw = Path(os.environ['DEMUCS_RAW'])
|
18 |
-
if 'DEMUCS_MUSDB' in os.environ:
|
19 |
-
default_musdb = Path(os.environ['DEMUCS_MUSDB'])
|
20 |
-
parser.add_argument(
|
21 |
-
"--raw",
|
22 |
-
type=Path,
|
23 |
-
default=default_raw,
|
24 |
-
help="Path to raw audio, can be faster, see python3 -m demucs.raw to extract.")
|
25 |
-
parser.add_argument("--no_raw", action="store_const", const=None, dest="raw")
|
26 |
-
parser.add_argument("-m",
|
27 |
-
"--musdb",
|
28 |
-
type=Path,
|
29 |
-
default=default_musdb,
|
30 |
-
help="Path to musdb root")
|
31 |
-
parser.add_argument("--is_wav", action="store_true",
|
32 |
-
help="Indicate that the MusDB dataset is in wav format (i.e. MusDB-HQ).")
|
33 |
-
parser.add_argument("--metadata", type=Path, default=Path("metadata/"),
|
34 |
-
help="Folder where metadata information is stored.")
|
35 |
-
parser.add_argument("--wav", type=Path,
|
36 |
-
help="Path to a wav dataset. This should contain a 'train' and a 'valid' "
|
37 |
-
"subfolder.")
|
38 |
-
parser.add_argument("--samplerate", type=int, default=44100)
|
39 |
-
parser.add_argument("--audio_channels", type=int, default=2)
|
40 |
-
parser.add_argument("--samples",
|
41 |
-
default=44100 * 10,
|
42 |
-
type=int,
|
43 |
-
help="number of samples to feed in")
|
44 |
-
parser.add_argument("--data_stride",
|
45 |
-
default=44100,
|
46 |
-
type=int,
|
47 |
-
help="Stride for chunks, shorter = longer epochs")
|
48 |
-
parser.add_argument("-w", "--workers", default=10, type=int, help="Loader workers")
|
49 |
-
parser.add_argument("--eval_workers", default=2, type=int, help="Final evaluation workers")
|
50 |
-
parser.add_argument("-d",
|
51 |
-
"--device",
|
52 |
-
help="Device to train on, default is cuda if available else cpu")
|
53 |
-
parser.add_argument("--eval_cpu", action="store_true", help="Eval on test will be run on cpu.")
|
54 |
-
parser.add_argument("--dummy", help="Dummy parameter, useful to create a new checkpoint file")
|
55 |
-
parser.add_argument("--test", help="Just run the test pipeline + one validation. "
|
56 |
-
"This should be a filename relative to the models/ folder.")
|
57 |
-
parser.add_argument("--test_pretrained", help="Just run the test pipeline + one validation, "
|
58 |
-
"on a pretrained model. ")
|
59 |
-
|
60 |
-
parser.add_argument("--rank", default=0, type=int)
|
61 |
-
parser.add_argument("--world_size", default=1, type=int)
|
62 |
-
parser.add_argument("--master")
|
63 |
-
|
64 |
-
parser.add_argument("--checkpoints",
|
65 |
-
type=Path,
|
66 |
-
default=Path("checkpoints"),
|
67 |
-
help="Folder where to store checkpoints etc")
|
68 |
-
parser.add_argument("--evals",
|
69 |
-
type=Path,
|
70 |
-
default=Path("evals"),
|
71 |
-
help="Folder where to store evals and waveforms")
|
72 |
-
parser.add_argument("--save",
|
73 |
-
action="store_true",
|
74 |
-
help="Save estimated for the test set waveforms")
|
75 |
-
parser.add_argument("--logs",
|
76 |
-
type=Path,
|
77 |
-
default=Path("logs"),
|
78 |
-
help="Folder where to store logs")
|
79 |
-
parser.add_argument("--models",
|
80 |
-
type=Path,
|
81 |
-
default=Path("models"),
|
82 |
-
help="Folder where to store trained models")
|
83 |
-
parser.add_argument("-R",
|
84 |
-
"--restart",
|
85 |
-
action='store_true',
|
86 |
-
help='Restart training, ignoring previous run')
|
87 |
-
|
88 |
-
parser.add_argument("--seed", type=int, default=42)
|
89 |
-
parser.add_argument("-e", "--epochs", type=int, default=180, help="Number of epochs")
|
90 |
-
parser.add_argument("-r",
|
91 |
-
"--repeat",
|
92 |
-
type=int,
|
93 |
-
default=2,
|
94 |
-
help="Repeat the train set, longer epochs")
|
95 |
-
parser.add_argument("-b", "--batch_size", type=int, default=64)
|
96 |
-
parser.add_argument("--lr", type=float, default=3e-4)
|
97 |
-
parser.add_argument("--mse", action="store_true", help="Use MSE instead of L1")
|
98 |
-
parser.add_argument("--init", help="Initialize from a pre-trained model.")
|
99 |
-
|
100 |
-
# Augmentation options
|
101 |
-
parser.add_argument("--no_augment",
|
102 |
-
action="store_false",
|
103 |
-
dest="augment",
|
104 |
-
default=True,
|
105 |
-
help="No basic data augmentation.")
|
106 |
-
parser.add_argument("--repitch", type=float, default=0.2,
|
107 |
-
help="Probability to do tempo/pitch change")
|
108 |
-
parser.add_argument("--max_tempo", type=float, default=12,
|
109 |
-
help="Maximum relative tempo change in %% when using repitch.")
|
110 |
-
|
111 |
-
parser.add_argument("--remix_group_size",
|
112 |
-
type=int,
|
113 |
-
default=4,
|
114 |
-
help="Shuffle sources using group of this size. Useful to somewhat "
|
115 |
-
"replicate multi-gpu training "
|
116 |
-
"on less GPUs.")
|
117 |
-
parser.add_argument("--shifts",
|
118 |
-
type=int,
|
119 |
-
default=10,
|
120 |
-
help="Number of random shifts used for the shift trick.")
|
121 |
-
parser.add_argument("--overlap",
|
122 |
-
type=float,
|
123 |
-
default=0.25,
|
124 |
-
help="Overlap when --split_valid is passed.")
|
125 |
-
|
126 |
-
# See model.py for doc
|
127 |
-
parser.add_argument("--growth",
|
128 |
-
type=float,
|
129 |
-
default=2.,
|
130 |
-
help="Number of channels between two layers will increase by this factor")
|
131 |
-
parser.add_argument("--depth",
|
132 |
-
type=int,
|
133 |
-
default=6,
|
134 |
-
help="Number of layers for the encoder and decoder")
|
135 |
-
parser.add_argument("--lstm_layers", type=int, default=2, help="Number of layers for the LSTM")
|
136 |
-
parser.add_argument("--channels",
|
137 |
-
type=int,
|
138 |
-
default=64,
|
139 |
-
help="Number of channels for the first encoder layer")
|
140 |
-
parser.add_argument("--kernel_size",
|
141 |
-
type=int,
|
142 |
-
default=8,
|
143 |
-
help="Kernel size for the (transposed) convolutions")
|
144 |
-
parser.add_argument("--conv_stride",
|
145 |
-
type=int,
|
146 |
-
default=4,
|
147 |
-
help="Stride for the (transposed) convolutions")
|
148 |
-
parser.add_argument("--context",
|
149 |
-
type=int,
|
150 |
-
default=3,
|
151 |
-
help="Context size for the decoder convolutions "
|
152 |
-
"before the transposed convolutions")
|
153 |
-
parser.add_argument("--rescale",
|
154 |
-
type=float,
|
155 |
-
default=0.1,
|
156 |
-
help="Initial weight rescale reference")
|
157 |
-
parser.add_argument("--no_resample", action="store_false",
|
158 |
-
default=True, dest="resample",
|
159 |
-
help="No Resampling of the input/output x2")
|
160 |
-
parser.add_argument("--no_glu",
|
161 |
-
action="store_false",
|
162 |
-
default=True,
|
163 |
-
dest="glu",
|
164 |
-
help="Replace all GLUs by ReLUs")
|
165 |
-
parser.add_argument("--no_rewrite",
|
166 |
-
action="store_false",
|
167 |
-
default=True,
|
168 |
-
dest="rewrite",
|
169 |
-
help="No 1x1 rewrite convolutions")
|
170 |
-
parser.add_argument("--normalize", action="store_true")
|
171 |
-
parser.add_argument("--no_norm_wav", action="store_false", dest='norm_wav', default=True)
|
172 |
-
|
173 |
-
# Tasnet options
|
174 |
-
parser.add_argument("--tasnet", action="store_true")
|
175 |
-
parser.add_argument("--split_valid",
|
176 |
-
action="store_true",
|
177 |
-
help="Predict chunks by chunks for valid and test. Required for tasnet")
|
178 |
-
parser.add_argument("--X", type=int, default=8)
|
179 |
-
|
180 |
-
# Other options
|
181 |
-
parser.add_argument("--show",
|
182 |
-
action="store_true",
|
183 |
-
help="Show model architecture, size and exit")
|
184 |
-
parser.add_argument("--save_model", action="store_true",
|
185 |
-
help="Skip traning, just save final model "
|
186 |
-
"for the current checkpoint value.")
|
187 |
-
parser.add_argument("--save_state",
|
188 |
-
help="Skip training, just save state "
|
189 |
-
"for the current checkpoint value. You should "
|
190 |
-
"provide a model name as argument.")
|
191 |
-
|
192 |
-
# Quantization options
|
193 |
-
parser.add_argument("--q-min-size", type=float, default=1,
|
194 |
-
help="Only quantize layers over this size (in MB)")
|
195 |
-
parser.add_argument(
|
196 |
-
"--qat", type=int, help="If provided, use QAT training with that many bits.")
|
197 |
-
|
198 |
-
parser.add_argument("--diffq", type=float, default=0)
|
199 |
-
parser.add_argument(
|
200 |
-
"--ms-target", type=float, default=162,
|
201 |
-
help="Model size target in MB, when using DiffQ. Best model will be kept "
|
202 |
-
"only if it is smaller than this target.")
|
203 |
-
|
204 |
-
return parser
|
205 |
-
|
206 |
-
|
207 |
-
def get_name(parser, args):
|
208 |
-
"""
|
209 |
-
Return the name of an experiment given the args. Some parameters are ignored,
|
210 |
-
for instance --workers, as they do not impact the final result.
|
211 |
-
"""
|
212 |
-
ignore_args = set([
|
213 |
-
"checkpoints",
|
214 |
-
"deterministic",
|
215 |
-
"eval",
|
216 |
-
"evals",
|
217 |
-
"eval_cpu",
|
218 |
-
"eval_workers",
|
219 |
-
"logs",
|
220 |
-
"master",
|
221 |
-
"rank",
|
222 |
-
"restart",
|
223 |
-
"save",
|
224 |
-
"save_model",
|
225 |
-
"save_state",
|
226 |
-
"show",
|
227 |
-
"workers",
|
228 |
-
"world_size",
|
229 |
-
])
|
230 |
-
parts = []
|
231 |
-
name_args = dict(args.__dict__)
|
232 |
-
for name, value in name_args.items():
|
233 |
-
if name in ignore_args:
|
234 |
-
continue
|
235 |
-
if value != parser.get_default(name):
|
236 |
-
if isinstance(value, Path):
|
237 |
-
parts.append(f"{name}={value.name}")
|
238 |
-
else:
|
239 |
-
parts.append(f"{name}={value}")
|
240 |
-
if parts:
|
241 |
-
name = " ".join(parts)
|
242 |
-
else:
|
243 |
-
name = "default"
|
244 |
-
return name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/utils.py
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
import torch.nn as nn
|
6 |
-
|
7 |
-
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
|
8 |
-
|
9 |
-
|
10 |
-
def sort_pack_padded_sequence(input, lengths):
|
11 |
-
sorted_lengths, indices = torch.sort(lengths, descending=True)
|
12 |
-
tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True)
|
13 |
-
inv_ix = indices.clone()
|
14 |
-
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
|
15 |
-
return tmp, inv_ix
|
16 |
-
|
17 |
-
def pad_unsort_packed_sequence(input, inv_ix):
|
18 |
-
tmp, _ = pad_packed_sequence(input, batch_first=True)
|
19 |
-
tmp = tmp[inv_ix]
|
20 |
-
return tmp
|
21 |
-
|
22 |
-
def pack_wrapper(module, attn_feats, attn_feat_lens):
|
23 |
-
packed, inv_ix = sort_pack_padded_sequence(attn_feats, attn_feat_lens)
|
24 |
-
if isinstance(module, torch.nn.RNNBase):
|
25 |
-
return pad_unsort_packed_sequence(module(packed)[0], inv_ix)
|
26 |
-
else:
|
27 |
-
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
|
28 |
-
|
29 |
-
def generate_length_mask(lens, max_length=None):
|
30 |
-
lens = torch.as_tensor(lens)
|
31 |
-
N = lens.size(0)
|
32 |
-
if max_length is None:
|
33 |
-
max_length = max(lens)
|
34 |
-
idxs = torch.arange(max_length).repeat(N).view(N, max_length)
|
35 |
-
idxs = idxs.to(lens.device)
|
36 |
-
mask = (idxs < lens.view(-1, 1))
|
37 |
-
return mask
|
38 |
-
|
39 |
-
def mean_with_lens(features, lens):
|
40 |
-
"""
|
41 |
-
features: [N, T, ...] (assume the second dimension represents length)
|
42 |
-
lens: [N,]
|
43 |
-
"""
|
44 |
-
lens = torch.as_tensor(lens)
|
45 |
-
if max(lens) != features.size(1):
|
46 |
-
max_length = features.size(1)
|
47 |
-
mask = generate_length_mask(lens, max_length)
|
48 |
-
else:
|
49 |
-
mask = generate_length_mask(lens)
|
50 |
-
mask = mask.to(features.device) # [N, T]
|
51 |
-
|
52 |
-
while mask.ndim < features.ndim:
|
53 |
-
mask = mask.unsqueeze(-1)
|
54 |
-
feature_mean = features * mask
|
55 |
-
feature_mean = feature_mean.sum(1)
|
56 |
-
while lens.ndim < feature_mean.ndim:
|
57 |
-
lens = lens.unsqueeze(1)
|
58 |
-
feature_mean = feature_mean / lens.to(features.device)
|
59 |
-
# feature_mean = features * mask.unsqueeze(-1)
|
60 |
-
# feature_mean = feature_mean.sum(1) / lens.unsqueeze(1).to(features.device)
|
61 |
-
return feature_mean
|
62 |
-
|
63 |
-
def max_with_lens(features, lens):
|
64 |
-
"""
|
65 |
-
features: [N, T, ...] (assume the second dimension represents length)
|
66 |
-
lens: [N,]
|
67 |
-
"""
|
68 |
-
lens = torch.as_tensor(lens)
|
69 |
-
mask = generate_length_mask(lens).to(features.device) # [N, T]
|
70 |
-
|
71 |
-
feature_max = features.clone()
|
72 |
-
feature_max[~mask] = float("-inf")
|
73 |
-
feature_max, _ = feature_max.max(1)
|
74 |
-
return feature_max
|
75 |
-
|
76 |
-
def repeat_tensor(x, n):
|
77 |
-
return x.unsqueeze(0).repeat(n, *([1] * len(x.shape)))
|
78 |
-
|
79 |
-
def init(m, method="kaiming"):
|
80 |
-
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
|
81 |
-
if method == "kaiming":
|
82 |
-
nn.init.kaiming_uniform_(m.weight)
|
83 |
-
elif method == "xavier":
|
84 |
-
nn.init.xavier_uniform_(m.weight)
|
85 |
-
else:
|
86 |
-
raise Exception(f"initialization method {method} not supported")
|
87 |
-
if m.bias is not None:
|
88 |
-
nn.init.constant_(m.bias, 0)
|
89 |
-
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
|
90 |
-
nn.init.constant_(m.weight, 1)
|
91 |
-
if m.bias is not None:
|
92 |
-
nn.init.constant_(m.bias, 0)
|
93 |
-
elif isinstance(m, nn.Linear):
|
94 |
-
if method == "kaiming":
|
95 |
-
nn.init.kaiming_uniform_(m.weight)
|
96 |
-
elif method == "xavier":
|
97 |
-
nn.init.xavier_uniform_(m.weight)
|
98 |
-
else:
|
99 |
-
raise Exception(f"initialization method {method} not supported")
|
100 |
-
if m.bias is not None:
|
101 |
-
nn.init.constant_(m.bias, 0)
|
102 |
-
elif isinstance(m, nn.Embedding):
|
103 |
-
if method == "kaiming":
|
104 |
-
nn.init.kaiming_uniform_(m.weight)
|
105 |
-
elif method == "xavier":
|
106 |
-
nn.init.xavier_uniform_(m.weight)
|
107 |
-
else:
|
108 |
-
raise Exception(f"initialization method {method} not supported")
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
class PositionalEncoding(nn.Module):
|
114 |
-
|
115 |
-
def __init__(self, d_model, dropout=0.1, max_len=100):
|
116 |
-
super(PositionalEncoding, self).__init__()
|
117 |
-
self.dropout = nn.Dropout(p=dropout)
|
118 |
-
|
119 |
-
pe = torch.zeros(max_len, d_model)
|
120 |
-
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
|
121 |
-
div_term = torch.exp(torch.arange(0, d_model, 2).float() * \
|
122 |
-
(-math.log(10000.0) / d_model))
|
123 |
-
pe[:, 0::2] = torch.sin(position * div_term)
|
124 |
-
pe[:, 1::2] = torch.cos(position * div_term)
|
125 |
-
pe = pe.unsqueeze(0).transpose(0, 1)
|
126 |
-
# self.register_buffer("pe", pe)
|
127 |
-
self.register_parameter("pe", nn.Parameter(pe, requires_grad=False))
|
128 |
-
|
129 |
-
def forward(self, x):
|
130 |
-
# x: [T, N, E]
|
131 |
-
x = x + self.pe[:x.size(0), :]
|
132 |
-
return self.dropout(x)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Afrihub/README/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: README
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: green
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Edit this `README.md` markdown file to author your organization card.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/reflection.py
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
import asyncio
|
2 |
-
import logging
|
3 |
-
from typing import Any, Dict, List
|
4 |
-
|
5 |
-
from datetime import datetime as dt
|
6 |
-
import datetime
|
7 |
-
|
8 |
-
from pydantic import Field
|
9 |
-
|
10 |
-
from agentverse.agents.simulation_agent.conversation import BaseAgent
|
11 |
-
|
12 |
-
# from agentverse.environments.simulation_env.rules.base import Rule
|
13 |
-
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
|
14 |
-
from agentverse.message import Message
|
15 |
-
|
16 |
-
from . import env_registry as EnvironmentRegistry
|
17 |
-
from ..base import BaseEnvironment
|
18 |
-
|
19 |
-
from pydantic import validator
|
20 |
-
|
21 |
-
|
22 |
-
@EnvironmentRegistry.register("reflection")
|
23 |
-
class ReflectionEnvironment(BaseEnvironment):
|
24 |
-
"""
|
25 |
-
Environment used in Observation-Planning-Reflection agent architecture.
|
26 |
-
|
27 |
-
Args:
|
28 |
-
agents: List of agents
|
29 |
-
rule: Rule for the environment
|
30 |
-
max_turns: Maximum number of turns
|
31 |
-
cnt_turn: Current turn number
|
32 |
-
last_messages: Messages from last turn
|
33 |
-
rule_params: Variables set by the rule
|
34 |
-
current_time
|
35 |
-
time_delta: time difference between steps
|
36 |
-
"""
|
37 |
-
|
38 |
-
agents: List[BaseAgent]
|
39 |
-
rule: Rule
|
40 |
-
max_turns: int = 10
|
41 |
-
cnt_turn: int = 0
|
42 |
-
last_messages: List[Message] = []
|
43 |
-
rule_params: Dict = {}
|
44 |
-
current_time: dt = dt.now()
|
45 |
-
time_delta: int = 120
|
46 |
-
#
|
47 |
-
|
48 |
-
# @validator("time_delta")
|
49 |
-
# def convert_str_to_timedelta(cls, string):
|
50 |
-
#
|
51 |
-
# return datetime.timedelta(seconds=int(string))
|
52 |
-
|
53 |
-
def __init__(self, rule, **kwargs):
|
54 |
-
rule_config = rule
|
55 |
-
order_config = rule_config.get("order", {"type": "sequential"})
|
56 |
-
visibility_config = rule_config.get("visibility", {"type": "all"})
|
57 |
-
selector_config = rule_config.get("selector", {"type": "basic"})
|
58 |
-
updater_config = rule_config.get("updater", {"type": "basic"})
|
59 |
-
describer_config = rule_config.get("describer", {"type": "basic"})
|
60 |
-
rule = Rule(
|
61 |
-
order_config,
|
62 |
-
visibility_config,
|
63 |
-
selector_config,
|
64 |
-
updater_config,
|
65 |
-
describer_config,
|
66 |
-
)
|
67 |
-
|
68 |
-
super().__init__(rule=rule, **kwargs)
|
69 |
-
|
70 |
-
async def step(self) -> List[Message]:
|
71 |
-
"""Run one step of the environment"""
|
72 |
-
|
73 |
-
logging.log(logging.INFO, f"Tick tock. Current time: {self.current_time}")
|
74 |
-
|
75 |
-
# Get the next agent index
|
76 |
-
agent_ids = self.rule.get_next_agent_idx(self)
|
77 |
-
|
78 |
-
# Generate current environment description
|
79 |
-
env_descriptions = self.rule.get_env_description(self)
|
80 |
-
|
81 |
-
# Generate the next message
|
82 |
-
messages = await asyncio.gather(
|
83 |
-
*[
|
84 |
-
self.agents[i].astep(self.current_time, env_descriptions[i])
|
85 |
-
for i in agent_ids
|
86 |
-
]
|
87 |
-
)
|
88 |
-
|
89 |
-
# Some rules will select certain messages from all the messages
|
90 |
-
selected_messages = self.rule.select_message(self, messages)
|
91 |
-
self.last_messages = selected_messages
|
92 |
-
self.print_messages(selected_messages)
|
93 |
-
|
94 |
-
# Update the memory of the agents
|
95 |
-
self.rule.update_memory(self)
|
96 |
-
|
97 |
-
# Update the set of visible agents for each agent
|
98 |
-
self.rule.update_visible_agents(self)
|
99 |
-
|
100 |
-
self.cnt_turn += 1
|
101 |
-
|
102 |
-
# update current_time
|
103 |
-
self.tick_tock()
|
104 |
-
|
105 |
-
return selected_messages
|
106 |
-
|
107 |
-
def print_messages(self, messages: List[Message]) -> None:
|
108 |
-
for message in messages:
|
109 |
-
if message is not None:
|
110 |
-
logging.info(f"{message.sender}: {message.content}")
|
111 |
-
|
112 |
-
def reset(self) -> None:
|
113 |
-
"""Reset the environment"""
|
114 |
-
self.cnt_turn = 0
|
115 |
-
self.rule.reset()
|
116 |
-
BaseAgent.update_forward_refs()
|
117 |
-
for agent in self.agents:
|
118 |
-
agent.reset(environment=self)
|
119 |
-
|
120 |
-
def is_done(self) -> bool:
|
121 |
-
"""Check if the environment is done"""
|
122 |
-
return self.cnt_turn >= self.max_turns
|
123 |
-
|
124 |
-
def tick_tock(self) -> None:
|
125 |
-
"""Increment the time"""
|
126 |
-
self.current_time = self.current_time + datetime.timedelta(
|
127 |
-
seconds=self.time_delta
|
128 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/decision_maker/central.py
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
import asyncio
|
3 |
-
from colorama import Fore
|
4 |
-
|
5 |
-
from typing import TYPE_CHECKING, List
|
6 |
-
|
7 |
-
from . import decision_maker_registry
|
8 |
-
from .base import BaseDecisionMaker
|
9 |
-
from agentverse.logging import typewriter_log, logger
|
10 |
-
from agentverse.message import Message
|
11 |
-
|
12 |
-
if TYPE_CHECKING:
|
13 |
-
from agentverse.agents import BaseAgent, SolverAgent, CriticAgent
|
14 |
-
from agentverse.message import SolverMessage
|
15 |
-
|
16 |
-
|
17 |
-
@decision_maker_registry.register("central")
|
18 |
-
class CentralDecisionMaker(BaseDecisionMaker):
|
19 |
-
"""
|
20 |
-
Discuss in a central manner.
|
21 |
-
"""
|
22 |
-
|
23 |
-
name: str = "central"
|
24 |
-
|
25 |
-
async def astep(
|
26 |
-
self,
|
27 |
-
agents: List[BaseAgent],
|
28 |
-
task_description: str,
|
29 |
-
previous_plan: str = "No solution yet.",
|
30 |
-
advice: str = "No advice yet.",
|
31 |
-
*args,
|
32 |
-
**kwargs,
|
33 |
-
) -> List[SolverMessage]:
|
34 |
-
if advice != "No advice yet.":
|
35 |
-
agents[1].add_message_to_memory(
|
36 |
-
[Message(content=advice, sender="Evaluator")]
|
37 |
-
)
|
38 |
-
result = await agents[1].astep(
|
39 |
-
previous_plan,
|
40 |
-
advice,
|
41 |
-
task_description,
|
42 |
-
roles=", ".join(
|
43 |
-
[
|
44 |
-
agent.role_description[0].lower() + agent.role_description[1:]
|
45 |
-
for agent in agents
|
46 |
-
]
|
47 |
-
),
|
48 |
-
)
|
49 |
-
agents[1].add_message_to_memory([result])
|
50 |
-
result = agents[0].step(
|
51 |
-
previous_plan, advice, task_description, chat_record=result.content
|
52 |
-
)
|
53 |
-
return [result]
|
54 |
-
|
55 |
-
def reset(self):
|
56 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/anchor-plugin.d.ts
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
import Anchor from './anchor'
|
2 |
-
|
3 |
-
export default class AnchorPlugin extends Phaser.Plugins.BasePlugin {
|
4 |
-
add(
|
5 |
-
gameObject: Phaser.GameObjects.GameObject,
|
6 |
-
config?: Anchor.IConfig
|
7 |
-
): Anchor;
|
8 |
-
|
9 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/id_loss.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
|
4 |
-
from models.facial_recognition.model_irse import Backbone
|
5 |
-
|
6 |
-
|
7 |
-
class IDLoss(nn.Module):
|
8 |
-
def __init__(self, opts):
|
9 |
-
super(IDLoss, self).__init__()
|
10 |
-
print('Loading ResNet ArcFace')
|
11 |
-
self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
|
12 |
-
self.facenet.load_state_dict(torch.load(opts.ir_se50_weights))
|
13 |
-
self.pool = torch.nn.AdaptiveAvgPool2d((256, 256))
|
14 |
-
self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
|
15 |
-
self.facenet.eval()
|
16 |
-
self.opts = opts
|
17 |
-
|
18 |
-
def extract_feats(self, x):
|
19 |
-
if x.shape[2] != 256:
|
20 |
-
x = self.pool(x)
|
21 |
-
x = x[:, :, 35:223, 32:220] # Crop interesting region
|
22 |
-
x = self.face_pool(x)
|
23 |
-
x_feats = self.facenet(x)
|
24 |
-
return x_feats
|
25 |
-
|
26 |
-
def forward(self, y_hat, y):
|
27 |
-
n_samples = y.shape[0]
|
28 |
-
y_feats = self.extract_feats(y) # Otherwise use the feature from there
|
29 |
-
y_hat_feats = self.extract_feats(y_hat)
|
30 |
-
y_feats = y_feats.detach()
|
31 |
-
loss = 0
|
32 |
-
sim_improvement = 0
|
33 |
-
count = 0
|
34 |
-
for i in range(n_samples):
|
35 |
-
diff_target = y_hat_feats[i].dot(y_feats[i])
|
36 |
-
loss += 1 - diff_target
|
37 |
-
count += 1
|
38 |
-
|
39 |
-
return loss / count, sim_improvement / count
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/legacy.py
DELETED
@@ -1,369 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Converting legacy network pickle into the new format."""
|
10 |
-
|
11 |
-
import click
|
12 |
-
import pickle
|
13 |
-
import re
|
14 |
-
import copy
|
15 |
-
import numpy as np
|
16 |
-
import torch
|
17 |
-
import dnnlib
|
18 |
-
from torch_utils import misc
|
19 |
-
|
20 |
-
# ----------------------------------------------------------------------------
|
21 |
-
|
22 |
-
|
23 |
-
def load_network_pkl(f, force_fp16=False):
|
24 |
-
data = _LegacyUnpickler(f).load()
|
25 |
-
|
26 |
-
# Legacy TensorFlow pickle => convert.
|
27 |
-
if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
|
28 |
-
tf_G, tf_D, tf_Gs = data
|
29 |
-
G = convert_tf_generator(tf_G)
|
30 |
-
D = convert_tf_discriminator(tf_D)
|
31 |
-
G_ema = convert_tf_generator(tf_Gs)
|
32 |
-
data = dict(G=G, D=D, G_ema=G_ema)
|
33 |
-
|
34 |
-
# Add missing fields.
|
35 |
-
if 'training_set_kwargs' not in data:
|
36 |
-
data['training_set_kwargs'] = None
|
37 |
-
if 'augment_pipe' not in data:
|
38 |
-
data['augment_pipe'] = None
|
39 |
-
|
40 |
-
# Validate contents.
|
41 |
-
assert isinstance(data['G'], torch.nn.Module)
|
42 |
-
assert isinstance(data['D'], torch.nn.Module)
|
43 |
-
assert isinstance(data['G_ema'], torch.nn.Module)
|
44 |
-
assert isinstance(data['training_set_kwargs'], (dict, type(None)))
|
45 |
-
assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
|
46 |
-
|
47 |
-
# Force FP16.
|
48 |
-
if force_fp16:
|
49 |
-
for key in ['G', 'D', 'G_ema']:
|
50 |
-
old = data[key]
|
51 |
-
kwargs = copy.deepcopy(old.init_kwargs)
|
52 |
-
fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
|
53 |
-
fp16_kwargs.num_fp16_res = 4
|
54 |
-
fp16_kwargs.conv_clamp = 256
|
55 |
-
if kwargs != old.init_kwargs:
|
56 |
-
new = type(old)(**kwargs).eval().requires_grad_(False)
|
57 |
-
misc.copy_params_and_buffers(old, new, require_all=True)
|
58 |
-
data[key] = new
|
59 |
-
return data
|
60 |
-
|
61 |
-
# ----------------------------------------------------------------------------
|
62 |
-
|
63 |
-
|
64 |
-
class _TFNetworkStub(dnnlib.EasyDict):
|
65 |
-
pass
|
66 |
-
|
67 |
-
|
68 |
-
class _LegacyUnpickler(pickle.Unpickler):
|
69 |
-
def find_class(self, module, name):
|
70 |
-
if module == 'dnnlib.tflib.network' and name == 'Network':
|
71 |
-
return _TFNetworkStub
|
72 |
-
return super().find_class(module, name)
|
73 |
-
|
74 |
-
# ----------------------------------------------------------------------------
|
75 |
-
|
76 |
-
|
77 |
-
def _collect_tf_params(tf_net):
|
78 |
-
# pylint: disable=protected-access
|
79 |
-
tf_params = dict()
|
80 |
-
|
81 |
-
def recurse(prefix, tf_net):
|
82 |
-
for name, value in tf_net.variables:
|
83 |
-
tf_params[prefix + name] = value
|
84 |
-
for name, comp in tf_net.components.items():
|
85 |
-
recurse(prefix + name + '/', comp)
|
86 |
-
recurse('', tf_net)
|
87 |
-
return tf_params
|
88 |
-
|
89 |
-
# ----------------------------------------------------------------------------
|
90 |
-
|
91 |
-
|
92 |
-
def _populate_module_params(module, *patterns):
|
93 |
-
for name, tensor in misc.named_params_and_buffers(module):
|
94 |
-
found = False
|
95 |
-
value = None
|
96 |
-
for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
|
97 |
-
match = re.fullmatch(pattern, name)
|
98 |
-
if match:
|
99 |
-
found = True
|
100 |
-
if value_fn is not None:
|
101 |
-
value = value_fn(*match.groups())
|
102 |
-
break
|
103 |
-
try:
|
104 |
-
assert found
|
105 |
-
if value is not None:
|
106 |
-
tensor.copy_(torch.from_numpy(np.array(value)))
|
107 |
-
except:
|
108 |
-
print(name, list(tensor.shape))
|
109 |
-
raise
|
110 |
-
|
111 |
-
# ----------------------------------------------------------------------------
|
112 |
-
|
113 |
-
|
114 |
-
def convert_tf_generator(tf_G):
|
115 |
-
if tf_G.version < 4:
|
116 |
-
raise ValueError('TensorFlow pickle version too low')
|
117 |
-
|
118 |
-
# Collect kwargs.
|
119 |
-
tf_kwargs = tf_G.static_kwargs
|
120 |
-
known_kwargs = set()
|
121 |
-
|
122 |
-
def kwarg(tf_name, default=None, none=None):
|
123 |
-
known_kwargs.add(tf_name)
|
124 |
-
val = tf_kwargs.get(tf_name, default)
|
125 |
-
return val if val is not None else none
|
126 |
-
|
127 |
-
# Convert kwargs.
|
128 |
-
from training import networks_stylegan2
|
129 |
-
network_class = networks_stylegan2.Generator
|
130 |
-
kwargs = dnnlib.EasyDict(
|
131 |
-
z_dim=kwarg('latent_size', 512),
|
132 |
-
c_dim=kwarg('label_size', 0),
|
133 |
-
w_dim=kwarg('dlatent_size', 512),
|
134 |
-
img_resolution=kwarg('resolution', 1024),
|
135 |
-
img_channels=kwarg('num_channels', 3),
|
136 |
-
channel_base=kwarg('fmap_base', 16384) * 2,
|
137 |
-
channel_max=kwarg('fmap_max', 512),
|
138 |
-
num_fp16_res=kwarg('num_fp16_res', 0),
|
139 |
-
conv_clamp=kwarg('conv_clamp', None),
|
140 |
-
architecture=kwarg('architecture', 'skip'),
|
141 |
-
resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]),
|
142 |
-
use_noise=kwarg('use_noise', True),
|
143 |
-
activation=kwarg('nonlinearity', 'lrelu'),
|
144 |
-
mapping_kwargs=dnnlib.EasyDict(
|
145 |
-
num_layers=kwarg('mapping_layers', 8),
|
146 |
-
embed_features=kwarg('label_fmaps', None),
|
147 |
-
layer_features=kwarg('mapping_fmaps', None),
|
148 |
-
activation=kwarg('mapping_nonlinearity', 'lrelu'),
|
149 |
-
lr_multiplier=kwarg('mapping_lrmul', 0.01),
|
150 |
-
w_avg_beta=kwarg('w_avg_beta', 0.995, none=1),
|
151 |
-
),
|
152 |
-
)
|
153 |
-
|
154 |
-
# Check for unknown kwargs.
|
155 |
-
kwarg('truncation_psi')
|
156 |
-
kwarg('truncation_cutoff')
|
157 |
-
kwarg('style_mixing_prob')
|
158 |
-
kwarg('structure')
|
159 |
-
kwarg('conditioning')
|
160 |
-
kwarg('fused_modconv')
|
161 |
-
unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
|
162 |
-
if len(unknown_kwargs) > 0:
|
163 |
-
raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
|
164 |
-
|
165 |
-
# Collect params.
|
166 |
-
tf_params = _collect_tf_params(tf_G)
|
167 |
-
for name, value in list(tf_params.items()):
|
168 |
-
match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
|
169 |
-
if match:
|
170 |
-
r = kwargs.img_resolution // (2 ** int(match.group(1)))
|
171 |
-
tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
|
172 |
-
kwargs.synthesis.kwargs.architecture = 'orig'
|
173 |
-
# for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
|
174 |
-
|
175 |
-
# Convert params.
|
176 |
-
G = network_class(**kwargs).eval().requires_grad_(False)
|
177 |
-
# pylint: disable=unnecessary-lambda
|
178 |
-
# pylint: disable=f-string-without-interpolation
|
179 |
-
_populate_module_params(G,
|
180 |
-
r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
|
181 |
-
r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(
|
182 |
-
),
|
183 |
-
r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
|
184 |
-
r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(
|
185 |
-
),
|
186 |
-
r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
|
187 |
-
r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
|
188 |
-
r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(
|
189 |
-
3, 2, 0, 1),
|
190 |
-
r'synthesis\.b4\.conv1\.bias', lambda: tf_params[
|
191 |
-
f'synthesis/4x4/Conv/bias'],
|
192 |
-
r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[
|
193 |
-
f'synthesis/noise0'][0, 0],
|
194 |
-
r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[
|
195 |
-
f'synthesis/4x4/Conv/noise_strength'],
|
196 |
-
r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[
|
197 |
-
f'synthesis/4x4/Conv/mod_weight'].transpose(),
|
198 |
-
r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[
|
199 |
-
f'synthesis/4x4/Conv/mod_bias'] + 1,
|
200 |
-
r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(
|
201 |
-
3, 2, 0, 1),
|
202 |
-
r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[
|
203 |
-
f'synthesis/{r}x{r}/Conv0_up/bias'],
|
204 |
-
r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[
|
205 |
-
f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
|
206 |
-
r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[
|
207 |
-
f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
|
208 |
-
r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(
|
209 |
-
),
|
210 |
-
r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[
|
211 |
-
f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
|
212 |
-
r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(
|
213 |
-
3, 2, 0, 1),
|
214 |
-
r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[
|
215 |
-
f'synthesis/{r}x{r}/Conv1/bias'],
|
216 |
-
r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[
|
217 |
-
f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
|
218 |
-
r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[
|
219 |
-
f'synthesis/{r}x{r}/Conv1/noise_strength'],
|
220 |
-
r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(
|
221 |
-
),
|
222 |
-
r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[
|
223 |
-
f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
|
224 |
-
r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(
|
225 |
-
3, 2, 0, 1),
|
226 |
-
r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[
|
227 |
-
f'synthesis/{r}x{r}/ToRGB/bias'],
|
228 |
-
r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(
|
229 |
-
),
|
230 |
-
r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[
|
231 |
-
f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
|
232 |
-
r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(
|
233 |
-
3, 2, 0, 1),
|
234 |
-
r'.*\.resample_filter', None,
|
235 |
-
r'.*\.act_filter', None,
|
236 |
-
)
|
237 |
-
return G
|
238 |
-
|
239 |
-
# ----------------------------------------------------------------------------
|
240 |
-
|
241 |
-
|
242 |
-
def convert_tf_discriminator(tf_D):
|
243 |
-
if tf_D.version < 4:
|
244 |
-
raise ValueError('TensorFlow pickle version too low')
|
245 |
-
|
246 |
-
# Collect kwargs.
|
247 |
-
tf_kwargs = tf_D.static_kwargs
|
248 |
-
known_kwargs = set()
|
249 |
-
|
250 |
-
def kwarg(tf_name, default=None):
|
251 |
-
known_kwargs.add(tf_name)
|
252 |
-
return tf_kwargs.get(tf_name, default)
|
253 |
-
|
254 |
-
# Convert kwargs.
|
255 |
-
kwargs = dnnlib.EasyDict(
|
256 |
-
c_dim=kwarg('label_size', 0),
|
257 |
-
img_resolution=kwarg('resolution', 1024),
|
258 |
-
img_channels=kwarg('num_channels', 3),
|
259 |
-
architecture=kwarg('architecture', 'resnet'),
|
260 |
-
channel_base=kwarg('fmap_base', 16384) * 2,
|
261 |
-
channel_max=kwarg('fmap_max', 512),
|
262 |
-
num_fp16_res=kwarg('num_fp16_res', 0),
|
263 |
-
conv_clamp=kwarg('conv_clamp', None),
|
264 |
-
cmap_dim=kwarg('mapping_fmaps', None),
|
265 |
-
block_kwargs=dnnlib.EasyDict(
|
266 |
-
activation=kwarg('nonlinearity', 'lrelu'),
|
267 |
-
resample_filter=kwarg('resample_kernel', [1, 3, 3, 1]),
|
268 |
-
freeze_layers=kwarg('freeze_layers', 0),
|
269 |
-
),
|
270 |
-
mapping_kwargs=dnnlib.EasyDict(
|
271 |
-
num_layers=kwarg('mapping_layers', 0),
|
272 |
-
embed_features=kwarg('mapping_fmaps', None),
|
273 |
-
layer_features=kwarg('mapping_fmaps', None),
|
274 |
-
activation=kwarg('nonlinearity', 'lrelu'),
|
275 |
-
lr_multiplier=kwarg('mapping_lrmul', 0.1),
|
276 |
-
),
|
277 |
-
epilogue_kwargs=dnnlib.EasyDict(
|
278 |
-
mbstd_group_size=kwarg('mbstd_group_size', None),
|
279 |
-
mbstd_num_channels=kwarg('mbstd_num_features', 1),
|
280 |
-
activation=kwarg('nonlinearity', 'lrelu'),
|
281 |
-
),
|
282 |
-
)
|
283 |
-
|
284 |
-
# Check for unknown kwargs.
|
285 |
-
kwarg('structure')
|
286 |
-
kwarg('conditioning')
|
287 |
-
unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
|
288 |
-
if len(unknown_kwargs) > 0:
|
289 |
-
raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
|
290 |
-
|
291 |
-
# Collect params.
|
292 |
-
tf_params = _collect_tf_params(tf_D)
|
293 |
-
for name, value in list(tf_params.items()):
|
294 |
-
match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
|
295 |
-
if match:
|
296 |
-
r = kwargs.img_resolution // (2 ** int(match.group(1)))
|
297 |
-
tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
|
298 |
-
kwargs.architecture = 'orig'
|
299 |
-
# for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
|
300 |
-
|
301 |
-
# Convert params.
|
302 |
-
from training import networks_stylegan2
|
303 |
-
D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
|
304 |
-
# pylint: disable=unnecessary-lambda
|
305 |
-
# pylint: disable=f-string-without-interpolation
|
306 |
-
_populate_module_params(D,
|
307 |
-
r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(
|
308 |
-
3, 2, 0, 1),
|
309 |
-
r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
|
310 |
-
r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(
|
311 |
-
3, 2, 0, 1),
|
312 |
-
r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[
|
313 |
-
f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
|
314 |
-
r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(
|
315 |
-
3, 2, 0, 1),
|
316 |
-
r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(
|
317 |
-
),
|
318 |
-
r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
|
319 |
-
r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(
|
320 |
-
),
|
321 |
-
r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
|
322 |
-
r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(
|
323 |
-
3, 2, 0, 1),
|
324 |
-
r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
|
325 |
-
r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(
|
326 |
-
),
|
327 |
-
r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
|
328 |
-
r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(
|
329 |
-
),
|
330 |
-
r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
|
331 |
-
r'.*\.resample_filter', None,
|
332 |
-
)
|
333 |
-
return D
|
334 |
-
|
335 |
-
# ----------------------------------------------------------------------------
|
336 |
-
|
337 |
-
|
338 |
-
@click.command()
|
339 |
-
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
|
340 |
-
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
|
341 |
-
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
|
342 |
-
def convert_network_pickle(source, dest, force_fp16):
|
343 |
-
"""Convert legacy network pickle into the native PyTorch format.
|
344 |
-
|
345 |
-
The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
|
346 |
-
It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
|
347 |
-
|
348 |
-
Example:
|
349 |
-
|
350 |
-
\b
|
351 |
-
python legacy.py \\
|
352 |
-
--source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
|
353 |
-
--dest=stylegan2-cat-config-f.pkl
|
354 |
-
"""
|
355 |
-
print(f'Loading "{source}"...')
|
356 |
-
with dnnlib.util.open_url(source) as f:
|
357 |
-
data = load_network_pkl(f, force_fp16=force_fp16)
|
358 |
-
print(f'Saving "{dest}"...')
|
359 |
-
with open(dest, 'wb') as f:
|
360 |
-
pickle.dump(data, f)
|
361 |
-
print('Done.')
|
362 |
-
|
363 |
-
# ----------------------------------------------------------------------------
|
364 |
-
|
365 |
-
|
366 |
-
if __name__ == "__main__":
|
367 |
-
convert_network_pickle() # pylint: disable=no-value-for-parameter
|
368 |
-
|
369 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/textual_inversion/textual_inversion_flax.py
DELETED
@@ -1,681 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import logging
|
3 |
-
import math
|
4 |
-
import os
|
5 |
-
import random
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
import jax
|
9 |
-
import jax.numpy as jnp
|
10 |
-
import numpy as np
|
11 |
-
import optax
|
12 |
-
import PIL
|
13 |
-
import torch
|
14 |
-
import torch.utils.checkpoint
|
15 |
-
import transformers
|
16 |
-
from flax import jax_utils
|
17 |
-
from flax.training import train_state
|
18 |
-
from flax.training.common_utils import shard
|
19 |
-
from huggingface_hub import create_repo, upload_folder
|
20 |
-
|
21 |
-
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
|
22 |
-
from packaging import version
|
23 |
-
from PIL import Image
|
24 |
-
from torch.utils.data import Dataset
|
25 |
-
from torchvision import transforms
|
26 |
-
from tqdm.auto import tqdm
|
27 |
-
from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
|
28 |
-
|
29 |
-
from diffusers import (
|
30 |
-
FlaxAutoencoderKL,
|
31 |
-
FlaxDDPMScheduler,
|
32 |
-
FlaxPNDMScheduler,
|
33 |
-
FlaxStableDiffusionPipeline,
|
34 |
-
FlaxUNet2DConditionModel,
|
35 |
-
)
|
36 |
-
from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
|
37 |
-
from diffusers.utils import check_min_version
|
38 |
-
|
39 |
-
|
40 |
-
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
41 |
-
PIL_INTERPOLATION = {
|
42 |
-
"linear": PIL.Image.Resampling.BILINEAR,
|
43 |
-
"bilinear": PIL.Image.Resampling.BILINEAR,
|
44 |
-
"bicubic": PIL.Image.Resampling.BICUBIC,
|
45 |
-
"lanczos": PIL.Image.Resampling.LANCZOS,
|
46 |
-
"nearest": PIL.Image.Resampling.NEAREST,
|
47 |
-
}
|
48 |
-
else:
|
49 |
-
PIL_INTERPOLATION = {
|
50 |
-
"linear": PIL.Image.LINEAR,
|
51 |
-
"bilinear": PIL.Image.BILINEAR,
|
52 |
-
"bicubic": PIL.Image.BICUBIC,
|
53 |
-
"lanczos": PIL.Image.LANCZOS,
|
54 |
-
"nearest": PIL.Image.NEAREST,
|
55 |
-
}
|
56 |
-
# ------------------------------------------------------------------------------
|
57 |
-
|
58 |
-
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
59 |
-
check_min_version("0.19.0")
|
60 |
-
|
61 |
-
logger = logging.getLogger(__name__)
|
62 |
-
|
63 |
-
|
64 |
-
def parse_args():
|
65 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
66 |
-
parser.add_argument(
|
67 |
-
"--pretrained_model_name_or_path",
|
68 |
-
type=str,
|
69 |
-
default=None,
|
70 |
-
required=True,
|
71 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
72 |
-
)
|
73 |
-
parser.add_argument(
|
74 |
-
"--tokenizer_name",
|
75 |
-
type=str,
|
76 |
-
default=None,
|
77 |
-
help="Pretrained tokenizer name or path if not the same as model_name",
|
78 |
-
)
|
79 |
-
parser.add_argument(
|
80 |
-
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
|
81 |
-
)
|
82 |
-
parser.add_argument(
|
83 |
-
"--placeholder_token",
|
84 |
-
type=str,
|
85 |
-
default=None,
|
86 |
-
required=True,
|
87 |
-
help="A token to use as a placeholder for the concept.",
|
88 |
-
)
|
89 |
-
parser.add_argument(
|
90 |
-
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
|
91 |
-
)
|
92 |
-
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
|
93 |
-
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
|
94 |
-
parser.add_argument(
|
95 |
-
"--output_dir",
|
96 |
-
type=str,
|
97 |
-
default="text-inversion-model",
|
98 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
99 |
-
)
|
100 |
-
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
|
101 |
-
parser.add_argument(
|
102 |
-
"--resolution",
|
103 |
-
type=int,
|
104 |
-
default=512,
|
105 |
-
help=(
|
106 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
107 |
-
" resolution"
|
108 |
-
),
|
109 |
-
)
|
110 |
-
parser.add_argument(
|
111 |
-
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
|
112 |
-
)
|
113 |
-
parser.add_argument(
|
114 |
-
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
|
115 |
-
)
|
116 |
-
parser.add_argument("--num_train_epochs", type=int, default=100)
|
117 |
-
parser.add_argument(
|
118 |
-
"--max_train_steps",
|
119 |
-
type=int,
|
120 |
-
default=5000,
|
121 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
122 |
-
)
|
123 |
-
parser.add_argument(
|
124 |
-
"--save_steps",
|
125 |
-
type=int,
|
126 |
-
default=500,
|
127 |
-
help="Save learned_embeds.bin every X updates steps.",
|
128 |
-
)
|
129 |
-
parser.add_argument(
|
130 |
-
"--learning_rate",
|
131 |
-
type=float,
|
132 |
-
default=1e-4,
|
133 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
134 |
-
)
|
135 |
-
parser.add_argument(
|
136 |
-
"--scale_lr",
|
137 |
-
action="store_true",
|
138 |
-
default=True,
|
139 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
140 |
-
)
|
141 |
-
parser.add_argument(
|
142 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
143 |
-
)
|
144 |
-
parser.add_argument(
|
145 |
-
"--revision",
|
146 |
-
type=str,
|
147 |
-
default=None,
|
148 |
-
required=False,
|
149 |
-
help="Revision of pretrained model identifier from huggingface.co/models.",
|
150 |
-
)
|
151 |
-
parser.add_argument(
|
152 |
-
"--lr_scheduler",
|
153 |
-
type=str,
|
154 |
-
default="constant",
|
155 |
-
help=(
|
156 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
157 |
-
' "constant", "constant_with_warmup"]'
|
158 |
-
),
|
159 |
-
)
|
160 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
161 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
162 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
163 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
164 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
165 |
-
parser.add_argument(
|
166 |
-
"--use_auth_token",
|
167 |
-
action="store_true",
|
168 |
-
help=(
|
169 |
-
"Will use the token generated when running `huggingface-cli login` (necessary to use this script with"
|
170 |
-
" private models)."
|
171 |
-
),
|
172 |
-
)
|
173 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
174 |
-
parser.add_argument(
|
175 |
-
"--hub_model_id",
|
176 |
-
type=str,
|
177 |
-
default=None,
|
178 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
179 |
-
)
|
180 |
-
parser.add_argument(
|
181 |
-
"--logging_dir",
|
182 |
-
type=str,
|
183 |
-
default="logs",
|
184 |
-
help=(
|
185 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
186 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
187 |
-
),
|
188 |
-
)
|
189 |
-
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
190 |
-
|
191 |
-
args = parser.parse_args()
|
192 |
-
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
193 |
-
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
194 |
-
args.local_rank = env_local_rank
|
195 |
-
|
196 |
-
if args.train_data_dir is None:
|
197 |
-
raise ValueError("You must specify a train data directory.")
|
198 |
-
|
199 |
-
return args
|
200 |
-
|
201 |
-
|
202 |
-
imagenet_templates_small = [
|
203 |
-
"a photo of a {}",
|
204 |
-
"a rendering of a {}",
|
205 |
-
"a cropped photo of the {}",
|
206 |
-
"the photo of a {}",
|
207 |
-
"a photo of a clean {}",
|
208 |
-
"a photo of a dirty {}",
|
209 |
-
"a dark photo of the {}",
|
210 |
-
"a photo of my {}",
|
211 |
-
"a photo of the cool {}",
|
212 |
-
"a close-up photo of a {}",
|
213 |
-
"a bright photo of the {}",
|
214 |
-
"a cropped photo of a {}",
|
215 |
-
"a photo of the {}",
|
216 |
-
"a good photo of the {}",
|
217 |
-
"a photo of one {}",
|
218 |
-
"a close-up photo of the {}",
|
219 |
-
"a rendition of the {}",
|
220 |
-
"a photo of the clean {}",
|
221 |
-
"a rendition of a {}",
|
222 |
-
"a photo of a nice {}",
|
223 |
-
"a good photo of a {}",
|
224 |
-
"a photo of the nice {}",
|
225 |
-
"a photo of the small {}",
|
226 |
-
"a photo of the weird {}",
|
227 |
-
"a photo of the large {}",
|
228 |
-
"a photo of a cool {}",
|
229 |
-
"a photo of a small {}",
|
230 |
-
]
|
231 |
-
|
232 |
-
imagenet_style_templates_small = [
|
233 |
-
"a painting in the style of {}",
|
234 |
-
"a rendering in the style of {}",
|
235 |
-
"a cropped painting in the style of {}",
|
236 |
-
"the painting in the style of {}",
|
237 |
-
"a clean painting in the style of {}",
|
238 |
-
"a dirty painting in the style of {}",
|
239 |
-
"a dark painting in the style of {}",
|
240 |
-
"a picture in the style of {}",
|
241 |
-
"a cool painting in the style of {}",
|
242 |
-
"a close-up painting in the style of {}",
|
243 |
-
"a bright painting in the style of {}",
|
244 |
-
"a cropped painting in the style of {}",
|
245 |
-
"a good painting in the style of {}",
|
246 |
-
"a close-up painting in the style of {}",
|
247 |
-
"a rendition in the style of {}",
|
248 |
-
"a nice painting in the style of {}",
|
249 |
-
"a small painting in the style of {}",
|
250 |
-
"a weird painting in the style of {}",
|
251 |
-
"a large painting in the style of {}",
|
252 |
-
]
|
253 |
-
|
254 |
-
|
255 |
-
class TextualInversionDataset(Dataset):
|
256 |
-
def __init__(
|
257 |
-
self,
|
258 |
-
data_root,
|
259 |
-
tokenizer,
|
260 |
-
learnable_property="object", # [object, style]
|
261 |
-
size=512,
|
262 |
-
repeats=100,
|
263 |
-
interpolation="bicubic",
|
264 |
-
flip_p=0.5,
|
265 |
-
set="train",
|
266 |
-
placeholder_token="*",
|
267 |
-
center_crop=False,
|
268 |
-
):
|
269 |
-
self.data_root = data_root
|
270 |
-
self.tokenizer = tokenizer
|
271 |
-
self.learnable_property = learnable_property
|
272 |
-
self.size = size
|
273 |
-
self.placeholder_token = placeholder_token
|
274 |
-
self.center_crop = center_crop
|
275 |
-
self.flip_p = flip_p
|
276 |
-
|
277 |
-
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
|
278 |
-
|
279 |
-
self.num_images = len(self.image_paths)
|
280 |
-
self._length = self.num_images
|
281 |
-
|
282 |
-
if set == "train":
|
283 |
-
self._length = self.num_images * repeats
|
284 |
-
|
285 |
-
self.interpolation = {
|
286 |
-
"linear": PIL_INTERPOLATION["linear"],
|
287 |
-
"bilinear": PIL_INTERPOLATION["bilinear"],
|
288 |
-
"bicubic": PIL_INTERPOLATION["bicubic"],
|
289 |
-
"lanczos": PIL_INTERPOLATION["lanczos"],
|
290 |
-
}[interpolation]
|
291 |
-
|
292 |
-
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
|
293 |
-
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
|
294 |
-
|
295 |
-
def __len__(self):
|
296 |
-
return self._length
|
297 |
-
|
298 |
-
def __getitem__(self, i):
|
299 |
-
example = {}
|
300 |
-
image = Image.open(self.image_paths[i % self.num_images])
|
301 |
-
|
302 |
-
if not image.mode == "RGB":
|
303 |
-
image = image.convert("RGB")
|
304 |
-
|
305 |
-
placeholder_string = self.placeholder_token
|
306 |
-
text = random.choice(self.templates).format(placeholder_string)
|
307 |
-
|
308 |
-
example["input_ids"] = self.tokenizer(
|
309 |
-
text,
|
310 |
-
padding="max_length",
|
311 |
-
truncation=True,
|
312 |
-
max_length=self.tokenizer.model_max_length,
|
313 |
-
return_tensors="pt",
|
314 |
-
).input_ids[0]
|
315 |
-
|
316 |
-
# default to score-sde preprocessing
|
317 |
-
img = np.array(image).astype(np.uint8)
|
318 |
-
|
319 |
-
if self.center_crop:
|
320 |
-
crop = min(img.shape[0], img.shape[1])
|
321 |
-
(
|
322 |
-
h,
|
323 |
-
w,
|
324 |
-
) = (
|
325 |
-
img.shape[0],
|
326 |
-
img.shape[1],
|
327 |
-
)
|
328 |
-
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
|
329 |
-
|
330 |
-
image = Image.fromarray(img)
|
331 |
-
image = image.resize((self.size, self.size), resample=self.interpolation)
|
332 |
-
|
333 |
-
image = self.flip_transform(image)
|
334 |
-
image = np.array(image).astype(np.uint8)
|
335 |
-
image = (image / 127.5 - 1.0).astype(np.float32)
|
336 |
-
|
337 |
-
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
|
338 |
-
return example
|
339 |
-
|
340 |
-
|
341 |
-
def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
|
342 |
-
if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
|
343 |
-
return
|
344 |
-
model.config.vocab_size = new_num_tokens
|
345 |
-
|
346 |
-
params = model.params
|
347 |
-
old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
|
348 |
-
old_num_tokens, emb_dim = old_embeddings.shape
|
349 |
-
|
350 |
-
initializer = jax.nn.initializers.normal()
|
351 |
-
|
352 |
-
new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
|
353 |
-
new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
|
354 |
-
new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
|
355 |
-
params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings
|
356 |
-
|
357 |
-
model.params = params
|
358 |
-
return model
|
359 |
-
|
360 |
-
|
361 |
-
def get_params_to_save(params):
|
362 |
-
return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
|
363 |
-
|
364 |
-
|
365 |
-
def main():
|
366 |
-
args = parse_args()
|
367 |
-
|
368 |
-
if args.seed is not None:
|
369 |
-
set_seed(args.seed)
|
370 |
-
|
371 |
-
if jax.process_index() == 0:
|
372 |
-
if args.output_dir is not None:
|
373 |
-
os.makedirs(args.output_dir, exist_ok=True)
|
374 |
-
|
375 |
-
if args.push_to_hub:
|
376 |
-
repo_id = create_repo(
|
377 |
-
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
|
378 |
-
).repo_id
|
379 |
-
|
380 |
-
# Make one log on every process with the configuration for debugging.
|
381 |
-
logging.basicConfig(
|
382 |
-
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
383 |
-
datefmt="%m/%d/%Y %H:%M:%S",
|
384 |
-
level=logging.INFO,
|
385 |
-
)
|
386 |
-
# Setup logging, we only want one process per machine to log things on the screen.
|
387 |
-
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
|
388 |
-
if jax.process_index() == 0:
|
389 |
-
transformers.utils.logging.set_verbosity_info()
|
390 |
-
else:
|
391 |
-
transformers.utils.logging.set_verbosity_error()
|
392 |
-
|
393 |
-
# Load the tokenizer and add the placeholder token as a additional special token
|
394 |
-
if args.tokenizer_name:
|
395 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
|
396 |
-
elif args.pretrained_model_name_or_path:
|
397 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
|
398 |
-
|
399 |
-
# Add the placeholder token in tokenizer
|
400 |
-
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
|
401 |
-
if num_added_tokens == 0:
|
402 |
-
raise ValueError(
|
403 |
-
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
|
404 |
-
" `placeholder_token` that is not already in the tokenizer."
|
405 |
-
)
|
406 |
-
|
407 |
-
# Convert the initializer_token, placeholder_token to ids
|
408 |
-
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
|
409 |
-
# Check if initializer_token is a single token or a sequence of tokens
|
410 |
-
if len(token_ids) > 1:
|
411 |
-
raise ValueError("The initializer token must be a single token.")
|
412 |
-
|
413 |
-
initializer_token_id = token_ids[0]
|
414 |
-
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
|
415 |
-
|
416 |
-
# Load models and create wrapper for stable diffusion
|
417 |
-
text_encoder = FlaxCLIPTextModel.from_pretrained(
|
418 |
-
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
419 |
-
)
|
420 |
-
vae, vae_params = FlaxAutoencoderKL.from_pretrained(
|
421 |
-
args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
|
422 |
-
)
|
423 |
-
unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
|
424 |
-
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
|
425 |
-
)
|
426 |
-
|
427 |
-
# Create sampling rng
|
428 |
-
rng = jax.random.PRNGKey(args.seed)
|
429 |
-
rng, _ = jax.random.split(rng)
|
430 |
-
# Resize the token embeddings as we are adding new special tokens to the tokenizer
|
431 |
-
text_encoder = resize_token_embeddings(
|
432 |
-
text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng
|
433 |
-
)
|
434 |
-
original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"]
|
435 |
-
|
436 |
-
train_dataset = TextualInversionDataset(
|
437 |
-
data_root=args.train_data_dir,
|
438 |
-
tokenizer=tokenizer,
|
439 |
-
size=args.resolution,
|
440 |
-
placeholder_token=args.placeholder_token,
|
441 |
-
repeats=args.repeats,
|
442 |
-
learnable_property=args.learnable_property,
|
443 |
-
center_crop=args.center_crop,
|
444 |
-
set="train",
|
445 |
-
)
|
446 |
-
|
447 |
-
def collate_fn(examples):
|
448 |
-
pixel_values = torch.stack([example["pixel_values"] for example in examples])
|
449 |
-
input_ids = torch.stack([example["input_ids"] for example in examples])
|
450 |
-
|
451 |
-
batch = {"pixel_values": pixel_values, "input_ids": input_ids}
|
452 |
-
batch = {k: v.numpy() for k, v in batch.items()}
|
453 |
-
|
454 |
-
return batch
|
455 |
-
|
456 |
-
total_train_batch_size = args.train_batch_size * jax.local_device_count()
|
457 |
-
train_dataloader = torch.utils.data.DataLoader(
|
458 |
-
train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
|
459 |
-
)
|
460 |
-
|
461 |
-
# Optimization
|
462 |
-
if args.scale_lr:
|
463 |
-
args.learning_rate = args.learning_rate * total_train_batch_size
|
464 |
-
|
465 |
-
constant_scheduler = optax.constant_schedule(args.learning_rate)
|
466 |
-
|
467 |
-
optimizer = optax.adamw(
|
468 |
-
learning_rate=constant_scheduler,
|
469 |
-
b1=args.adam_beta1,
|
470 |
-
b2=args.adam_beta2,
|
471 |
-
eps=args.adam_epsilon,
|
472 |
-
weight_decay=args.adam_weight_decay,
|
473 |
-
)
|
474 |
-
|
475 |
-
def create_mask(params, label_fn):
|
476 |
-
def _map(params, mask, label_fn):
|
477 |
-
for k in params:
|
478 |
-
if label_fn(k):
|
479 |
-
mask[k] = "token_embedding"
|
480 |
-
else:
|
481 |
-
if isinstance(params[k], dict):
|
482 |
-
mask[k] = {}
|
483 |
-
_map(params[k], mask[k], label_fn)
|
484 |
-
else:
|
485 |
-
mask[k] = "zero"
|
486 |
-
|
487 |
-
mask = {}
|
488 |
-
_map(params, mask, label_fn)
|
489 |
-
return mask
|
490 |
-
|
491 |
-
def zero_grads():
|
492 |
-
# from https://github.com/deepmind/optax/issues/159#issuecomment-896459491
|
493 |
-
def init_fn(_):
|
494 |
-
return ()
|
495 |
-
|
496 |
-
def update_fn(updates, state, params=None):
|
497 |
-
return jax.tree_util.tree_map(jnp.zeros_like, updates), ()
|
498 |
-
|
499 |
-
return optax.GradientTransformation(init_fn, update_fn)
|
500 |
-
|
501 |
-
# Zero out gradients of layers other than the token embedding layer
|
502 |
-
tx = optax.multi_transform(
|
503 |
-
{"token_embedding": optimizer, "zero": zero_grads()},
|
504 |
-
create_mask(text_encoder.params, lambda s: s == "token_embedding"),
|
505 |
-
)
|
506 |
-
|
507 |
-
state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx)
|
508 |
-
|
509 |
-
noise_scheduler = FlaxDDPMScheduler(
|
510 |
-
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
|
511 |
-
)
|
512 |
-
noise_scheduler_state = noise_scheduler.create_state()
|
513 |
-
|
514 |
-
# Initialize our training
|
515 |
-
train_rngs = jax.random.split(rng, jax.local_device_count())
|
516 |
-
|
517 |
-
# Define gradient train step fn
|
518 |
-
def train_step(state, vae_params, unet_params, batch, train_rng):
|
519 |
-
dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
|
520 |
-
|
521 |
-
def compute_loss(params):
|
522 |
-
vae_outputs = vae.apply(
|
523 |
-
{"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
|
524 |
-
)
|
525 |
-
latents = vae_outputs.latent_dist.sample(sample_rng)
|
526 |
-
# (NHWC) -> (NCHW)
|
527 |
-
latents = jnp.transpose(latents, (0, 3, 1, 2))
|
528 |
-
latents = latents * vae.config.scaling_factor
|
529 |
-
|
530 |
-
noise_rng, timestep_rng = jax.random.split(sample_rng)
|
531 |
-
noise = jax.random.normal(noise_rng, latents.shape)
|
532 |
-
bsz = latents.shape[0]
|
533 |
-
timesteps = jax.random.randint(
|
534 |
-
timestep_rng,
|
535 |
-
(bsz,),
|
536 |
-
0,
|
537 |
-
noise_scheduler.config.num_train_timesteps,
|
538 |
-
)
|
539 |
-
noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
|
540 |
-
encoder_hidden_states = state.apply_fn(
|
541 |
-
batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True
|
542 |
-
)[0]
|
543 |
-
# Predict the noise residual and compute loss
|
544 |
-
model_pred = unet.apply(
|
545 |
-
{"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False
|
546 |
-
).sample
|
547 |
-
|
548 |
-
# Get the target for loss depending on the prediction type
|
549 |
-
if noise_scheduler.config.prediction_type == "epsilon":
|
550 |
-
target = noise
|
551 |
-
elif noise_scheduler.config.prediction_type == "v_prediction":
|
552 |
-
target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
|
553 |
-
else:
|
554 |
-
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
|
555 |
-
|
556 |
-
loss = (target - model_pred) ** 2
|
557 |
-
loss = loss.mean()
|
558 |
-
|
559 |
-
return loss
|
560 |
-
|
561 |
-
grad_fn = jax.value_and_grad(compute_loss)
|
562 |
-
loss, grad = grad_fn(state.params)
|
563 |
-
grad = jax.lax.pmean(grad, "batch")
|
564 |
-
new_state = state.apply_gradients(grads=grad)
|
565 |
-
|
566 |
-
# Keep the token embeddings fixed except the newly added embeddings for the concept,
|
567 |
-
# as we only want to optimize the concept embeddings
|
568 |
-
token_embeds = original_token_embeds.at[placeholder_token_id].set(
|
569 |
-
new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id]
|
570 |
-
)
|
571 |
-
new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds
|
572 |
-
|
573 |
-
metrics = {"loss": loss}
|
574 |
-
metrics = jax.lax.pmean(metrics, axis_name="batch")
|
575 |
-
return new_state, metrics, new_train_rng
|
576 |
-
|
577 |
-
# Create parallel version of the train and eval step
|
578 |
-
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
|
579 |
-
|
580 |
-
# Replicate the train state on each device
|
581 |
-
state = jax_utils.replicate(state)
|
582 |
-
vae_params = jax_utils.replicate(vae_params)
|
583 |
-
unet_params = jax_utils.replicate(unet_params)
|
584 |
-
|
585 |
-
# Train!
|
586 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader))
|
587 |
-
|
588 |
-
# Scheduler and math around the number of training steps.
|
589 |
-
if args.max_train_steps is None:
|
590 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
591 |
-
|
592 |
-
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
593 |
-
|
594 |
-
logger.info("***** Running training *****")
|
595 |
-
logger.info(f" Num examples = {len(train_dataset)}")
|
596 |
-
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
597 |
-
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
598 |
-
logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
|
599 |
-
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
600 |
-
|
601 |
-
global_step = 0
|
602 |
-
|
603 |
-
epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0)
|
604 |
-
for epoch in epochs:
|
605 |
-
# ======================== Training ================================
|
606 |
-
|
607 |
-
train_metrics = []
|
608 |
-
|
609 |
-
steps_per_epoch = len(train_dataset) // total_train_batch_size
|
610 |
-
train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
|
611 |
-
# train
|
612 |
-
for batch in train_dataloader:
|
613 |
-
batch = shard(batch)
|
614 |
-
state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
|
615 |
-
train_metrics.append(train_metric)
|
616 |
-
|
617 |
-
train_step_progress_bar.update(1)
|
618 |
-
global_step += 1
|
619 |
-
|
620 |
-
if global_step >= args.max_train_steps:
|
621 |
-
break
|
622 |
-
if global_step % args.save_steps == 0:
|
623 |
-
learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][
|
624 |
-
"embedding"
|
625 |
-
][placeholder_token_id]
|
626 |
-
learned_embeds_dict = {args.placeholder_token: learned_embeds}
|
627 |
-
jnp.save(
|
628 |
-
os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict
|
629 |
-
)
|
630 |
-
|
631 |
-
train_metric = jax_utils.unreplicate(train_metric)
|
632 |
-
|
633 |
-
train_step_progress_bar.close()
|
634 |
-
epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
|
635 |
-
|
636 |
-
# Create the pipeline using using the trained modules and save it.
|
637 |
-
if jax.process_index() == 0:
|
638 |
-
scheduler = FlaxPNDMScheduler(
|
639 |
-
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
|
640 |
-
)
|
641 |
-
safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
|
642 |
-
"CompVis/stable-diffusion-safety-checker", from_pt=True
|
643 |
-
)
|
644 |
-
pipeline = FlaxStableDiffusionPipeline(
|
645 |
-
text_encoder=text_encoder,
|
646 |
-
vae=vae,
|
647 |
-
unet=unet,
|
648 |
-
tokenizer=tokenizer,
|
649 |
-
scheduler=scheduler,
|
650 |
-
safety_checker=safety_checker,
|
651 |
-
feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
|
652 |
-
)
|
653 |
-
|
654 |
-
pipeline.save_pretrained(
|
655 |
-
args.output_dir,
|
656 |
-
params={
|
657 |
-
"text_encoder": get_params_to_save(state.params),
|
658 |
-
"vae": get_params_to_save(vae_params),
|
659 |
-
"unet": get_params_to_save(unet_params),
|
660 |
-
"safety_checker": safety_checker.params,
|
661 |
-
},
|
662 |
-
)
|
663 |
-
|
664 |
-
# Also save the newly trained embeddings
|
665 |
-
learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
|
666 |
-
placeholder_token_id
|
667 |
-
]
|
668 |
-
learned_embeds_dict = {args.placeholder_token: learned_embeds}
|
669 |
-
jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)
|
670 |
-
|
671 |
-
if args.push_to_hub:
|
672 |
-
upload_folder(
|
673 |
-
repo_id=repo_id,
|
674 |
-
folder_path=args.output_dir,
|
675 |
-
commit_message="End of training",
|
676 |
-
ignore_patterns=["step_*", "epoch_*"],
|
677 |
-
)
|
678 |
-
|
679 |
-
|
680 |
-
if __name__ == "__main__":
|
681 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet.py
DELETED
@@ -1,1009 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
|
16 |
-
import inspect
|
17 |
-
import warnings
|
18 |
-
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import PIL.Image
|
22 |
-
import torch
|
23 |
-
import torch.nn.functional as F
|
24 |
-
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
25 |
-
|
26 |
-
from ...image_processor import VaeImageProcessor
|
27 |
-
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
28 |
-
from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
|
29 |
-
from ...schedulers import KarrasDiffusionSchedulers
|
30 |
-
from ...utils import (
|
31 |
-
is_accelerate_available,
|
32 |
-
is_accelerate_version,
|
33 |
-
is_compiled_module,
|
34 |
-
logging,
|
35 |
-
randn_tensor,
|
36 |
-
replace_example_docstring,
|
37 |
-
)
|
38 |
-
from ..pipeline_utils import DiffusionPipeline
|
39 |
-
from ..stable_diffusion import StableDiffusionPipelineOutput
|
40 |
-
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
41 |
-
from .multicontrolnet import MultiControlNetModel
|
42 |
-
|
43 |
-
|
44 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
45 |
-
|
46 |
-
|
47 |
-
EXAMPLE_DOC_STRING = """
|
48 |
-
Examples:
|
49 |
-
```py
|
50 |
-
>>> # !pip install opencv-python transformers accelerate
|
51 |
-
>>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
|
52 |
-
>>> from diffusers.utils import load_image
|
53 |
-
>>> import numpy as np
|
54 |
-
>>> import torch
|
55 |
-
|
56 |
-
>>> import cv2
|
57 |
-
>>> from PIL import Image
|
58 |
-
|
59 |
-
>>> # download an image
|
60 |
-
>>> image = load_image(
|
61 |
-
... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
|
62 |
-
... )
|
63 |
-
>>> image = np.array(image)
|
64 |
-
|
65 |
-
>>> # get canny image
|
66 |
-
>>> image = cv2.Canny(image, 100, 200)
|
67 |
-
>>> image = image[:, :, None]
|
68 |
-
>>> image = np.concatenate([image, image, image], axis=2)
|
69 |
-
>>> canny_image = Image.fromarray(image)
|
70 |
-
|
71 |
-
>>> # load control net and stable diffusion v1-5
|
72 |
-
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
|
73 |
-
>>> pipe = StableDiffusionControlNetPipeline.from_pretrained(
|
74 |
-
... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
|
75 |
-
... )
|
76 |
-
|
77 |
-
>>> # speed up diffusion process with faster scheduler and memory optimization
|
78 |
-
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
|
79 |
-
>>> # remove following line if xformers is not installed
|
80 |
-
>>> pipe.enable_xformers_memory_efficient_attention()
|
81 |
-
|
82 |
-
>>> pipe.enable_model_cpu_offload()
|
83 |
-
|
84 |
-
>>> # generate image
|
85 |
-
>>> generator = torch.manual_seed(0)
|
86 |
-
>>> image = pipe(
|
87 |
-
... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image
|
88 |
-
... ).images[0]
|
89 |
-
```
|
90 |
-
"""
|
91 |
-
|
92 |
-
|
93 |
-
class StableDiffusionControlNetPipeline(
|
94 |
-
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
95 |
-
):
|
96 |
-
r"""
|
97 |
-
Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.
|
98 |
-
|
99 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
100 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
101 |
-
|
102 |
-
In addition the pipeline inherits the following loading methods:
|
103 |
-
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
|
104 |
-
|
105 |
-
Args:
|
106 |
-
vae ([`AutoencoderKL`]):
|
107 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
108 |
-
text_encoder ([`CLIPTextModel`]):
|
109 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
110 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
111 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
112 |
-
tokenizer (`CLIPTokenizer`):
|
113 |
-
Tokenizer of class
|
114 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
115 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
116 |
-
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
117 |
-
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
|
118 |
-
as a list, the outputs from each ControlNet are added together to create one combined additional
|
119 |
-
conditioning.
|
120 |
-
scheduler ([`SchedulerMixin`]):
|
121 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
122 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
123 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
124 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
125 |
-
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
126 |
-
feature_extractor ([`CLIPImageProcessor`]):
|
127 |
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
128 |
-
"""
|
129 |
-
_optional_components = ["safety_checker", "feature_extractor"]
|
130 |
-
|
131 |
-
def __init__(
|
132 |
-
self,
|
133 |
-
vae: AutoencoderKL,
|
134 |
-
text_encoder: CLIPTextModel,
|
135 |
-
tokenizer: CLIPTokenizer,
|
136 |
-
unet: UNet2DConditionModel,
|
137 |
-
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
138 |
-
scheduler: KarrasDiffusionSchedulers,
|
139 |
-
safety_checker: StableDiffusionSafetyChecker,
|
140 |
-
feature_extractor: CLIPImageProcessor,
|
141 |
-
requires_safety_checker: bool = True,
|
142 |
-
):
|
143 |
-
super().__init__()
|
144 |
-
|
145 |
-
if safety_checker is None and requires_safety_checker:
|
146 |
-
logger.warning(
|
147 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
148 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
149 |
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
150 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
151 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
152 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
153 |
-
)
|
154 |
-
|
155 |
-
if safety_checker is not None and feature_extractor is None:
|
156 |
-
raise ValueError(
|
157 |
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
158 |
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
159 |
-
)
|
160 |
-
|
161 |
-
if isinstance(controlnet, (list, tuple)):
|
162 |
-
controlnet = MultiControlNetModel(controlnet)
|
163 |
-
|
164 |
-
self.register_modules(
|
165 |
-
vae=vae,
|
166 |
-
text_encoder=text_encoder,
|
167 |
-
tokenizer=tokenizer,
|
168 |
-
unet=unet,
|
169 |
-
controlnet=controlnet,
|
170 |
-
scheduler=scheduler,
|
171 |
-
safety_checker=safety_checker,
|
172 |
-
feature_extractor=feature_extractor,
|
173 |
-
)
|
174 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
175 |
-
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
176 |
-
self.control_image_processor = VaeImageProcessor(
|
177 |
-
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
178 |
-
)
|
179 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
180 |
-
|
181 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
|
182 |
-
def enable_vae_slicing(self):
|
183 |
-
r"""
|
184 |
-
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
185 |
-
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
186 |
-
"""
|
187 |
-
self.vae.enable_slicing()
|
188 |
-
|
189 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
|
190 |
-
def disable_vae_slicing(self):
|
191 |
-
r"""
|
192 |
-
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
193 |
-
computing decoding in one step.
|
194 |
-
"""
|
195 |
-
self.vae.disable_slicing()
|
196 |
-
|
197 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
|
198 |
-
def enable_vae_tiling(self):
|
199 |
-
r"""
|
200 |
-
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
201 |
-
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
202 |
-
processing larger images.
|
203 |
-
"""
|
204 |
-
self.vae.enable_tiling()
|
205 |
-
|
206 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
|
207 |
-
def disable_vae_tiling(self):
|
208 |
-
r"""
|
209 |
-
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
210 |
-
computing decoding in one step.
|
211 |
-
"""
|
212 |
-
self.vae.disable_tiling()
|
213 |
-
|
214 |
-
def enable_model_cpu_offload(self, gpu_id=0):
|
215 |
-
r"""
|
216 |
-
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
|
217 |
-
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
|
218 |
-
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
|
219 |
-
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
|
220 |
-
"""
|
221 |
-
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
222 |
-
from accelerate import cpu_offload_with_hook
|
223 |
-
else:
|
224 |
-
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
|
225 |
-
|
226 |
-
device = torch.device(f"cuda:{gpu_id}")
|
227 |
-
|
228 |
-
hook = None
|
229 |
-
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
|
230 |
-
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
231 |
-
|
232 |
-
if self.safety_checker is not None:
|
233 |
-
# the safety checker can offload the vae again
|
234 |
-
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
|
235 |
-
|
236 |
-
# control net hook has be manually offloaded as it alternates with unet
|
237 |
-
cpu_offload_with_hook(self.controlnet, device)
|
238 |
-
|
239 |
-
# We'll offload the last model manually.
|
240 |
-
self.final_offload_hook = hook
|
241 |
-
|
242 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
243 |
-
def _encode_prompt(
|
244 |
-
self,
|
245 |
-
prompt,
|
246 |
-
device,
|
247 |
-
num_images_per_prompt,
|
248 |
-
do_classifier_free_guidance,
|
249 |
-
negative_prompt=None,
|
250 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
251 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
252 |
-
lora_scale: Optional[float] = None,
|
253 |
-
):
|
254 |
-
r"""
|
255 |
-
Encodes the prompt into text encoder hidden states.
|
256 |
-
|
257 |
-
Args:
|
258 |
-
prompt (`str` or `List[str]`, *optional*):
|
259 |
-
prompt to be encoded
|
260 |
-
device: (`torch.device`):
|
261 |
-
torch device
|
262 |
-
num_images_per_prompt (`int`):
|
263 |
-
number of images that should be generated per prompt
|
264 |
-
do_classifier_free_guidance (`bool`):
|
265 |
-
whether to use classifier free guidance or not
|
266 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
267 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
268 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
269 |
-
less than `1`).
|
270 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
271 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
272 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
273 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
274 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
275 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
276 |
-
argument.
|
277 |
-
lora_scale (`float`, *optional*):
|
278 |
-
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
279 |
-
"""
|
280 |
-
# set lora scale so that monkey patched LoRA
|
281 |
-
# function of text encoder can correctly access it
|
282 |
-
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
283 |
-
self._lora_scale = lora_scale
|
284 |
-
|
285 |
-
if prompt is not None and isinstance(prompt, str):
|
286 |
-
batch_size = 1
|
287 |
-
elif prompt is not None and isinstance(prompt, list):
|
288 |
-
batch_size = len(prompt)
|
289 |
-
else:
|
290 |
-
batch_size = prompt_embeds.shape[0]
|
291 |
-
|
292 |
-
if prompt_embeds is None:
|
293 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
294 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
295 |
-
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
296 |
-
|
297 |
-
text_inputs = self.tokenizer(
|
298 |
-
prompt,
|
299 |
-
padding="max_length",
|
300 |
-
max_length=self.tokenizer.model_max_length,
|
301 |
-
truncation=True,
|
302 |
-
return_tensors="pt",
|
303 |
-
)
|
304 |
-
text_input_ids = text_inputs.input_ids
|
305 |
-
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
306 |
-
|
307 |
-
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
308 |
-
text_input_ids, untruncated_ids
|
309 |
-
):
|
310 |
-
removed_text = self.tokenizer.batch_decode(
|
311 |
-
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
312 |
-
)
|
313 |
-
logger.warning(
|
314 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
315 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
316 |
-
)
|
317 |
-
|
318 |
-
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
319 |
-
attention_mask = text_inputs.attention_mask.to(device)
|
320 |
-
else:
|
321 |
-
attention_mask = None
|
322 |
-
|
323 |
-
prompt_embeds = self.text_encoder(
|
324 |
-
text_input_ids.to(device),
|
325 |
-
attention_mask=attention_mask,
|
326 |
-
)
|
327 |
-
prompt_embeds = prompt_embeds[0]
|
328 |
-
|
329 |
-
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
330 |
-
|
331 |
-
bs_embed, seq_len, _ = prompt_embeds.shape
|
332 |
-
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
333 |
-
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
334 |
-
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
335 |
-
|
336 |
-
# get unconditional embeddings for classifier free guidance
|
337 |
-
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
338 |
-
uncond_tokens: List[str]
|
339 |
-
if negative_prompt is None:
|
340 |
-
uncond_tokens = [""] * batch_size
|
341 |
-
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
342 |
-
raise TypeError(
|
343 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
344 |
-
f" {type(prompt)}."
|
345 |
-
)
|
346 |
-
elif isinstance(negative_prompt, str):
|
347 |
-
uncond_tokens = [negative_prompt]
|
348 |
-
elif batch_size != len(negative_prompt):
|
349 |
-
raise ValueError(
|
350 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
351 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
352 |
-
" the batch size of `prompt`."
|
353 |
-
)
|
354 |
-
else:
|
355 |
-
uncond_tokens = negative_prompt
|
356 |
-
|
357 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
358 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
359 |
-
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
360 |
-
|
361 |
-
max_length = prompt_embeds.shape[1]
|
362 |
-
uncond_input = self.tokenizer(
|
363 |
-
uncond_tokens,
|
364 |
-
padding="max_length",
|
365 |
-
max_length=max_length,
|
366 |
-
truncation=True,
|
367 |
-
return_tensors="pt",
|
368 |
-
)
|
369 |
-
|
370 |
-
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
371 |
-
attention_mask = uncond_input.attention_mask.to(device)
|
372 |
-
else:
|
373 |
-
attention_mask = None
|
374 |
-
|
375 |
-
negative_prompt_embeds = self.text_encoder(
|
376 |
-
uncond_input.input_ids.to(device),
|
377 |
-
attention_mask=attention_mask,
|
378 |
-
)
|
379 |
-
negative_prompt_embeds = negative_prompt_embeds[0]
|
380 |
-
|
381 |
-
if do_classifier_free_guidance:
|
382 |
-
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
383 |
-
seq_len = negative_prompt_embeds.shape[1]
|
384 |
-
|
385 |
-
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
386 |
-
|
387 |
-
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
388 |
-
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
389 |
-
|
390 |
-
# For classifier free guidance, we need to do two forward passes.
|
391 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
392 |
-
# to avoid doing two forward passes
|
393 |
-
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
394 |
-
|
395 |
-
return prompt_embeds
|
396 |
-
|
397 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
398 |
-
def run_safety_checker(self, image, device, dtype):
|
399 |
-
if self.safety_checker is None:
|
400 |
-
has_nsfw_concept = None
|
401 |
-
else:
|
402 |
-
if torch.is_tensor(image):
|
403 |
-
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
404 |
-
else:
|
405 |
-
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
406 |
-
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
407 |
-
image, has_nsfw_concept = self.safety_checker(
|
408 |
-
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
409 |
-
)
|
410 |
-
return image, has_nsfw_concept
|
411 |
-
|
412 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
413 |
-
def decode_latents(self, latents):
|
414 |
-
warnings.warn(
|
415 |
-
"The decode_latents method is deprecated and will be removed in a future version. Please"
|
416 |
-
" use VaeImageProcessor instead",
|
417 |
-
FutureWarning,
|
418 |
-
)
|
419 |
-
latents = 1 / self.vae.config.scaling_factor * latents
|
420 |
-
image = self.vae.decode(latents, return_dict=False)[0]
|
421 |
-
image = (image / 2 + 0.5).clamp(0, 1)
|
422 |
-
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
423 |
-
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
424 |
-
return image
|
425 |
-
|
426 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
427 |
-
def prepare_extra_step_kwargs(self, generator, eta):
|
428 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
429 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
430 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
431 |
-
# and should be between [0, 1]
|
432 |
-
|
433 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
434 |
-
extra_step_kwargs = {}
|
435 |
-
if accepts_eta:
|
436 |
-
extra_step_kwargs["eta"] = eta
|
437 |
-
|
438 |
-
# check if the scheduler accepts generator
|
439 |
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
440 |
-
if accepts_generator:
|
441 |
-
extra_step_kwargs["generator"] = generator
|
442 |
-
return extra_step_kwargs
|
443 |
-
|
444 |
-
def check_inputs(
|
445 |
-
self,
|
446 |
-
prompt,
|
447 |
-
image,
|
448 |
-
callback_steps,
|
449 |
-
negative_prompt=None,
|
450 |
-
prompt_embeds=None,
|
451 |
-
negative_prompt_embeds=None,
|
452 |
-
controlnet_conditioning_scale=1.0,
|
453 |
-
control_guidance_start=0.0,
|
454 |
-
control_guidance_end=1.0,
|
455 |
-
):
|
456 |
-
if (callback_steps is None) or (
|
457 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
458 |
-
):
|
459 |
-
raise ValueError(
|
460 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
461 |
-
f" {type(callback_steps)}."
|
462 |
-
)
|
463 |
-
|
464 |
-
if prompt is not None and prompt_embeds is not None:
|
465 |
-
raise ValueError(
|
466 |
-
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
467 |
-
" only forward one of the two."
|
468 |
-
)
|
469 |
-
elif prompt is None and prompt_embeds is None:
|
470 |
-
raise ValueError(
|
471 |
-
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
472 |
-
)
|
473 |
-
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
474 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
475 |
-
|
476 |
-
if negative_prompt is not None and negative_prompt_embeds is not None:
|
477 |
-
raise ValueError(
|
478 |
-
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
479 |
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
480 |
-
)
|
481 |
-
|
482 |
-
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
483 |
-
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
484 |
-
raise ValueError(
|
485 |
-
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
486 |
-
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
487 |
-
f" {negative_prompt_embeds.shape}."
|
488 |
-
)
|
489 |
-
|
490 |
-
# `prompt` needs more sophisticated handling when there are multiple
|
491 |
-
# conditionings.
|
492 |
-
if isinstance(self.controlnet, MultiControlNetModel):
|
493 |
-
if isinstance(prompt, list):
|
494 |
-
logger.warning(
|
495 |
-
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
496 |
-
" prompts. The conditionings will be fixed across the prompts."
|
497 |
-
)
|
498 |
-
|
499 |
-
# Check `image`
|
500 |
-
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
501 |
-
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
502 |
-
)
|
503 |
-
if (
|
504 |
-
isinstance(self.controlnet, ControlNetModel)
|
505 |
-
or is_compiled
|
506 |
-
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
507 |
-
):
|
508 |
-
self.check_image(image, prompt, prompt_embeds)
|
509 |
-
elif (
|
510 |
-
isinstance(self.controlnet, MultiControlNetModel)
|
511 |
-
or is_compiled
|
512 |
-
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
513 |
-
):
|
514 |
-
if not isinstance(image, list):
|
515 |
-
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
516 |
-
|
517 |
-
# When `image` is a nested list:
|
518 |
-
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
519 |
-
elif any(isinstance(i, list) for i in image):
|
520 |
-
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
521 |
-
elif len(image) != len(self.controlnet.nets):
|
522 |
-
raise ValueError(
|
523 |
-
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
|
524 |
-
)
|
525 |
-
|
526 |
-
for image_ in image:
|
527 |
-
self.check_image(image_, prompt, prompt_embeds)
|
528 |
-
else:
|
529 |
-
assert False
|
530 |
-
|
531 |
-
# Check `controlnet_conditioning_scale`
|
532 |
-
if (
|
533 |
-
isinstance(self.controlnet, ControlNetModel)
|
534 |
-
or is_compiled
|
535 |
-
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
536 |
-
):
|
537 |
-
if not isinstance(controlnet_conditioning_scale, float):
|
538 |
-
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
539 |
-
elif (
|
540 |
-
isinstance(self.controlnet, MultiControlNetModel)
|
541 |
-
or is_compiled
|
542 |
-
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
543 |
-
):
|
544 |
-
if isinstance(controlnet_conditioning_scale, list):
|
545 |
-
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
546 |
-
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
547 |
-
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
548 |
-
self.controlnet.nets
|
549 |
-
):
|
550 |
-
raise ValueError(
|
551 |
-
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
552 |
-
" the same length as the number of controlnets"
|
553 |
-
)
|
554 |
-
else:
|
555 |
-
assert False
|
556 |
-
|
557 |
-
if len(control_guidance_start) != len(control_guidance_end):
|
558 |
-
raise ValueError(
|
559 |
-
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
560 |
-
)
|
561 |
-
|
562 |
-
if isinstance(self.controlnet, MultiControlNetModel):
|
563 |
-
if len(control_guidance_start) != len(self.controlnet.nets):
|
564 |
-
raise ValueError(
|
565 |
-
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
566 |
-
)
|
567 |
-
|
568 |
-
for start, end in zip(control_guidance_start, control_guidance_end):
|
569 |
-
if start >= end:
|
570 |
-
raise ValueError(
|
571 |
-
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
572 |
-
)
|
573 |
-
if start < 0.0:
|
574 |
-
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
575 |
-
if end > 1.0:
|
576 |
-
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
577 |
-
|
578 |
-
def check_image(self, image, prompt, prompt_embeds):
|
579 |
-
image_is_pil = isinstance(image, PIL.Image.Image)
|
580 |
-
image_is_tensor = isinstance(image, torch.Tensor)
|
581 |
-
image_is_np = isinstance(image, np.ndarray)
|
582 |
-
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
583 |
-
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
584 |
-
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
585 |
-
|
586 |
-
if (
|
587 |
-
not image_is_pil
|
588 |
-
and not image_is_tensor
|
589 |
-
and not image_is_np
|
590 |
-
and not image_is_pil_list
|
591 |
-
and not image_is_tensor_list
|
592 |
-
and not image_is_np_list
|
593 |
-
):
|
594 |
-
raise TypeError(
|
595 |
-
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
596 |
-
)
|
597 |
-
|
598 |
-
if image_is_pil:
|
599 |
-
image_batch_size = 1
|
600 |
-
else:
|
601 |
-
image_batch_size = len(image)
|
602 |
-
|
603 |
-
if prompt is not None and isinstance(prompt, str):
|
604 |
-
prompt_batch_size = 1
|
605 |
-
elif prompt is not None and isinstance(prompt, list):
|
606 |
-
prompt_batch_size = len(prompt)
|
607 |
-
elif prompt_embeds is not None:
|
608 |
-
prompt_batch_size = prompt_embeds.shape[0]
|
609 |
-
|
610 |
-
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
611 |
-
raise ValueError(
|
612 |
-
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
613 |
-
)
|
614 |
-
|
615 |
-
def prepare_image(
|
616 |
-
self,
|
617 |
-
image,
|
618 |
-
width,
|
619 |
-
height,
|
620 |
-
batch_size,
|
621 |
-
num_images_per_prompt,
|
622 |
-
device,
|
623 |
-
dtype,
|
624 |
-
do_classifier_free_guidance=False,
|
625 |
-
guess_mode=False,
|
626 |
-
):
|
627 |
-
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
628 |
-
image_batch_size = image.shape[0]
|
629 |
-
|
630 |
-
if image_batch_size == 1:
|
631 |
-
repeat_by = batch_size
|
632 |
-
else:
|
633 |
-
# image batch size is the same as prompt batch size
|
634 |
-
repeat_by = num_images_per_prompt
|
635 |
-
|
636 |
-
image = image.repeat_interleave(repeat_by, dim=0)
|
637 |
-
|
638 |
-
image = image.to(device=device, dtype=dtype)
|
639 |
-
|
640 |
-
if do_classifier_free_guidance and not guess_mode:
|
641 |
-
image = torch.cat([image] * 2)
|
642 |
-
|
643 |
-
return image
|
644 |
-
|
645 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
646 |
-
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
647 |
-
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
648 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
649 |
-
raise ValueError(
|
650 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
651 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
652 |
-
)
|
653 |
-
|
654 |
-
if latents is None:
|
655 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
656 |
-
else:
|
657 |
-
latents = latents.to(device)
|
658 |
-
|
659 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
660 |
-
latents = latents * self.scheduler.init_noise_sigma
|
661 |
-
return latents
|
662 |
-
|
663 |
-
@torch.no_grad()
|
664 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
665 |
-
def __call__(
|
666 |
-
self,
|
667 |
-
prompt: Union[str, List[str]] = None,
|
668 |
-
image: Union[
|
669 |
-
torch.FloatTensor,
|
670 |
-
PIL.Image.Image,
|
671 |
-
np.ndarray,
|
672 |
-
List[torch.FloatTensor],
|
673 |
-
List[PIL.Image.Image],
|
674 |
-
List[np.ndarray],
|
675 |
-
] = None,
|
676 |
-
height: Optional[int] = None,
|
677 |
-
width: Optional[int] = None,
|
678 |
-
num_inference_steps: int = 50,
|
679 |
-
guidance_scale: float = 7.5,
|
680 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
681 |
-
num_images_per_prompt: Optional[int] = 1,
|
682 |
-
eta: float = 0.0,
|
683 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
684 |
-
latents: Optional[torch.FloatTensor] = None,
|
685 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
686 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
687 |
-
output_type: Optional[str] = "pil",
|
688 |
-
return_dict: bool = True,
|
689 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
690 |
-
callback_steps: int = 1,
|
691 |
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
692 |
-
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
693 |
-
guess_mode: bool = False,
|
694 |
-
control_guidance_start: Union[float, List[float]] = 0.0,
|
695 |
-
control_guidance_end: Union[float, List[float]] = 1.0,
|
696 |
-
):
|
697 |
-
r"""
|
698 |
-
Function invoked when calling the pipeline for generation.
|
699 |
-
|
700 |
-
Args:
|
701 |
-
prompt (`str` or `List[str]`, *optional*):
|
702 |
-
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
703 |
-
instead.
|
704 |
-
image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
705 |
-
`List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
706 |
-
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
707 |
-
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
|
708 |
-
also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
|
709 |
-
height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
|
710 |
-
specified in init, images must be passed as a list such that each element of the list can be correctly
|
711 |
-
batched for input to a single controlnet.
|
712 |
-
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
713 |
-
The height in pixels of the generated image.
|
714 |
-
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
715 |
-
The width in pixels of the generated image.
|
716 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
717 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
718 |
-
expense of slower inference.
|
719 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
720 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
721 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
722 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
723 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
724 |
-
usually at the expense of lower image quality.
|
725 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
726 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
727 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
728 |
-
less than `1`).
|
729 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
730 |
-
The number of images to generate per prompt.
|
731 |
-
eta (`float`, *optional*, defaults to 0.0):
|
732 |
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
733 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
734 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
735 |
-
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
736 |
-
to make generation deterministic.
|
737 |
-
latents (`torch.FloatTensor`, *optional*):
|
738 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
739 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
740 |
-
tensor will ge generated by sampling using the supplied random `generator`.
|
741 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
742 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
743 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
744 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
745 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
746 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
747 |
-
argument.
|
748 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
749 |
-
The output format of the generate image. Choose between
|
750 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
751 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
752 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
753 |
-
plain tuple.
|
754 |
-
callback (`Callable`, *optional*):
|
755 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
756 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
757 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
758 |
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
759 |
-
called at every step.
|
760 |
-
cross_attention_kwargs (`dict`, *optional*):
|
761 |
-
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
762 |
-
`self.processor` in
|
763 |
-
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
764 |
-
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
765 |
-
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
766 |
-
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
767 |
-
corresponding scale as a list.
|
768 |
-
guess_mode (`bool`, *optional*, defaults to `False`):
|
769 |
-
In this mode, the ControlNet encoder will try best to recognize the content of the input image even if
|
770 |
-
you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
|
771 |
-
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
772 |
-
The percentage of total steps at which the controlnet starts applying.
|
773 |
-
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
774 |
-
The percentage of total steps at which the controlnet stops applying.
|
775 |
-
|
776 |
-
Examples:
|
777 |
-
|
778 |
-
Returns:
|
779 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
780 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
781 |
-
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
782 |
-
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
783 |
-
(nsfw) content, according to the `safety_checker`.
|
784 |
-
"""
|
785 |
-
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
786 |
-
|
787 |
-
# align format for control guidance
|
788 |
-
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
789 |
-
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
790 |
-
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
791 |
-
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
792 |
-
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
793 |
-
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
794 |
-
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
|
795 |
-
control_guidance_end
|
796 |
-
]
|
797 |
-
|
798 |
-
# 1. Check inputs. Raise error if not correct
|
799 |
-
self.check_inputs(
|
800 |
-
prompt,
|
801 |
-
image,
|
802 |
-
callback_steps,
|
803 |
-
negative_prompt,
|
804 |
-
prompt_embeds,
|
805 |
-
negative_prompt_embeds,
|
806 |
-
controlnet_conditioning_scale,
|
807 |
-
control_guidance_start,
|
808 |
-
control_guidance_end,
|
809 |
-
)
|
810 |
-
|
811 |
-
# 2. Define call parameters
|
812 |
-
if prompt is not None and isinstance(prompt, str):
|
813 |
-
batch_size = 1
|
814 |
-
elif prompt is not None and isinstance(prompt, list):
|
815 |
-
batch_size = len(prompt)
|
816 |
-
else:
|
817 |
-
batch_size = prompt_embeds.shape[0]
|
818 |
-
|
819 |
-
device = self._execution_device
|
820 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
821 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
822 |
-
# corresponds to doing no classifier free guidance.
|
823 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
824 |
-
|
825 |
-
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
826 |
-
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
827 |
-
|
828 |
-
global_pool_conditions = (
|
829 |
-
controlnet.config.global_pool_conditions
|
830 |
-
if isinstance(controlnet, ControlNetModel)
|
831 |
-
else controlnet.nets[0].config.global_pool_conditions
|
832 |
-
)
|
833 |
-
guess_mode = guess_mode or global_pool_conditions
|
834 |
-
|
835 |
-
# 3. Encode input prompt
|
836 |
-
text_encoder_lora_scale = (
|
837 |
-
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
838 |
-
)
|
839 |
-
prompt_embeds = self._encode_prompt(
|
840 |
-
prompt,
|
841 |
-
device,
|
842 |
-
num_images_per_prompt,
|
843 |
-
do_classifier_free_guidance,
|
844 |
-
negative_prompt,
|
845 |
-
prompt_embeds=prompt_embeds,
|
846 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
847 |
-
lora_scale=text_encoder_lora_scale,
|
848 |
-
)
|
849 |
-
|
850 |
-
# 4. Prepare image
|
851 |
-
if isinstance(controlnet, ControlNetModel):
|
852 |
-
image = self.prepare_image(
|
853 |
-
image=image,
|
854 |
-
width=width,
|
855 |
-
height=height,
|
856 |
-
batch_size=batch_size * num_images_per_prompt,
|
857 |
-
num_images_per_prompt=num_images_per_prompt,
|
858 |
-
device=device,
|
859 |
-
dtype=controlnet.dtype,
|
860 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
861 |
-
guess_mode=guess_mode,
|
862 |
-
)
|
863 |
-
height, width = image.shape[-2:]
|
864 |
-
elif isinstance(controlnet, MultiControlNetModel):
|
865 |
-
images = []
|
866 |
-
|
867 |
-
for image_ in image:
|
868 |
-
image_ = self.prepare_image(
|
869 |
-
image=image_,
|
870 |
-
width=width,
|
871 |
-
height=height,
|
872 |
-
batch_size=batch_size * num_images_per_prompt,
|
873 |
-
num_images_per_prompt=num_images_per_prompt,
|
874 |
-
device=device,
|
875 |
-
dtype=controlnet.dtype,
|
876 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
877 |
-
guess_mode=guess_mode,
|
878 |
-
)
|
879 |
-
|
880 |
-
images.append(image_)
|
881 |
-
|
882 |
-
image = images
|
883 |
-
height, width = image[0].shape[-2:]
|
884 |
-
else:
|
885 |
-
assert False
|
886 |
-
|
887 |
-
# 5. Prepare timesteps
|
888 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
889 |
-
timesteps = self.scheduler.timesteps
|
890 |
-
|
891 |
-
# 6. Prepare latent variables
|
892 |
-
num_channels_latents = self.unet.config.in_channels
|
893 |
-
latents = self.prepare_latents(
|
894 |
-
batch_size * num_images_per_prompt,
|
895 |
-
num_channels_latents,
|
896 |
-
height,
|
897 |
-
width,
|
898 |
-
prompt_embeds.dtype,
|
899 |
-
device,
|
900 |
-
generator,
|
901 |
-
latents,
|
902 |
-
)
|
903 |
-
|
904 |
-
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
905 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
906 |
-
|
907 |
-
# 7.1 Create tensor stating which controlnets to keep
|
908 |
-
controlnet_keep = []
|
909 |
-
for i in range(len(timesteps)):
|
910 |
-
keeps = [
|
911 |
-
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
912 |
-
for s, e in zip(control_guidance_start, control_guidance_end)
|
913 |
-
]
|
914 |
-
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
915 |
-
|
916 |
-
# 8. Denoising loop
|
917 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
918 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
919 |
-
for i, t in enumerate(timesteps):
|
920 |
-
# expand the latents if we are doing classifier free guidance
|
921 |
-
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
922 |
-
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
923 |
-
|
924 |
-
# controlnet(s) inference
|
925 |
-
if guess_mode and do_classifier_free_guidance:
|
926 |
-
# Infer ControlNet only for the conditional batch.
|
927 |
-
control_model_input = latents
|
928 |
-
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
929 |
-
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
930 |
-
else:
|
931 |
-
control_model_input = latent_model_input
|
932 |
-
controlnet_prompt_embeds = prompt_embeds
|
933 |
-
|
934 |
-
if isinstance(controlnet_keep[i], list):
|
935 |
-
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
936 |
-
else:
|
937 |
-
cond_scale = controlnet_conditioning_scale * controlnet_keep[i]
|
938 |
-
|
939 |
-
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
940 |
-
control_model_input,
|
941 |
-
t,
|
942 |
-
encoder_hidden_states=controlnet_prompt_embeds,
|
943 |
-
controlnet_cond=image,
|
944 |
-
conditioning_scale=cond_scale,
|
945 |
-
guess_mode=guess_mode,
|
946 |
-
return_dict=False,
|
947 |
-
)
|
948 |
-
|
949 |
-
if guess_mode and do_classifier_free_guidance:
|
950 |
-
# Infered ControlNet only for the conditional batch.
|
951 |
-
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
952 |
-
# add 0 to the unconditional batch to keep it unchanged.
|
953 |
-
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
954 |
-
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
|
955 |
-
|
956 |
-
# predict the noise residual
|
957 |
-
noise_pred = self.unet(
|
958 |
-
latent_model_input,
|
959 |
-
t,
|
960 |
-
encoder_hidden_states=prompt_embeds,
|
961 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
962 |
-
down_block_additional_residuals=down_block_res_samples,
|
963 |
-
mid_block_additional_residual=mid_block_res_sample,
|
964 |
-
return_dict=False,
|
965 |
-
)[0]
|
966 |
-
|
967 |
-
# perform guidance
|
968 |
-
if do_classifier_free_guidance:
|
969 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
970 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
971 |
-
|
972 |
-
# compute the previous noisy sample x_t -> x_t-1
|
973 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
974 |
-
|
975 |
-
# call the callback, if provided
|
976 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
977 |
-
progress_bar.update()
|
978 |
-
if callback is not None and i % callback_steps == 0:
|
979 |
-
callback(i, t, latents)
|
980 |
-
|
981 |
-
# If we do sequential model offloading, let's offload unet and controlnet
|
982 |
-
# manually for max memory savings
|
983 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
984 |
-
self.unet.to("cpu")
|
985 |
-
self.controlnet.to("cpu")
|
986 |
-
torch.cuda.empty_cache()
|
987 |
-
|
988 |
-
if not output_type == "latent":
|
989 |
-
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
990 |
-
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
991 |
-
else:
|
992 |
-
image = latents
|
993 |
-
has_nsfw_concept = None
|
994 |
-
|
995 |
-
if has_nsfw_concept is None:
|
996 |
-
do_denormalize = [True] * image.shape[0]
|
997 |
-
else:
|
998 |
-
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
999 |
-
|
1000 |
-
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
1001 |
-
|
1002 |
-
# Offload last model to CPU
|
1003 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
1004 |
-
self.final_offload_hook.offload()
|
1005 |
-
|
1006 |
-
if not return_dict:
|
1007 |
-
return (image, has_nsfw_concept)
|
1008 |
-
|
1009 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dynamic_modules_utils.py
DELETED
@@ -1,456 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
"""Utilities to dynamically load objects from the Hub."""
|
16 |
-
|
17 |
-
import importlib
|
18 |
-
import inspect
|
19 |
-
import json
|
20 |
-
import os
|
21 |
-
import re
|
22 |
-
import shutil
|
23 |
-
import sys
|
24 |
-
from pathlib import Path
|
25 |
-
from typing import Dict, Optional, Union
|
26 |
-
from urllib import request
|
27 |
-
|
28 |
-
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
|
29 |
-
from packaging import version
|
30 |
-
|
31 |
-
from .. import __version__
|
32 |
-
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
|
33 |
-
|
34 |
-
|
35 |
-
COMMUNITY_PIPELINES_URL = (
|
36 |
-
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
|
37 |
-
)
|
38 |
-
|
39 |
-
|
40 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
41 |
-
|
42 |
-
|
43 |
-
def get_diffusers_versions():
|
44 |
-
url = "https://pypi.org/pypi/diffusers/json"
|
45 |
-
releases = json.loads(request.urlopen(url).read())["releases"].keys()
|
46 |
-
return sorted(releases, key=lambda x: version.Version(x))
|
47 |
-
|
48 |
-
|
49 |
-
def init_hf_modules():
|
50 |
-
"""
|
51 |
-
Creates the cache directory for modules with an init, and adds it to the Python path.
|
52 |
-
"""
|
53 |
-
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
|
54 |
-
if HF_MODULES_CACHE in sys.path:
|
55 |
-
return
|
56 |
-
|
57 |
-
sys.path.append(HF_MODULES_CACHE)
|
58 |
-
os.makedirs(HF_MODULES_CACHE, exist_ok=True)
|
59 |
-
init_path = Path(HF_MODULES_CACHE) / "__init__.py"
|
60 |
-
if not init_path.exists():
|
61 |
-
init_path.touch()
|
62 |
-
|
63 |
-
|
64 |
-
def create_dynamic_module(name: Union[str, os.PathLike]):
|
65 |
-
"""
|
66 |
-
Creates a dynamic module in the cache directory for modules.
|
67 |
-
"""
|
68 |
-
init_hf_modules()
|
69 |
-
dynamic_module_path = Path(HF_MODULES_CACHE) / name
|
70 |
-
# If the parent module does not exist yet, recursively create it.
|
71 |
-
if not dynamic_module_path.parent.exists():
|
72 |
-
create_dynamic_module(dynamic_module_path.parent)
|
73 |
-
os.makedirs(dynamic_module_path, exist_ok=True)
|
74 |
-
init_path = dynamic_module_path / "__init__.py"
|
75 |
-
if not init_path.exists():
|
76 |
-
init_path.touch()
|
77 |
-
|
78 |
-
|
79 |
-
def get_relative_imports(module_file):
|
80 |
-
"""
|
81 |
-
Get the list of modules that are relatively imported in a module file.
|
82 |
-
|
83 |
-
Args:
|
84 |
-
module_file (`str` or `os.PathLike`): The module file to inspect.
|
85 |
-
"""
|
86 |
-
with open(module_file, "r", encoding="utf-8") as f:
|
87 |
-
content = f.read()
|
88 |
-
|
89 |
-
# Imports of the form `import .xxx`
|
90 |
-
relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
|
91 |
-
# Imports of the form `from .xxx import yyy`
|
92 |
-
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
|
93 |
-
# Unique-ify
|
94 |
-
return list(set(relative_imports))
|
95 |
-
|
96 |
-
|
97 |
-
def get_relative_import_files(module_file):
|
98 |
-
"""
|
99 |
-
Get the list of all files that are needed for a given module. Note that this function recurses through the relative
|
100 |
-
imports (if a imports b and b imports c, it will return module files for b and c).
|
101 |
-
|
102 |
-
Args:
|
103 |
-
module_file (`str` or `os.PathLike`): The module file to inspect.
|
104 |
-
"""
|
105 |
-
no_change = False
|
106 |
-
files_to_check = [module_file]
|
107 |
-
all_relative_imports = []
|
108 |
-
|
109 |
-
# Let's recurse through all relative imports
|
110 |
-
while not no_change:
|
111 |
-
new_imports = []
|
112 |
-
for f in files_to_check:
|
113 |
-
new_imports.extend(get_relative_imports(f))
|
114 |
-
|
115 |
-
module_path = Path(module_file).parent
|
116 |
-
new_import_files = [str(module_path / m) for m in new_imports]
|
117 |
-
new_import_files = [f for f in new_import_files if f not in all_relative_imports]
|
118 |
-
files_to_check = [f"{f}.py" for f in new_import_files]
|
119 |
-
|
120 |
-
no_change = len(new_import_files) == 0
|
121 |
-
all_relative_imports.extend(files_to_check)
|
122 |
-
|
123 |
-
return all_relative_imports
|
124 |
-
|
125 |
-
|
126 |
-
def check_imports(filename):
|
127 |
-
"""
|
128 |
-
Check if the current Python environment contains all the libraries that are imported in a file.
|
129 |
-
"""
|
130 |
-
with open(filename, "r", encoding="utf-8") as f:
|
131 |
-
content = f.read()
|
132 |
-
|
133 |
-
# Imports of the form `import xxx`
|
134 |
-
imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
|
135 |
-
# Imports of the form `from xxx import yyy`
|
136 |
-
imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
|
137 |
-
# Only keep the top-level module
|
138 |
-
imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
|
139 |
-
|
140 |
-
# Unique-ify and test we got them all
|
141 |
-
imports = list(set(imports))
|
142 |
-
missing_packages = []
|
143 |
-
for imp in imports:
|
144 |
-
try:
|
145 |
-
importlib.import_module(imp)
|
146 |
-
except ImportError:
|
147 |
-
missing_packages.append(imp)
|
148 |
-
|
149 |
-
if len(missing_packages) > 0:
|
150 |
-
raise ImportError(
|
151 |
-
"This modeling file requires the following packages that were not found in your environment: "
|
152 |
-
f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
|
153 |
-
)
|
154 |
-
|
155 |
-
return get_relative_imports(filename)
|
156 |
-
|
157 |
-
|
158 |
-
def get_class_in_module(class_name, module_path):
|
159 |
-
"""
|
160 |
-
Import a module on the cache directory for modules and extract a class from it.
|
161 |
-
"""
|
162 |
-
module_path = module_path.replace(os.path.sep, ".")
|
163 |
-
module = importlib.import_module(module_path)
|
164 |
-
|
165 |
-
if class_name is None:
|
166 |
-
return find_pipeline_class(module)
|
167 |
-
return getattr(module, class_name)
|
168 |
-
|
169 |
-
|
170 |
-
def find_pipeline_class(loaded_module):
|
171 |
-
"""
|
172 |
-
Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
|
173 |
-
inheriting from `DiffusionPipeline`.
|
174 |
-
"""
|
175 |
-
from ..pipelines import DiffusionPipeline
|
176 |
-
|
177 |
-
cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
|
178 |
-
|
179 |
-
pipeline_class = None
|
180 |
-
for cls_name, cls in cls_members.items():
|
181 |
-
if (
|
182 |
-
cls_name != DiffusionPipeline.__name__
|
183 |
-
and issubclass(cls, DiffusionPipeline)
|
184 |
-
and cls.__module__.split(".")[0] != "diffusers"
|
185 |
-
):
|
186 |
-
if pipeline_class is not None:
|
187 |
-
raise ValueError(
|
188 |
-
f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
|
189 |
-
f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
|
190 |
-
f" {loaded_module}."
|
191 |
-
)
|
192 |
-
pipeline_class = cls
|
193 |
-
|
194 |
-
return pipeline_class
|
195 |
-
|
196 |
-
|
197 |
-
def get_cached_module_file(
|
198 |
-
pretrained_model_name_or_path: Union[str, os.PathLike],
|
199 |
-
module_file: str,
|
200 |
-
cache_dir: Optional[Union[str, os.PathLike]] = None,
|
201 |
-
force_download: bool = False,
|
202 |
-
resume_download: bool = False,
|
203 |
-
proxies: Optional[Dict[str, str]] = None,
|
204 |
-
use_auth_token: Optional[Union[bool, str]] = None,
|
205 |
-
revision: Optional[str] = None,
|
206 |
-
local_files_only: bool = False,
|
207 |
-
):
|
208 |
-
"""
|
209 |
-
Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached
|
210 |
-
Transformers module.
|
211 |
-
|
212 |
-
Args:
|
213 |
-
pretrained_model_name_or_path (`str` or `os.PathLike`):
|
214 |
-
This can be either:
|
215 |
-
|
216 |
-
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
|
217 |
-
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
|
218 |
-
under a user or organization name, like `dbmdz/bert-base-german-cased`.
|
219 |
-
- a path to a *directory* containing a configuration file saved using the
|
220 |
-
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
|
221 |
-
|
222 |
-
module_file (`str`):
|
223 |
-
The name of the module file containing the class to look for.
|
224 |
-
cache_dir (`str` or `os.PathLike`, *optional*):
|
225 |
-
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
|
226 |
-
cache should not be used.
|
227 |
-
force_download (`bool`, *optional*, defaults to `False`):
|
228 |
-
Whether or not to force to (re-)download the configuration files and override the cached versions if they
|
229 |
-
exist.
|
230 |
-
resume_download (`bool`, *optional*, defaults to `False`):
|
231 |
-
Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
|
232 |
-
proxies (`Dict[str, str]`, *optional*):
|
233 |
-
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
|
234 |
-
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
|
235 |
-
use_auth_token (`str` or *bool*, *optional*):
|
236 |
-
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
|
237 |
-
when running `transformers-cli login` (stored in `~/.huggingface`).
|
238 |
-
revision (`str`, *optional*, defaults to `"main"`):
|
239 |
-
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
|
240 |
-
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
|
241 |
-
identifier allowed by git.
|
242 |
-
local_files_only (`bool`, *optional*, defaults to `False`):
|
243 |
-
If `True`, will only try to load the tokenizer configuration from local files.
|
244 |
-
|
245 |
-
<Tip>
|
246 |
-
|
247 |
-
You may pass a token in `use_auth_token` if you are not logged in (`huggingface-cli long`) and want to use private
|
248 |
-
or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models).
|
249 |
-
|
250 |
-
</Tip>
|
251 |
-
|
252 |
-
Returns:
|
253 |
-
`str`: The path to the module inside the cache.
|
254 |
-
"""
|
255 |
-
# Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
|
256 |
-
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
|
257 |
-
|
258 |
-
module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
|
259 |
-
|
260 |
-
if os.path.isfile(module_file_or_url):
|
261 |
-
resolved_module_file = module_file_or_url
|
262 |
-
submodule = "local"
|
263 |
-
elif pretrained_model_name_or_path.count("/") == 0:
|
264 |
-
available_versions = get_diffusers_versions()
|
265 |
-
# cut ".dev0"
|
266 |
-
latest_version = "v" + ".".join(__version__.split(".")[:3])
|
267 |
-
|
268 |
-
# retrieve github version that matches
|
269 |
-
if revision is None:
|
270 |
-
revision = latest_version if latest_version[1:] in available_versions else "main"
|
271 |
-
logger.info(f"Defaulting to latest_version: {revision}.")
|
272 |
-
elif revision in available_versions:
|
273 |
-
revision = f"v{revision}"
|
274 |
-
elif revision == "main":
|
275 |
-
revision = revision
|
276 |
-
else:
|
277 |
-
raise ValueError(
|
278 |
-
f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
|
279 |
-
f" {', '.join(available_versions + ['main'])}."
|
280 |
-
)
|
281 |
-
|
282 |
-
# community pipeline on GitHub
|
283 |
-
github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
|
284 |
-
try:
|
285 |
-
resolved_module_file = cached_download(
|
286 |
-
github_url,
|
287 |
-
cache_dir=cache_dir,
|
288 |
-
force_download=force_download,
|
289 |
-
proxies=proxies,
|
290 |
-
resume_download=resume_download,
|
291 |
-
local_files_only=local_files_only,
|
292 |
-
use_auth_token=False,
|
293 |
-
)
|
294 |
-
submodule = "git"
|
295 |
-
module_file = pretrained_model_name_or_path + ".py"
|
296 |
-
except EnvironmentError:
|
297 |
-
logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
|
298 |
-
raise
|
299 |
-
else:
|
300 |
-
try:
|
301 |
-
# Load from URL or cache if already cached
|
302 |
-
resolved_module_file = hf_hub_download(
|
303 |
-
pretrained_model_name_or_path,
|
304 |
-
module_file,
|
305 |
-
cache_dir=cache_dir,
|
306 |
-
force_download=force_download,
|
307 |
-
proxies=proxies,
|
308 |
-
resume_download=resume_download,
|
309 |
-
local_files_only=local_files_only,
|
310 |
-
use_auth_token=use_auth_token,
|
311 |
-
)
|
312 |
-
submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
|
313 |
-
except EnvironmentError:
|
314 |
-
logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
|
315 |
-
raise
|
316 |
-
|
317 |
-
# Check we have all the requirements in our environment
|
318 |
-
modules_needed = check_imports(resolved_module_file)
|
319 |
-
|
320 |
-
# Now we move the module inside our cached dynamic modules.
|
321 |
-
full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
|
322 |
-
create_dynamic_module(full_submodule)
|
323 |
-
submodule_path = Path(HF_MODULES_CACHE) / full_submodule
|
324 |
-
if submodule == "local" or submodule == "git":
|
325 |
-
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
|
326 |
-
# that hash, to only copy when there is a modification but it seems overkill for now).
|
327 |
-
# The only reason we do the copy is to avoid putting too many folders in sys.path.
|
328 |
-
shutil.copy(resolved_module_file, submodule_path / module_file)
|
329 |
-
for module_needed in modules_needed:
|
330 |
-
module_needed = f"{module_needed}.py"
|
331 |
-
shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
|
332 |
-
else:
|
333 |
-
# Get the commit hash
|
334 |
-
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
|
335 |
-
if isinstance(use_auth_token, str):
|
336 |
-
token = use_auth_token
|
337 |
-
elif use_auth_token is True:
|
338 |
-
token = HfFolder.get_token()
|
339 |
-
else:
|
340 |
-
token = None
|
341 |
-
|
342 |
-
commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
|
343 |
-
|
344 |
-
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
|
345 |
-
# benefit of versioning.
|
346 |
-
submodule_path = submodule_path / commit_hash
|
347 |
-
full_submodule = full_submodule + os.path.sep + commit_hash
|
348 |
-
create_dynamic_module(full_submodule)
|
349 |
-
|
350 |
-
if not (submodule_path / module_file).exists():
|
351 |
-
shutil.copy(resolved_module_file, submodule_path / module_file)
|
352 |
-
# Make sure we also have every file with relative
|
353 |
-
for module_needed in modules_needed:
|
354 |
-
if not (submodule_path / module_needed).exists():
|
355 |
-
get_cached_module_file(
|
356 |
-
pretrained_model_name_or_path,
|
357 |
-
f"{module_needed}.py",
|
358 |
-
cache_dir=cache_dir,
|
359 |
-
force_download=force_download,
|
360 |
-
resume_download=resume_download,
|
361 |
-
proxies=proxies,
|
362 |
-
use_auth_token=use_auth_token,
|
363 |
-
revision=revision,
|
364 |
-
local_files_only=local_files_only,
|
365 |
-
)
|
366 |
-
return os.path.join(full_submodule, module_file)
|
367 |
-
|
368 |
-
|
369 |
-
def get_class_from_dynamic_module(
|
370 |
-
pretrained_model_name_or_path: Union[str, os.PathLike],
|
371 |
-
module_file: str,
|
372 |
-
class_name: Optional[str] = None,
|
373 |
-
cache_dir: Optional[Union[str, os.PathLike]] = None,
|
374 |
-
force_download: bool = False,
|
375 |
-
resume_download: bool = False,
|
376 |
-
proxies: Optional[Dict[str, str]] = None,
|
377 |
-
use_auth_token: Optional[Union[bool, str]] = None,
|
378 |
-
revision: Optional[str] = None,
|
379 |
-
local_files_only: bool = False,
|
380 |
-
**kwargs,
|
381 |
-
):
|
382 |
-
"""
|
383 |
-
Extracts a class from a module file, present in the local folder or repository of a model.
|
384 |
-
|
385 |
-
<Tip warning={true}>
|
386 |
-
|
387 |
-
Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
|
388 |
-
therefore only be called on trusted repos.
|
389 |
-
|
390 |
-
</Tip>
|
391 |
-
|
392 |
-
Args:
|
393 |
-
pretrained_model_name_or_path (`str` or `os.PathLike`):
|
394 |
-
This can be either:
|
395 |
-
|
396 |
-
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
|
397 |
-
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
|
398 |
-
under a user or organization name, like `dbmdz/bert-base-german-cased`.
|
399 |
-
- a path to a *directory* containing a configuration file saved using the
|
400 |
-
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
|
401 |
-
|
402 |
-
module_file (`str`):
|
403 |
-
The name of the module file containing the class to look for.
|
404 |
-
class_name (`str`):
|
405 |
-
The name of the class to import in the module.
|
406 |
-
cache_dir (`str` or `os.PathLike`, *optional*):
|
407 |
-
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
|
408 |
-
cache should not be used.
|
409 |
-
force_download (`bool`, *optional*, defaults to `False`):
|
410 |
-
Whether or not to force to (re-)download the configuration files and override the cached versions if they
|
411 |
-
exist.
|
412 |
-
resume_download (`bool`, *optional*, defaults to `False`):
|
413 |
-
Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists.
|
414 |
-
proxies (`Dict[str, str]`, *optional*):
|
415 |
-
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
|
416 |
-
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
|
417 |
-
use_auth_token (`str` or `bool`, *optional*):
|
418 |
-
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
|
419 |
-
when running `transformers-cli login` (stored in `~/.huggingface`).
|
420 |
-
revision (`str`, *optional*, defaults to `"main"`):
|
421 |
-
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
|
422 |
-
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
|
423 |
-
identifier allowed by git.
|
424 |
-
local_files_only (`bool`, *optional*, defaults to `False`):
|
425 |
-
If `True`, will only try to load the tokenizer configuration from local files.
|
426 |
-
|
427 |
-
<Tip>
|
428 |
-
|
429 |
-
You may pass a token in `use_auth_token` if you are not logged in (`huggingface-cli long`) and want to use private
|
430 |
-
or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models).
|
431 |
-
|
432 |
-
</Tip>
|
433 |
-
|
434 |
-
Returns:
|
435 |
-
`type`: The class, dynamically imported from the module.
|
436 |
-
|
437 |
-
Examples:
|
438 |
-
|
439 |
-
```python
|
440 |
-
# Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this
|
441 |
-
# module.
|
442 |
-
cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel")
|
443 |
-
```"""
|
444 |
-
# And lastly we get the class inside our newly created module
|
445 |
-
final_module = get_cached_module_file(
|
446 |
-
pretrained_model_name_or_path,
|
447 |
-
module_file,
|
448 |
-
cache_dir=cache_dir,
|
449 |
-
force_download=force_download,
|
450 |
-
resume_download=resume_download,
|
451 |
-
proxies=proxies,
|
452 |
-
use_auth_token=use_auth_token,
|
453 |
-
revision=revision,
|
454 |
-
local_files_only=local_files_only,
|
455 |
-
)
|
456 |
-
return get_class_in_module(class_name, final_module.replace(".py", ""))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/lvis_v1_instance.py
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
_base_ = 'coco_instance.py'
|
2 |
-
dataset_type = 'LVISV1Dataset'
|
3 |
-
data_root = 'data/lvis_v1/'
|
4 |
-
data = dict(
|
5 |
-
samples_per_gpu=2,
|
6 |
-
workers_per_gpu=2,
|
7 |
-
train=dict(
|
8 |
-
_delete_=True,
|
9 |
-
type='ClassBalancedDataset',
|
10 |
-
oversample_thr=1e-3,
|
11 |
-
dataset=dict(
|
12 |
-
type=dataset_type,
|
13 |
-
ann_file=data_root + 'annotations/lvis_v1_train.json',
|
14 |
-
img_prefix=data_root)),
|
15 |
-
val=dict(
|
16 |
-
type=dataset_type,
|
17 |
-
ann_file=data_root + 'annotations/lvis_v1_val.json',
|
18 |
-
img_prefix=data_root),
|
19 |
-
test=dict(
|
20 |
-
type=dataset_type,
|
21 |
-
ann_file=data_root + 'annotations/lvis_v1_val.json',
|
22 |
-
img_prefix=data_root))
|
23 |
-
evaluation = dict(metric=['bbox', 'segm'])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/env.py
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
"""This file holding some environment constant for sharing by other files."""
|
3 |
-
|
4 |
-
import os.path as osp
|
5 |
-
import subprocess
|
6 |
-
import sys
|
7 |
-
from collections import defaultdict
|
8 |
-
|
9 |
-
import cv2
|
10 |
-
import torch
|
11 |
-
|
12 |
-
import annotator.uniformer.mmcv as mmcv
|
13 |
-
from .parrots_wrapper import get_build_config
|
14 |
-
|
15 |
-
|
16 |
-
def collect_env():
|
17 |
-
"""Collect the information of the running environments.
|
18 |
-
|
19 |
-
Returns:
|
20 |
-
dict: The environment information. The following fields are contained.
|
21 |
-
|
22 |
-
- sys.platform: The variable of ``sys.platform``.
|
23 |
-
- Python: Python version.
|
24 |
-
- CUDA available: Bool, indicating if CUDA is available.
|
25 |
-
- GPU devices: Device type of each GPU.
|
26 |
-
- CUDA_HOME (optional): The env var ``CUDA_HOME``.
|
27 |
-
- NVCC (optional): NVCC version.
|
28 |
-
- GCC: GCC version, "n/a" if GCC is not installed.
|
29 |
-
- PyTorch: PyTorch version.
|
30 |
-
- PyTorch compiling details: The output of \
|
31 |
-
``torch.__config__.show()``.
|
32 |
-
- TorchVision (optional): TorchVision version.
|
33 |
-
- OpenCV: OpenCV version.
|
34 |
-
- MMCV: MMCV version.
|
35 |
-
- MMCV Compiler: The GCC version for compiling MMCV ops.
|
36 |
-
- MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.
|
37 |
-
"""
|
38 |
-
env_info = {}
|
39 |
-
env_info['sys.platform'] = sys.platform
|
40 |
-
env_info['Python'] = sys.version.replace('\n', '')
|
41 |
-
|
42 |
-
cuda_available = torch.cuda.is_available()
|
43 |
-
env_info['CUDA available'] = cuda_available
|
44 |
-
|
45 |
-
if cuda_available:
|
46 |
-
devices = defaultdict(list)
|
47 |
-
for k in range(torch.cuda.device_count()):
|
48 |
-
devices[torch.cuda.get_device_name(k)].append(str(k))
|
49 |
-
for name, device_ids in devices.items():
|
50 |
-
env_info['GPU ' + ','.join(device_ids)] = name
|
51 |
-
|
52 |
-
from annotator.uniformer.mmcv.utils.parrots_wrapper import _get_cuda_home
|
53 |
-
CUDA_HOME = _get_cuda_home()
|
54 |
-
env_info['CUDA_HOME'] = CUDA_HOME
|
55 |
-
|
56 |
-
if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
|
57 |
-
try:
|
58 |
-
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
|
59 |
-
nvcc = subprocess.check_output(
|
60 |
-
f'"{nvcc}" -V | tail -n1', shell=True)
|
61 |
-
nvcc = nvcc.decode('utf-8').strip()
|
62 |
-
except subprocess.SubprocessError:
|
63 |
-
nvcc = 'Not Available'
|
64 |
-
env_info['NVCC'] = nvcc
|
65 |
-
|
66 |
-
try:
|
67 |
-
gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
|
68 |
-
gcc = gcc.decode('utf-8').strip()
|
69 |
-
env_info['GCC'] = gcc
|
70 |
-
except subprocess.CalledProcessError: # gcc is unavailable
|
71 |
-
env_info['GCC'] = 'n/a'
|
72 |
-
|
73 |
-
env_info['PyTorch'] = torch.__version__
|
74 |
-
env_info['PyTorch compiling details'] = get_build_config()
|
75 |
-
|
76 |
-
try:
|
77 |
-
import torchvision
|
78 |
-
env_info['TorchVision'] = torchvision.__version__
|
79 |
-
except ModuleNotFoundError:
|
80 |
-
pass
|
81 |
-
|
82 |
-
env_info['OpenCV'] = cv2.__version__
|
83 |
-
|
84 |
-
env_info['MMCV'] = mmcv.__version__
|
85 |
-
|
86 |
-
try:
|
87 |
-
from annotator.uniformer.mmcv.ops import get_compiler_version, get_compiling_cuda_version
|
88 |
-
except ModuleNotFoundError:
|
89 |
-
env_info['MMCV Compiler'] = 'n/a'
|
90 |
-
env_info['MMCV CUDA Compiler'] = 'n/a'
|
91 |
-
else:
|
92 |
-
env_info['MMCV Compiler'] = get_compiler_version()
|
93 |
-
env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
|
94 |
-
|
95 |
-
return env_info
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Araloak/fz/chat_completion.py
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
import linecache
|
2 |
-
import re
|
3 |
-
from typing import Dict, List, Optional
|
4 |
-
|
5 |
-
import openai
|
6 |
-
|
7 |
-
|
8 |
-
class ChatCompletion:
|
9 |
-
def __init__(self, model: str = 'gpt-3.5-turbo',
|
10 |
-
api_key: Optional[str] = None, api_key_path: str = './openai_api_key'):
|
11 |
-
if api_key is None:
|
12 |
-
openai.api_key = api_key
|
13 |
-
api_key = linecache.getline(api_key_path, 2).strip('\n')
|
14 |
-
if len(api_key) == 0:
|
15 |
-
raise EnvironmentError
|
16 |
-
openai.api_key = api_key
|
17 |
-
|
18 |
-
self.model = model
|
19 |
-
self.system_messages = []
|
20 |
-
self.user_messages = []
|
21 |
-
|
22 |
-
def chat(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
|
23 |
-
if self._context_length() > 2048:
|
24 |
-
self.reset()
|
25 |
-
if setting is not None:
|
26 |
-
if setting not in self.system_messages:
|
27 |
-
self.system_messages.append(setting)
|
28 |
-
if not self.user_messages or msg != self.user_messages[-1]:
|
29 |
-
self.user_messages.append(msg)
|
30 |
-
|
31 |
-
return self._run(model)
|
32 |
-
|
33 |
-
def retry(self, model: Optional[str] = None) -> str:
|
34 |
-
return self._run(model)
|
35 |
-
|
36 |
-
def reset(self):
|
37 |
-
self.system_messages.clear()
|
38 |
-
self.user_messages.clear()
|
39 |
-
|
40 |
-
def _make_message(self) -> List[Dict]:
|
41 |
-
sys_messages = [{'role': 'system', 'content': msg} for msg in self.system_messages]
|
42 |
-
user_messages = [{'role': 'user', 'content': msg} for msg in self.user_messages]
|
43 |
-
return sys_messages + user_messages
|
44 |
-
|
45 |
-
def _context_length(self) -> int:
|
46 |
-
return len(''.join(self.system_messages)) + len(''.join(self.user_messages))
|
47 |
-
|
48 |
-
def _run(self, model: Optional[str] = None) -> str:
|
49 |
-
if model is None:
|
50 |
-
model = self.model
|
51 |
-
try:
|
52 |
-
response = openai.ChatCompletion.create(model=model, messages=self._make_message())
|
53 |
-
ans = response['choices'][0]['message']['content']
|
54 |
-
ans = re.sub(r'^\n+', '', ans)
|
55 |
-
except openai.error.OpenAIError as e:
|
56 |
-
ans = e
|
57 |
-
except Exception as e:
|
58 |
-
print(e)
|
59 |
-
return ans
|
60 |
-
|
61 |
-
def __call__(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
|
62 |
-
return self.chat(msg, setting, model)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/templates/index.html
DELETED
@@ -1,535 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html lang="en">
|
3 |
-
<head>
|
4 |
-
<meta charset="UTF-8"/>
|
5 |
-
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
|
6 |
-
<title>vits-simple-api</title>
|
7 |
-
<link rel="stylesheet" href="/static/css/style.css">
|
8 |
-
<link rel="stylesheet" href="/static/css/bootstrap.min.css"/>
|
9 |
-
</head>
|
10 |
-
<body>
|
11 |
-
<main class="main-container">
|
12 |
-
<div class="container flex flex-wrap mx-auto">
|
13 |
-
<div class="text-center d-flex align-items-center w-100" style="height: 100px;" id="component-1">
|
14 |
-
<h1 class="w-100">
|
15 |
-
<a href="https://github.com/Artrajz/vits-simple-api" target="_blank"
|
16 |
-
style="text-decoration: none; color: black"> vits-simple-api </a>
|
17 |
-
</h1>
|
18 |
-
</div>
|
19 |
-
|
20 |
-
|
21 |
-
<div class="tabs w-100 border-b-2" id="component-2">
|
22 |
-
<button class="tab-button px-4 pb-2 pt-2 active " onclick="showContent(0)">VITS</button>
|
23 |
-
<button class="tab-button px-4 pb-2 pt-2" onclick="showContent(1)">W2V2-VITS</button>
|
24 |
-
<button class="tab-button px-4 pb-2 pt-2" onclick="showContent(2)">Bert-VITS2</button>
|
25 |
-
</div>
|
26 |
-
|
27 |
-
<div class="content w-100 border-lr-2 border-b-2" id="component-3">
|
28 |
-
<div class="content-pane active w-100 flex-wrap">
|
29 |
-
<form class="w-100">
|
30 |
-
<div class="form-group">
|
31 |
-
<label>text</label>
|
32 |
-
<textarea class="form-control" id="input_text1" rows="3"
|
33 |
-
oninput="updateLink()">你好,こんにちは</textarea>
|
34 |
-
</div>
|
35 |
-
<div class="form-group">
|
36 |
-
<label>id</label>
|
37 |
-
<select class="form-control" id="input_id1" oninput="updateLink()">
|
38 |
-
{% for speaker in speakers["VITS"] %}
|
39 |
-
<option value="{{ speaker["id"] }}">{{ speaker["id"] }} | {{ speaker["name"] }}
|
40 |
-
| {{ speaker["lang"] }}</option>
|
41 |
-
{% endfor %}
|
42 |
-
{% if vits_speakers_count <=0 %}
|
43 |
-
<option value="" disabled selected hidden>未加载模型</option>
|
44 |
-
{% endif %}
|
45 |
-
</select>
|
46 |
-
</div>
|
47 |
-
</form>
|
48 |
-
<form class="w-100">
|
49 |
-
<div class="row">
|
50 |
-
<div class="col-md-4 form-group">
|
51 |
-
<label data-toggle="tooltip" data-placement="top"
|
52 |
-
title="默认为wav">format</label>
|
53 |
-
<select class="form-control" id="input_format1" oninput="updateLink()">
|
54 |
-
<option></option>
|
55 |
-
<option>wav</option>
|
56 |
-
<option>mp3</option>
|
57 |
-
<option>ogg</option>
|
58 |
-
<option>silk</option>
|
59 |
-
<option>flac</option>
|
60 |
-
</select>
|
61 |
-
</div>
|
62 |
-
<div class="col-md-4 form-group">
|
63 |
-
<label data-toggle="tooltip" data-placement="top"
|
64 |
-
title="自动识别语言auto:可识别的语言根据不同speaker而不同,方言无法自动识别。方言模型需要手动指定语言,比如粤语Cantonese要指定参数lang=gd">lang</label>
|
65 |
-
<input type="text" class="form-control" id="input_lang1" oninput="updateLink()" value=""
|
66 |
-
placeholder="auto"/>
|
67 |
-
</div>
|
68 |
-
<div class="col-md-4 form-group">
|
69 |
-
<label data-toggle="tooltip" data-placement="top"
|
70 |
-
title="调节语音长度,相当于调节语速,该数值越大语速越慢。">length</label>
|
71 |
-
<input type="number" class="form-control" id="input_length1" oninput="updateLink()" value=""
|
72 |
-
placeholder="1" min="0" step="0.001"/>
|
73 |
-
</div>
|
74 |
-
</div>
|
75 |
-
<div class="row">
|
76 |
-
<div class="col-md-4 form-group">
|
77 |
-
<label data-toggle="tooltip" data-placement="top"
|
78 |
-
title="样本噪声,控制合成的随机性。">noise</label>
|
79 |
-
<input type="number" class="form-control" id="input_noise1" oninput="updateLink()" value=""
|
80 |
-
placeholder="0.33" min="0" step="0.001"/>
|
81 |
-
</div>
|
82 |
-
<div class="col-md-4 form-group">
|
83 |
-
<label data-toggle="tooltip" data-placement="top"
|
84 |
-
title="随机时长预测器噪声,控制音素发音长度。">noisew</label>
|
85 |
-
<input type="number" class="form-control" id="input_noisew1" oninput="updateLink()" value=""
|
86 |
-
placeholder="0.4" min="0" step="0.001"/>
|
87 |
-
</div>
|
88 |
-
<div class="col-md-4 form-group">
|
89 |
-
<label data-toggle="tooltip" data-placement="top"
|
90 |
-
title="按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。">max</label>
|
91 |
-
<input type="number" class="form-control" id="input_max1" oninput="updateLink()" value=""
|
92 |
-
placeholder="50" step="1"/>
|
93 |
-
</div>
|
94 |
-
</div>
|
95 |
-
</form>
|
96 |
-
|
97 |
-
|
98 |
-
<div class="flex flex-wrap w-100"
|
99 |
-
style="justify-content: center; align-items: center; height: 80px; margin-top: 20px; margin-bottom: 20px; border: 1px solid rgba(0,0,0,.125); border-radius: 0.25rem;">
|
100 |
-
<button type="button" class="btn btn-outline-secondary" onclick="setAudioSource()"
|
101 |
-
style="margin-right: 10px">
|
102 |
-
播放器生成
|
103 |
-
</button>
|
104 |
-
<audio id="audioPlayer1" controls>
|
105 |
-
<source src="" type="audio/mp3"/>
|
106 |
-
Your browser does not support the audio element.
|
107 |
-
</audio>
|
108 |
-
<div class="form-group form-check">
|
109 |
-
<input type="checkbox" id="streaming1" onchange="updateLink()">
|
110 |
-
<label class="form-check-label" data-toggle="tooltip" data-placement="top"
|
111 |
-
title="按照max分段推理文本,推理好一段即输出,无需等待所有文本都推理完毕">流式响应</label>
|
112 |
-
</div>
|
113 |
-
</div>
|
114 |
-
</div>
|
115 |
-
<div class="content-pane w-100 flex-wrap">
|
116 |
-
<form class="w-100">
|
117 |
-
<div class="form-group">
|
118 |
-
<label>text</label>
|
119 |
-
<textarea class="form-control" id="input_text2" rows="3"
|
120 |
-
oninput="updateLink()">你好,こんにちは</textarea>
|
121 |
-
</div>
|
122 |
-
<div class="form-group">
|
123 |
-
<label>id</label>
|
124 |
-
<select class="form-control" id="input_id2" oninput="updateLink()">
|
125 |
-
{% for speaker in speakers["W2V2-VITS"] %}
|
126 |
-
<option value="{{ speaker["id"] }}">{{ speaker["id"] }} | {{ speaker["name"] }}
|
127 |
-
| {{ speaker["lang"] }}</option>
|
128 |
-
{% endfor %}
|
129 |
-
{% if w2v2_speakers_count <=0 %}
|
130 |
-
<option value="" disabled selected hidden>未加载模型</option>
|
131 |
-
{% endif %}
|
132 |
-
</select>
|
133 |
-
</div>
|
134 |
-
<div class="form-group mb-3">
|
135 |
-
<label data-toggle="tooltip" data-placement="top"
|
136 |
-
title="情感嵌入,{% if w2v2_emotion_count > 0 %}
|
137 |
-
可输入范围是0-{{ w2v2_emotion_count-1 }}
|
138 |
-
{% else %}
|
139 |
-
未加载emotion
|
140 |
-
{% endif %}">emotion</label>
|
141 |
-
<input type="number" class="form-control" min="0" max="{{ w2v2_emotion_count-1 }}" step="1"
|
142 |
-
id="emotion" value="0" oninput="updateLink()">
|
143 |
-
</div>
|
144 |
-
</form>
|
145 |
-
|
146 |
-
|
147 |
-
<form class="w-100">
|
148 |
-
<div class="row">
|
149 |
-
<div class="col-md-4 form-group">
|
150 |
-
<label data-toggle="tooltip" data-placement="top"
|
151 |
-
title="默认为wav">format</label>
|
152 |
-
<select class="form-control" id="input_format2" oninput="updateLink()">
|
153 |
-
<option></option>
|
154 |
-
<option>wav</option>
|
155 |
-
<option>mp3</option>
|
156 |
-
<option>ogg</option>
|
157 |
-
<option>silk</option>
|
158 |
-
<option>flac</option>
|
159 |
-
</select>
|
160 |
-
</div>
|
161 |
-
<div class="col-md-4 form-group">
|
162 |
-
<label data-toggle="tooltip" data-placement="top"
|
163 |
-
title="自动识别语言auto:可识别的语言根据不同speaker而不同,方言无法自动识别。方言模型需要手动指定语言,比如粤语Cantonese要指定参数lang=gd">lang</label>
|
164 |
-
<input type="text" class="form-control" id="input_lang2" oninput="updateLink()" value=""
|
165 |
-
placeholder="auto"/>
|
166 |
-
</div>
|
167 |
-
<div class="col-md-4 form-group">
|
168 |
-
<label data-toggle="tooltip" data-placement="top"
|
169 |
-
title="调节语音长度,相当于调节语速,该数值越大语速越慢。">length</label>
|
170 |
-
<input type="number" class="form-control" id="input_length2" oninput="updateLink()" value=""
|
171 |
-
placeholder="1" min="0" step="0.001"/>
|
172 |
-
</div>
|
173 |
-
</div>
|
174 |
-
<div class="row">
|
175 |
-
<div class="col-md-4 form-group">
|
176 |
-
<label data-toggle="tooltip" data-placement="top"
|
177 |
-
title="样本噪声,控制合成的随机性。">noise</label>
|
178 |
-
<input type="number" class="form-control" id="input_noise2" oninput="updateLink()" value=""
|
179 |
-
placeholder="0.33" min="0" step="0.001"/>
|
180 |
-
</div>
|
181 |
-
<div class="col-md-4 form-group">
|
182 |
-
<label data-toggle="tooltip" data-placement="top"
|
183 |
-
title="随机时长预测器噪声,控制音素发音长度。">noisew</label>
|
184 |
-
<input type="number" class="form-control" id="input_noisew2" oninput="updateLink()" value=""
|
185 |
-
placeholder="0.4" min="0" step="0.001"/>
|
186 |
-
</div>
|
187 |
-
<div class="col-md-4 form-group">
|
188 |
-
<label data-toggle="tooltip" data-placement="top"
|
189 |
-
title="按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。">max</label>
|
190 |
-
<input type="number" class="form-control" id="input_max2" oninput="updateLink()" value=""
|
191 |
-
placeholder="50" step="1"/>
|
192 |
-
</div>
|
193 |
-
</div>
|
194 |
-
</form>
|
195 |
-
|
196 |
-
<div class="flex flex-wrap w-100"
|
197 |
-
style="justify-content: center; align-items: center; height: 80px; margin-top: 20px; margin-bottom: 20px; border: 1px solid rgba(0,0,0,.125); border-radius: 0.25rem;">
|
198 |
-
<button type="button" class="btn btn-outline-secondary" onclick="setAudioSource()"
|
199 |
-
style="margin-right: 10px">
|
200 |
-
播放器生成
|
201 |
-
</button>
|
202 |
-
<audio id="audioPlayer2" controls>
|
203 |
-
<source src="" type="audio/mp3"/>
|
204 |
-
Your browser does not support the audio element.
|
205 |
-
</audio>
|
206 |
-
</div>
|
207 |
-
</div>
|
208 |
-
<div class="content-pane w-100 flex-wrap">
|
209 |
-
<form class="w-100">
|
210 |
-
<div class="form-group">
|
211 |
-
<label>text</label>
|
212 |
-
<textarea class="form-control" id="input_text3" rows="3"
|
213 |
-
oninput="updateLink()">你好</textarea>
|
214 |
-
</div>
|
215 |
-
<div class="form-group">
|
216 |
-
<label>id</label>
|
217 |
-
<select class="form-control" id="input_id3" oninput="updateLink()">
|
218 |
-
{% for speaker in speakers["BERT-VITS2"] %}
|
219 |
-
<option value="{{ speaker["id"] }}">{{ speaker["id"] }} | {{ speaker["name"] }}
|
220 |
-
| {{ speaker["lang"] }}</option>
|
221 |
-
{% endfor %}
|
222 |
-
{% if bert_vits2_speakers_count <=0 %}
|
223 |
-
<option value="" disabled selected hidden>未加载模型</option>
|
224 |
-
{% endif %}
|
225 |
-
</select>
|
226 |
-
</div>
|
227 |
-
</form>
|
228 |
-
<form class="w-100">
|
229 |
-
<div class="row">
|
230 |
-
<div class="col-md-4 form-group">
|
231 |
-
<label data-toggle="tooltip" data-placement="top"
|
232 |
-
title="默认为wav">format</label>
|
233 |
-
<select class="form-control" id="input_format3" oninput="updateLink()">
|
234 |
-
<option></option>
|
235 |
-
<option>wav</option>
|
236 |
-
<option>mp3</option>
|
237 |
-
<option>ogg</option>
|
238 |
-
<option>silk</option>
|
239 |
-
<option>flac</option>
|
240 |
-
</select>
|
241 |
-
</div>
|
242 |
-
<div class="col-md-4 form-group">
|
243 |
-
<label data-toggle="tooltip" data-placement="top"
|
244 |
-
title="自动识别语言auto:可识别的语言根据不同speaker而不同,方言无法自动识别。方言模型需要手动指定语言,比如粤语Cantonese要指定参数lang=gd">lang</label>
|
245 |
-
<input type="text" class="form-control" id="input_lang3" oninput="updateLink()" value=""
|
246 |
-
placeholder="auto"/>
|
247 |
-
</div>
|
248 |
-
<div class="col-md-4 form-group">
|
249 |
-
<label data-toggle="tooltip" data-placement="top"
|
250 |
-
title="调节语音长度,相当于调节语速,该数值越大语速越慢。">length</label>
|
251 |
-
<input type="number" class="form-control" id="input_length3" oninput="updateLink()" value=""
|
252 |
-
placeholder="1" min="0" step="0.001"/>
|
253 |
-
</div>
|
254 |
-
</div>
|
255 |
-
<div class="row">
|
256 |
-
<div class="col-md-4 form-group">
|
257 |
-
<label data-toggle="tooltip" data-placement="top"
|
258 |
-
title="样本噪声,控制合成的随机性。">noise</label>
|
259 |
-
<input type="number" class="form-control" id="input_noise3" oninput="updateLink()" value=""
|
260 |
-
placeholder="0.5" min="0" step="0.001"/>
|
261 |
-
</div>
|
262 |
-
<div class="col-md-4 form-group">
|
263 |
-
<label data-toggle="tooltip" data-placement="top"
|
264 |
-
title="随机时长预测器噪声,控制音素发音长度。">noisew</label>
|
265 |
-
<input type="number" class="form-control" id="input_noisew3" oninput="updateLink()" value=""
|
266 |
-
placeholder="0.6" min="0" step="0.001"/>
|
267 |
-
</div>
|
268 |
-
<div class="col-md-4 form-group">
|
269 |
-
<label data-toggle="tooltip" data-placement="top"
|
270 |
-
title="按标点符号分段,加起来大于max时为一段文本。max<=0表示不分段。">max</label>
|
271 |
-
<input type="number" class="form-control" id="input_max3" oninput="updateLink()" value=""
|
272 |
-
placeholder="50" step="1"/>
|
273 |
-
</div>
|
274 |
-
|
275 |
-
</div>
|
276 |
-
<div class="row">
|
277 |
-
<div class="col-md-4 form-group">
|
278 |
-
<label data-toggle="tooltip" data-placement="top"
|
279 |
-
title="SDP/DP混合比:SDP在合成时的占比,理论上此比率越高,合成的语音语调方差越大。">sdp_radio</label>
|
280 |
-
<input type="number" class="form-control" id="input_sdp_ratio" oninput="updateLink()"
|
281 |
-
value=""
|
282 |
-
placeholder="0.2" step="0.01" min="0" max="1"/>
|
283 |
-
</div>
|
284 |
-
</div>
|
285 |
-
</form>
|
286 |
-
|
287 |
-
|
288 |
-
<div class="flex flex-wrap w-100"
|
289 |
-
style="justify-content: center; align-items: center; height: 80px; margin-top: 20px; margin-bottom: 20px; border: 1px solid rgba(0,0,0,.125); border-radius: 0.25rem;">
|
290 |
-
<button type="button" class="btn btn-outline-secondary" onclick="setAudioSource()"
|
291 |
-
style="margin-right: 10px">
|
292 |
-
播放器生成
|
293 |
-
</button>
|
294 |
-
<audio id="audioPlayer3" controls>
|
295 |
-
<source src="" type="audio/mp3"/>
|
296 |
-
Your browser does not support the audio element.
|
297 |
-
</audio>
|
298 |
-
</div>
|
299 |
-
</div>
|
300 |
-
</div>
|
301 |
-
|
302 |
-
<div class="mt-2">
|
303 |
-
{% if speakers_count == 0 %}
|
304 |
-
<div style="color: red;">未加载任何模型</div>
|
305 |
-
{% endif %}
|
306 |
-
<div>
|
307 |
-
<label>返回speakers(json):</label>
|
308 |
-
<a id="speakers_link" href="https://artrajz-vits-simple-api.hf.space/voice/speakers" target="_blank"
|
309 |
-
style="text-decoration: none; color: black">
|
310 |
-
https://artrajz-vits-simple-api.hf.space/voice/speakers
|
311 |
-
</a>
|
312 |
-
</div>
|
313 |
-
<div>
|
314 |
-
<label>API调用:</label>
|
315 |
-
<a id="vits_link" href="https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164"
|
316 |
-
style="text-decoration: none; color: black">
|
317 |
-
https://artrajz-vits-simple-api.hf.space/voice/vits?text=你好,こんにちは&id=164
|
318 |
-
</a>
|
319 |
-
</div>
|
320 |
-
</div>
|
321 |
-
<div>
|
322 |
-
<h2>所有模型均为网络搜集,感谢模型原作者的付出!</h2>
|
323 |
-
<h2>请严格遵循模型原作者使用协议!模型一般都是禁止商用的!</h2>
|
324 |
-
|
325 |
-
<p>
|
326 |
-
Nene_Nanami_Rong_Tang:
|
327 |
-
<a href="https://github.com/CjangCjengh/TTSModels" rel="noreferrer" target="_blank">CjangCjengh/TTSModels</a>
|
328 |
-
</p>
|
329 |
-
<p>
|
330 |
-
louise:
|
331 |
-
<a href="https://github.com/CjangCjengh/TTSModels" rel="noreferrer" target="_blank">CjangCjengh/TTSModels</a>
|
332 |
-
</p>
|
333 |
-
<p>
|
334 |
-
Cantonese:
|
335 |
-
<a href="https://github.com/CjangCjengh/TTSModels" rel="noreferrer" target="_blank">CjangCjengh/TTSModels</a>
|
336 |
-
</p>
|
337 |
-
<p>
|
338 |
-
shanghainese:
|
339 |
-
<a href="https://github.com/CjangCjengh/TTSModels" rel="noreferrer" target="_blank">CjangCjengh/TTSModels</a>
|
340 |
-
</p>
|
341 |
-
<p>
|
342 |
-
w2v2-vits:
|
343 |
-
<a href="https://github.com/CjangCjengh/TTSModels" rel="noreferrer" target="_blank">CjangCjengh/TTSModels</a>
|
344 |
-
</p>
|
345 |
-
<p>
|
346 |
-
vctk:
|
347 |
-
<a href="https://github.com/jaywalnut310/vits" rel="noreferrer" target="_blank">jaywalnut310/vits</a>
|
348 |
-
</p>
|
349 |
-
<p>
|
350 |
-
Bishojo Mangekyo:
|
351 |
-
<a href="https://github.com/Francis-Komizu/VITS" rel="noreferrer" target="_blank">Francis-Komizu/VITS</a>
|
352 |
-
</p>
|
353 |
-
<p>
|
354 |
-
genshin:
|
355 |
-
<a href="https://huggingface.co/spaces/zomehwh/vits-uma-genshin-honkai" rel="noreferrer" target="_blank">zomehwh/vits-uma-genshin-honkai</a>
|
356 |
-
</p>
|
357 |
-
<p>
|
358 |
-
paimon:
|
359 |
-
<a href="https://github.com/zixiiu/Digital_Life_Server" rel="noreferrer" target="_blank">zixiiu/Digital_Life_Server</a>
|
360 |
-
</p>
|
361 |
-
<p>
|
362 |
-
vits_chinese:
|
363 |
-
<a href="https://github.com/PlayVoice/vits_chinese" rel="noreferrer" target="_blank">PlayVoice/vits_chinese</a>
|
364 |
-
</p>
|
365 |
-
</div>
|
366 |
-
</div>
|
367 |
-
|
368 |
-
<br/>
|
369 |
-
|
370 |
-
</main>
|
371 |
-
|
372 |
-
<script src="/static/js/jquery.slim.min.js"></script>
|
373 |
-
<script src="/static/js/bootstrap.bundle.min.js"></script>
|
374 |
-
|
375 |
-
<script>
|
376 |
-
$(function () {
|
377 |
-
$('[data-toggle="tooltip"]').tooltip()
|
378 |
-
})
|
379 |
-
|
380 |
-
function getProtocol() {
|
381 |
-
return 'https:' == location.protocol ? "https://" : "http://";
|
382 |
-
}
|
383 |
-
|
384 |
-
function getUrl() {
|
385 |
-
var url = window.location.host;
|
386 |
-
return url;
|
387 |
-
}
|
388 |
-
|
389 |
-
var baseUrl = getProtocol() + getUrl();
|
390 |
-
var model_type = 1;
|
391 |
-
var vits_status = false;
|
392 |
-
var w2v2_status = false;
|
393 |
-
var bert_vits2_status = false;
|
394 |
-
{% if vits_speakers_count > 0 %}
|
395 |
-
vits_status = true;
|
396 |
-
{% endif %}
|
397 |
-
{% if w2v2_speakers_count > 0 %}
|
398 |
-
w2v2_status = true;
|
399 |
-
{% endif %}
|
400 |
-
{% if bert_vits2_speakers_count > 0 %}
|
401 |
-
bert_vits2_status = true;
|
402 |
-
{% endif %}
|
403 |
-
|
404 |
-
setBaseUrl();
|
405 |
-
|
406 |
-
function setBaseUrl() {
|
407 |
-
var text = document.getElementById("input_text" + model_type).value;
|
408 |
-
var id = document.getElementById("input_id" + model_type).value;
|
409 |
-
|
410 |
-
var vits_link = document.getElementById("vits_link");
|
411 |
-
var speakers_link = document.getElementById("speakers_link");
|
412 |
-
|
413 |
-
var vits_url = baseUrl + "/voice/vits?text=" + text + "&id=" + id;
|
414 |
-
var speakers_url = baseUrl + "/voice/speakers";
|
415 |
-
|
416 |
-
vits_link.href = vits_url;
|
417 |
-
vits_link.textContent = vits_url;
|
418 |
-
|
419 |
-
speakers_link.href = speakers_url;
|
420 |
-
speakers_link.textContent = speakers_url;
|
421 |
-
}
|
422 |
-
|
423 |
-
function getLink() {
|
424 |
-
var text = document.getElementById("input_text" + model_type).value;
|
425 |
-
var id = document.getElementById("input_id" + model_type).value;
|
426 |
-
var format = document.getElementById("input_format" + model_type).value;
|
427 |
-
var lang = document.getElementById("input_lang" + model_type).value;
|
428 |
-
var length = document.getElementById("input_length" + model_type).value;
|
429 |
-
var noise = document.getElementById("input_noise" + model_type).value;
|
430 |
-
var noisew = document.getElementById("input_noisew" + model_type).value;
|
431 |
-
var max = document.getElementById("input_max" + model_type).value;
|
432 |
-
|
433 |
-
if (model_type == 1) {
|
434 |
-
var url = baseUrl + "/voice/vits?text=" + text + "&id=" + id;
|
435 |
-
var streaming = document.getElementById('streaming' + model_type);
|
436 |
-
} else if (model_type == 2) {
|
437 |
-
var emotion = document.getElementById('emotion').value;
|
438 |
-
var url = baseUrl + "/voice/w2v2-vits?text=" + text + "&id=" + id + "&emotion=" + emotion;
|
439 |
-
} else if (model_type == 3) {
|
440 |
-
var sdp_ratio = document.getElementById("input_sdp_ratio").value;
|
441 |
-
var url = baseUrl + "/voice/bert-vits2?text=" + text + "&id=" + id;
|
442 |
-
}
|
443 |
-
if (format != "") {
|
444 |
-
url += "&format=" + format;
|
445 |
-
}
|
446 |
-
if (lang != "") {
|
447 |
-
url += "&lang=" + lang;
|
448 |
-
}
|
449 |
-
if (length != "") {
|
450 |
-
url += "&length=" + length;
|
451 |
-
}
|
452 |
-
if (noise != "") {
|
453 |
-
url += "&noise=" + noise;
|
454 |
-
}
|
455 |
-
if (noisew != "") {
|
456 |
-
url += "&noisew=" + noisew;
|
457 |
-
}
|
458 |
-
if (max != "") {
|
459 |
-
url += "&max=" + max;
|
460 |
-
}
|
461 |
-
if (model_type == 1 && streaming.checked) {
|
462 |
-
url += '&streaming=true';
|
463 |
-
}
|
464 |
-
if (model_type == 3 && sdp_ratio != "") {
|
465 |
-
url += "&sdp_ratio=" + sdp_ratio;
|
466 |
-
}
|
467 |
-
|
468 |
-
return url;
|
469 |
-
}
|
470 |
-
|
471 |
-
function updateLink() {
|
472 |
-
var url = getLink();
|
473 |
-
var link = document.getElementById("vits_link");
|
474 |
-
link.href = url;
|
475 |
-
link.textContent = url;
|
476 |
-
}
|
477 |
-
|
478 |
-
function setAudioSource() {
|
479 |
-
if (model_type == 1 && !vits_status) {
|
480 |
-
alert("未加载VITS模型");
|
481 |
-
return;
|
482 |
-
}
|
483 |
-
if (model_type == 2 && !w2v2_status) {
|
484 |
-
alert("未加载W2V2-VITS模型");
|
485 |
-
return;
|
486 |
-
}
|
487 |
-
if (model_type == 3 && !bert_vits2_status) {
|
488 |
-
alert("未加载Bert-VITS2模型");
|
489 |
-
return;
|
490 |
-
}
|
491 |
-
var url = getLink();
|
492 |
-
|
493 |
-
// Add a timestamp parameter to prevent browser caching
|
494 |
-
var timestamp = new Date().getTime();
|
495 |
-
url += '&t=' + timestamp;
|
496 |
-
|
497 |
-
var audioPlayer = document.getElementById("audioPlayer" + model_type);
|
498 |
-
audioPlayer.src = url;
|
499 |
-
audioPlayer.play();
|
500 |
-
}
|
501 |
-
|
502 |
-
function showContent(index) {
|
503 |
-
const panes = document.querySelectorAll(".content-pane");
|
504 |
-
const buttons = document.querySelectorAll(".tab-button");
|
505 |
-
model_type = index + 1;
|
506 |
-
|
507 |
-
for (let i = 0; i < panes.length; i++) {
|
508 |
-
if (i === index) {
|
509 |
-
panes[i].classList.add("active");
|
510 |
-
buttons[i].classList.add("active");
|
511 |
-
|
512 |
-
} else {
|
513 |
-
panes[i].classList.remove("active");
|
514 |
-
buttons[i].classList.remove("active");
|
515 |
-
}
|
516 |
-
}
|
517 |
-
updateLink();
|
518 |
-
}
|
519 |
-
|
520 |
-
document.querySelectorAll('.slider-group').forEach(function (group) {
|
521 |
-
group.addEventListener("input", function (event) {
|
522 |
-
if (event.target.matches('.slider')) {
|
523 |
-
let value = event.target.value;
|
524 |
-
group.querySelector('.slider-input').value = value;
|
525 |
-
group.querySelector('.slider-value').textContent = value;
|
526 |
-
} else if (event.target.matches('.slider-input')) {
|
527 |
-
let value = event.target.value;
|
528 |
-
group.querySelector('.slider').value = value;
|
529 |
-
group.querySelector('.slider-value').textContent = value;
|
530 |
-
}
|
531 |
-
});
|
532 |
-
});
|
533 |
-
</script>
|
534 |
-
</body>
|
535 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AsakuraMizu/moe-tts/app.py
DELETED
@@ -1,320 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import json
|
3 |
-
import os
|
4 |
-
import re
|
5 |
-
import tempfile
|
6 |
-
from pathlib import Path
|
7 |
-
|
8 |
-
import librosa
|
9 |
-
import numpy as np
|
10 |
-
import torch
|
11 |
-
from torch import no_grad, LongTensor
|
12 |
-
import commons
|
13 |
-
import utils
|
14 |
-
import gradio as gr
|
15 |
-
import gradio.utils as gr_utils
|
16 |
-
import gradio.processing_utils as gr_processing_utils
|
17 |
-
from models import SynthesizerTrn
|
18 |
-
from text import text_to_sequence, _clean_text
|
19 |
-
from mel_processing import spectrogram_torch
|
20 |
-
|
21 |
-
limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
|
22 |
-
|
23 |
-
audio_postprocess_ori = gr.Audio.postprocess
|
24 |
-
|
25 |
-
|
26 |
-
def audio_postprocess(self, y):
|
27 |
-
data = audio_postprocess_ori(self, y)
|
28 |
-
if data is None:
|
29 |
-
return None
|
30 |
-
return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
|
31 |
-
|
32 |
-
|
33 |
-
gr.Audio.postprocess = audio_postprocess
|
34 |
-
|
35 |
-
|
36 |
-
def get_text(text, hps, is_symbol):
|
37 |
-
text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
|
38 |
-
if hps.data.add_blank:
|
39 |
-
text_norm = commons.intersperse(text_norm, 0)
|
40 |
-
text_norm = LongTensor(text_norm)
|
41 |
-
return text_norm
|
42 |
-
|
43 |
-
|
44 |
-
def create_tts_fn(model, hps, speaker_ids):
|
45 |
-
def tts_fn(text, speaker, speed, is_symbol):
|
46 |
-
if limitation:
|
47 |
-
text_len = len(re.sub("\[([A-Z]{2})\]", "", text))
|
48 |
-
max_len = 150
|
49 |
-
if is_symbol:
|
50 |
-
max_len *= 3
|
51 |
-
if text_len > max_len:
|
52 |
-
return "Error: Text is too long", None
|
53 |
-
|
54 |
-
speaker_id = speaker_ids[speaker]
|
55 |
-
stn_tst = get_text(text, hps, is_symbol)
|
56 |
-
with no_grad():
|
57 |
-
x_tst = stn_tst.unsqueeze(0).to(device)
|
58 |
-
x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
|
59 |
-
sid = LongTensor([speaker_id]).to(device)
|
60 |
-
audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
|
61 |
-
length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
|
62 |
-
del stn_tst, x_tst, x_tst_lengths, sid
|
63 |
-
return "Success", (hps.data.sampling_rate, audio)
|
64 |
-
|
65 |
-
return tts_fn
|
66 |
-
|
67 |
-
|
68 |
-
def create_vc_fn(model, hps, speaker_ids):
|
69 |
-
def vc_fn(original_speaker, target_speaker, input_audio):
|
70 |
-
if input_audio is None:
|
71 |
-
return "You need to upload an audio", None
|
72 |
-
sampling_rate, audio = input_audio
|
73 |
-
duration = audio.shape[0] / sampling_rate
|
74 |
-
if limitation and duration > 30:
|
75 |
-
return "Error: Audio is too long", None
|
76 |
-
original_speaker_id = speaker_ids[original_speaker]
|
77 |
-
target_speaker_id = speaker_ids[target_speaker]
|
78 |
-
|
79 |
-
audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
|
80 |
-
if len(audio.shape) > 1:
|
81 |
-
audio = librosa.to_mono(audio.transpose(1, 0))
|
82 |
-
if sampling_rate != hps.data.sampling_rate:
|
83 |
-
audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
|
84 |
-
with no_grad():
|
85 |
-
y = torch.FloatTensor(audio)
|
86 |
-
y = y.unsqueeze(0)
|
87 |
-
spec = spectrogram_torch(y, hps.data.filter_length,
|
88 |
-
hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
|
89 |
-
center=False).to(device)
|
90 |
-
spec_lengths = LongTensor([spec.size(-1)]).to(device)
|
91 |
-
sid_src = LongTensor([original_speaker_id]).to(device)
|
92 |
-
sid_tgt = LongTensor([target_speaker_id]).to(device)
|
93 |
-
audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][
|
94 |
-
0, 0].data.cpu().float().numpy()
|
95 |
-
del y, spec, spec_lengths, sid_src, sid_tgt
|
96 |
-
return "Success", (hps.data.sampling_rate, audio)
|
97 |
-
|
98 |
-
return vc_fn
|
99 |
-
|
100 |
-
|
101 |
-
def create_soft_vc_fn(model, hps, speaker_ids):
|
102 |
-
def soft_vc_fn(target_speaker, input_audio1, input_audio2):
|
103 |
-
input_audio = input_audio1
|
104 |
-
if input_audio is None:
|
105 |
-
input_audio = input_audio2
|
106 |
-
if input_audio is None:
|
107 |
-
return "You need to upload an audio", None
|
108 |
-
sampling_rate, audio = input_audio
|
109 |
-
duration = audio.shape[0] / sampling_rate
|
110 |
-
if limitation and duration > 30:
|
111 |
-
return "Error: Audio is too long", None
|
112 |
-
target_speaker_id = speaker_ids[target_speaker]
|
113 |
-
|
114 |
-
audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
|
115 |
-
if len(audio.shape) > 1:
|
116 |
-
audio = librosa.to_mono(audio.transpose(1, 0))
|
117 |
-
if sampling_rate != 16000:
|
118 |
-
audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
|
119 |
-
with torch.inference_mode():
|
120 |
-
units = hubert.units(torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0).to(device))
|
121 |
-
with no_grad():
|
122 |
-
unit_lengths = LongTensor([units.size(1)]).to(device)
|
123 |
-
sid = LongTensor([target_speaker_id]).to(device)
|
124 |
-
audio = model.infer(units, unit_lengths, sid=sid, noise_scale=.667,
|
125 |
-
noise_scale_w=0.8)[0][0, 0].data.cpu().float().numpy()
|
126 |
-
del units, unit_lengths, sid
|
127 |
-
return "Success", (hps.data.sampling_rate, audio)
|
128 |
-
|
129 |
-
return soft_vc_fn
|
130 |
-
|
131 |
-
|
132 |
-
def create_to_symbol_fn(hps):
|
133 |
-
def to_symbol_fn(is_symbol_input, input_text, temp_text):
|
134 |
-
return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \
|
135 |
-
else (temp_text, temp_text)
|
136 |
-
|
137 |
-
return to_symbol_fn
|
138 |
-
|
139 |
-
|
140 |
-
download_audio_js = """
|
141 |
-
() =>{{
|
142 |
-
let root = document.querySelector("body > gradio-app");
|
143 |
-
if (root.shadowRoot != null)
|
144 |
-
root = root.shadowRoot;
|
145 |
-
let audio = root.querySelector("#{audio_id}").querySelector("audio");
|
146 |
-
if (audio == undefined)
|
147 |
-
return;
|
148 |
-
audio = audio.src;
|
149 |
-
let oA = document.createElement("a");
|
150 |
-
oA.download = Math.floor(Math.random()*100000000)+'.wav';
|
151 |
-
oA.href = audio;
|
152 |
-
document.body.appendChild(oA);
|
153 |
-
oA.click();
|
154 |
-
oA.remove();
|
155 |
-
}}
|
156 |
-
"""
|
157 |
-
|
158 |
-
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    args = parser.parse_args()

    device = torch.device(args.device)
    # Per-model callback registries, one entry per saved model.
    models_tts = []
    models_vc = []
    models_soft_vc = []
    with open("saved_model/info.json", "r", encoding="utf-8") as f:
        models_info = json.load(f)
    for i, info in models_info.items():
        name = info["title"]
        author = info["author"]
        lang = info["lang"]
        example = info["example"]
        config_path = f"saved_model/{i}/config.json"
        model_path = f"saved_model/{i}/model.pth"
        cover = info["cover"]
        cover_path = f"saved_model/{i}/{cover}" if cover else None
        hps = utils.get_hparams_from_file(config_path)
        model = SynthesizerTrn(
            len(hps.symbols),
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model)
        utils.load_checkpoint(model_path, model, None)
        model.eval().to(device)
        # "None" entries are placeholder speaker slots — drop them.
        speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"]
        speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"]

        t = info["type"]
        if t == "vits":
            # Plain VITS models serve both TTS and speaker-to-speaker VC.
            models_tts.append((name, author, cover_path, speakers, lang, example,
                               hps.symbols, create_tts_fn(model, hps, speaker_ids),
                               create_to_symbol_fn(hps)))
            models_vc.append((name, author, cover_path, speakers, create_vc_fn(model, hps, speaker_ids)))
        elif t == "soft-vits-vc":
            models_soft_vc.append((name, author, cover_path, speakers, create_soft_vc_fn(model, hps, speaker_ids)))

    # Shared soft-HuBERT encoder used by all soft-vits-vc models.
    hubert = torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).to(device)

    app = gr.Blocks()

    with app:
        gr.Markdown("# Moe TTS And Voice Conversion Using VITS Model\n\n"
                    "\n\n"
                    "[Open In Colab]"
                    "(https://colab.research.google.com/drive/14Pb8lpmwZL-JI5Ub6jpG4sz2-8KS0kbS?usp=sharing)"
                    " without queue and length limitation.\n\n"
                    "Feel free to [open discussion](https://huggingface.co/spaces/skytnt/moe-tts/discussions/new) "
                    "if you want to add your model to this app.")
        with gr.Tabs():
            with gr.TabItem("TTS"):
                with gr.Tabs():
                    # One sub-tab per TTS-capable model.
                    for i, (name, author, cover_path, speakers, lang, example, symbols, tts_fn,
                            to_symbol_fn) in enumerate(models_tts):
                        with gr.TabItem(f"model{i}"):
                            with gr.Column():
                                cover_markdown = f"\n\n" if cover_path else ""
                                gr.Markdown(f"## {name}\n\n"
                                            f"{cover_markdown}"
                                            f"model author: {author}\n\n"
                                            f"language: {lang}")
                                tts_input1 = gr.TextArea(label="Text (150 words limitation)", value=example,
                                                         elem_id=f"tts-input{i}")
                                tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
                                                         type="index", value=speakers[0])
                                tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.5, maximum=2, step=0.1)
                                with gr.Accordion(label="Advanced Options", open=False):
                                    # Holds the raw text while symbol mode is active.
                                    temp_text_var = gr.Variable()
                                    symbol_input = gr.Checkbox(value=False, label="Symbol input")
                                    symbol_list = gr.Dataset(label="Symbol list", components=[tts_input1],
                                                             samples=[[x] for x in symbols],
                                                             elem_id=f"symbol-list{i}")
                                    symbol_list_json = gr.Json(value=symbols, visible=False)
                                tts_submit = gr.Button("Generate", variant="primary")
                                tts_output1 = gr.Textbox(label="Output Message")
                                tts_output2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio{i}")
                                download = gr.Button("Download Audio")
                                download.click(None, [], [], _js=download_audio_js.format(audio_id=f"tts-audio{i}"))

                                tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, symbol_input],
                                                 [tts_output1, tts_output2], api_name=f"tts-model{i}")
                                symbol_input.change(to_symbol_fn,
                                                    [symbol_input, tts_input1, temp_text_var],
                                                    [tts_input1, temp_text_var])
                                # Client-side: insert the clicked symbol at the caret
                                # position of the text area.
                                symbol_list.click(None, [symbol_list, symbol_list_json], [],
                                                  _js=f"""
                (i,symbols) => {{
                    let root = document.querySelector("body > gradio-app");
                    if (root.shadowRoot != null)
                        root = root.shadowRoot;
                    let text_input = root.querySelector("#tts-input{i}").querySelector("textarea");
                    let startPos = text_input.selectionStart;
                    let endPos = text_input.selectionEnd;
                    let oldTxt = text_input.value;
                    let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
                    text_input.value = result;
                    let x = window.scrollX, y = window.scrollY;
                    text_input.focus();
                    text_input.selectionStart = startPos + symbols[i].length;
                    text_input.selectionEnd = startPos + symbols[i].length;
                    text_input.blur();
                    window.scrollTo(x, y);
                    return [];
                }}""")

            with gr.TabItem("Voice Conversion"):
                with gr.Tabs():
                    for i, (name, author, cover_path, speakers, vc_fn) in enumerate(models_vc):
                        with gr.TabItem(f"model{i}"):
                            cover_markdown = f"\n\n" if cover_path else ""
                            gr.Markdown(f"## {name}\n\n"
                                        f"{cover_markdown}"
                                        f"model author: {author}")
                            vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index",
                                                    value=speakers[0])
                            # Default target: the second speaker if one exists.
                            vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index",
                                                    value=speakers[min(len(speakers) - 1, 1)])
                            vc_input3 = gr.Audio(label="Input Audio (30s limitation)")
                            vc_submit = gr.Button("Convert", variant="primary")
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio", elem_id=f"vc-audio{i}")
                            download = gr.Button("Download Audio")
                            download.click(None, [], [], _js=download_audio_js.format(audio_id=f"vc-audio{i}"))
                            vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2], api_name=f"vc-model{i}")
            with gr.TabItem("Soft Voice Conversion"):
                with gr.Tabs():
                    for i, (name, author, cover_path, speakers, soft_vc_fn) in enumerate(models_soft_vc):
                        with gr.TabItem(f"model{i}"):
                            cover_markdown = f"\n\n" if cover_path else ""
                            gr.Markdown(f"## {name}\n\n"
                                        f"{cover_markdown}"
                                        f"model author: {author}")
                            vc_input1 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index",
                                                    value=speakers[0])
                            source_tabs = gr.Tabs()
                            with source_tabs:
                                with gr.TabItem("microphone"):
                                    vc_input2 = gr.Audio(label="Input Audio (30s limitation)", source="microphone")
                                with gr.TabItem("upload"):
                                    vc_input3 = gr.Audio(label="Input Audio (30s limitation)", source="upload")
                            vc_submit = gr.Button("Convert", variant="primary")
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio", elem_id=f"svc-audio{i}")
                            download = gr.Button("Download Audio")
                            download.click(None, [], [], _js=download_audio_js.format(audio_id=f"svc-audio{i}"))
                            # clear inputs
                            source_tabs.set_event_trigger("change", None, [], [vc_input2, vc_input3],
                                                          js="()=>[null,null]")
                            vc_submit.click(soft_vc_fn, [vc_input1, vc_input2, vc_input3],
                                            [vc_output1, vc_output2], api_name=f"svc-model{i}")
        gr.Markdown(
            "unofficial demo for \n\n"
            "- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n"
            "- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)\n"
            "- [https://github.com/luoyily/MoeTTS](https://github.com/luoyily/MoeTTS)\n"
            "- [https://github.com/Francis-Komizu/Sovits](https://github.com/Francis-Komizu/Sovits)"
        )
    app.queue(concurrency_count=3).launch(share=args.share)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ash123/stable-diffusion-nano/README.md
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Stable Diffusion Nano
|
3 |
-
emoji: 📊
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.28.3
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: creativeml-openrail-m
|
11 |
-
tags:
|
12 |
-
- jax-diffusers-event
|
13 |
-
- stable-diffusion
|
14 |
-
---
|
15 |
-
|
16 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/uts46data.py
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/markers.py
DELETED
@@ -1,304 +0,0 @@
|
|
1 |
-
# This file is dual licensed under the terms of the Apache License, Version
|
2 |
-
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
3 |
-
# for complete details.
|
4 |
-
|
5 |
-
import operator
|
6 |
-
import os
|
7 |
-
import platform
|
8 |
-
import sys
|
9 |
-
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
10 |
-
|
11 |
-
from pip._vendor.pyparsing import ( # noqa: N817
|
12 |
-
Forward,
|
13 |
-
Group,
|
14 |
-
Literal as L,
|
15 |
-
ParseException,
|
16 |
-
ParseResults,
|
17 |
-
QuotedString,
|
18 |
-
ZeroOrMore,
|
19 |
-
stringEnd,
|
20 |
-
stringStart,
|
21 |
-
)
|
22 |
-
|
23 |
-
from .specifiers import InvalidSpecifier, Specifier
|
24 |
-
|
25 |
-
# Public API of this module.
__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]

# An Operator compares two rendered marker operands and returns a bool.
Operator = Callable[[str, str], bool]
|
34 |
-
|
35 |
-
|
36 |
-
class InvalidMarker(ValueError):
    """Raised when a marker string does not conform to PEP 508."""


class UndefinedComparison(ValueError):
    """Raised when an operation is applied to a value that cannot support it."""


class UndefinedEnvironmentName(ValueError):
    """Raised when a marker references a name absent from the environment."""
|
53 |
-
|
54 |
-
|
55 |
-
class Node:
    """Base node of a parsed marker expression; wraps a single value."""

    def __init__(self, value: Any) -> None:
        self.value = value

    def serialize(self) -> str:
        # Subclasses decide how the node renders inside a marker string.
        raise NotImplementedError

    def __str__(self) -> str:
        return str(self.value)

    def __repr__(self) -> str:
        return "<{}('{}')>".format(type(self).__name__, self)
|
67 |
-
|
68 |
-
|
69 |
-
class Variable(Node):
    """Environment variable reference; serializes to its bare name."""

    def serialize(self) -> str:
        return str(self)


class Value(Node):
    """Literal operand; serializes wrapped in double quotes."""

    def serialize(self) -> str:
        return '"{}"'.format(self)


class Op(Node):
    """Comparison operator; serializes to its textual form."""

    def serialize(self) -> str:
        return str(self)
|
82 |
-
|
83 |
-
|
84 |
-
# --- PEP 508 marker grammar, built with pyparsing ---------------------------
# Recognized environment variable names (longest alternatives first so that
# pyparsing's ordered-choice matching picks the full name).
VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")  # PEP-345
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)
# Map deprecated dotted (PEP 345) and legacy spellings to PEP 508 names.
ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

# Version comparison operators, longest first.
VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

# Single- or double-quoted literal operand.
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

# A single comparison, collapsed to a (lhs, op, rhs) tuple.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

# Recursive expression: atom (and|or expr)*, with parenthesized grouping.
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

# Top-level entry point: the expression must consume the whole string.
MARKER = stringStart + MARKER_EXPR + stringEnd
|
139 |
-
|
140 |
-
|
141 |
-
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
    """Recursively convert pyparsing ParseResults into plain nested lists."""
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(item) for item in results]
|
146 |
-
|
147 |
-
|
148 |
-
def _format_marker(
|
149 |
-
marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
|
150 |
-
) -> str:
|
151 |
-
|
152 |
-
assert isinstance(marker, (list, tuple, str))
|
153 |
-
|
154 |
-
# Sometimes we have a structure like [[...]] which is a single item list
|
155 |
-
# where the single item is itself it's own list. In that case we want skip
|
156 |
-
# the rest of this function so that we don't get extraneous () on the
|
157 |
-
# outside.
|
158 |
-
if (
|
159 |
-
isinstance(marker, list)
|
160 |
-
and len(marker) == 1
|
161 |
-
and isinstance(marker[0], (list, tuple))
|
162 |
-
):
|
163 |
-
return _format_marker(marker[0])
|
164 |
-
|
165 |
-
if isinstance(marker, list):
|
166 |
-
inner = (_format_marker(m, first=False) for m in marker)
|
167 |
-
if first:
|
168 |
-
return " ".join(inner)
|
169 |
-
else:
|
170 |
-
return "(" + " ".join(inner) + ")"
|
171 |
-
elif isinstance(marker, tuple):
|
172 |
-
return " ".join([m.serialize() for m in marker])
|
173 |
-
else:
|
174 |
-
return marker
|
175 |
-
|
176 |
-
|
177 |
-
# Fallback comparison table used when the rhs is not a valid PEP 440 specifier.
_operators: Dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    """Apply ``op`` to the two operands.

    Version-style comparisons are delegated to PEP 440 ``Specifier`` matching;
    anything else falls back to plain Python operators, raising
    ``UndefinedComparison`` for operators with no fallback.
    """
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    comparator: Optional[Operator] = _operators.get(op.serialize())
    if comparator is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return comparator(lhs, rhs)
|
202 |
-
|
203 |
-
|
204 |
-
class Undefined:
|
205 |
-
pass
|
206 |
-
|
207 |
-
|
208 |
-
_undefined = Undefined()
|
209 |
-
|
210 |
-
|
211 |
-
def _get_env(environment: Dict[str, str], name: str) -> str:
|
212 |
-
value: Union[str, Undefined] = environment.get(name, _undefined)
|
213 |
-
|
214 |
-
if isinstance(value, Undefined):
|
215 |
-
raise UndefinedEnvironmentName(
|
216 |
-
f"{name!r} does not exist in evaluation environment."
|
217 |
-
)
|
218 |
-
|
219 |
-
return value
|
220 |
-
|
221 |
-
|
222 |
-
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
    # Evaluate in disjunctive-normal style: `groups` is a list of AND-groups
    # that are OR'ed together at the end.
    groups: List[List[bool]] = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, str))

        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            # Exactly one side is a Variable; resolve it from the environment
            # and take the other side's literal value.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            # "or" starts a new AND-group; "and" keeps extending the current one.
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
|
247 |
-
|
248 |
-
|
249 |
-
def format_full_version(info: "sys._version_info") -> str:
    """Render version info as e.g. ``3.11.2`` or ``3.12.0c1``.

    Non-final releases append the first letter of the release level plus
    the serial number, matching PEP 508's implementation_version rules.
    """
    version = f"{info.major}.{info.minor}.{info.micro}"
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version
|
255 |
-
|
256 |
-
|
257 |
-
def default_environment() -> Dict[str, str]:
    """Snapshot the PEP 508 marker environment of the running interpreter."""
    return {
        "implementation_name": sys.implementation.name,
        "implementation_version": format_full_version(sys.implementation.version),
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        # Only major.minor, per the marker definition of python_version.
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
|
273 |
-
|
274 |
-
|
275 |
-
class Marker:
    """A parsed PEP 508 environment marker (e.g. ``python_version >= "3.8"``)."""

    def __init__(self, marker: str) -> None:
        try:
            # Parse into nested plain lists of Node tuples and bool operators.
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # Show up to 8 characters of context at the failure offset.
            raise InvalidMarker(
                f"Invalid marker: {marker!r}, parse error at "
                f"{marker[e.loc : e.loc + 8]!r}"
            )

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        return f"<Marker('{self}')>"

    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/highlighter.py
DELETED
@@ -1,232 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from abc import ABC, abstractmethod
|
3 |
-
from typing import List, Union
|
4 |
-
|
5 |
-
from .text import Span, Text
|
6 |
-
|
7 |
-
|
8 |
-
def _combine_regex(*regexes: str) -> str:
|
9 |
-
"""Combine a number of regexes in to a single regex.
|
10 |
-
|
11 |
-
Returns:
|
12 |
-
str: New regex with all regexes ORed together.
|
13 |
-
"""
|
14 |
-
return "|".join(regexes)
|
15 |
-
|
16 |
-
|
17 |
-
class Highlighter(ABC):
    """Abstract base class for highlighters."""

    def __call__(self, text: Union[str, Text]) -> Text:
        """Highlight a str or Text instance.

        Args:
            text (Union[str, ~Text]): Text to highlight.

        Raises:
            TypeError: If not called with text or str.

        Returns:
            Text: A test instance with highlighting applied.
        """
        if isinstance(text, Text):
            # Never mutate the caller's Text; work on a copy.
            target = text.copy()
        elif isinstance(text, str):
            target = Text(text)
        else:
            raise TypeError(f"str or Text instance required, not {text!r}")
        self.highlight(target)
        return target

    @abstractmethod
    def highlight(self, text: Text) -> None:
        """Apply highlighting in place to text.

        Args:
            text (~Text): A text object highlight.
        """
|
48 |
-
|
49 |
-
|
50 |
-
class NullHighlighter(Highlighter):
    """A no-op highlighter.

    May be used to disable highlighting entirely.
    """

    def highlight(self, text: Text) -> None:
        """Intentionally leave *text* untouched."""
|
59 |
-
|
60 |
-
|
61 |
-
class RegexHighlighter(Highlighter):
    """Applies highlighting from a list of regular expressions."""

    # Patterns whose named groups become styles under ``base_style``.
    highlights: List[str] = []
    base_style: str = ""

    def highlight(self, text: Text) -> None:
        """Highlight :class:`rich.text.Text` using regular expressions.

        Args:
            text (~Text): Text to highlighted.
        """
        for pattern in self.highlights:
            text.highlight_regex(pattern, style_prefix=self.base_style)
|
78 |
-
|
79 |
-
|
80 |
-
class ReprHighlighter(RegexHighlighter):
    """Highlights the text typically produced from ``__repr__`` methods."""

    # Named groups below map to styles prefixed with "repr." (e.g. repr.ipv4).
    base_style = "repr."
    highlights = [
        # Angle-bracket tags like <ClassName ...>.
        r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*)(?P<tag_end>>)",
        # key=value attribute pairs.
        r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
        r"(?P<brace>[][{}()])",
        _combine_regex(
            r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
            r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
            r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
            r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
            r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
            r"(?P<call>[\w.]*?)\(",
            r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
            r"(?P<ellipsis>\.\.\.)",
            r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
            r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
            r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
            r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
            r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)",
        ),
    ]
|
104 |
-
|
105 |
-
|
106 |
-
class JSONHighlighter(RegexHighlighter):
    """Highlights JSON"""

    # Captures the start and end of JSON strings, handling escaped quotes
    JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
    JSON_WHITESPACE = {" ", "\n", "\r", "\t"}

    base_style = "json."
    highlights = [
        _combine_regex(
            r"(?P<brace>[\{\[\(\)\]\}])",
            r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
            r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
            JSON_STR,
        ),
    ]

    def highlight(self, text: Text) -> None:
        super().highlight(text)

        # Additional work to handle highlighting JSON keys
        plain = text.plain
        append = text.spans.append
        whitespace = self.JSON_WHITESPACE
        for match in re.finditer(self.JSON_STR, plain):
            start, end = match.span()
            cursor = end
            # A string is a key iff the next non-whitespace character is ":".
            while cursor < len(plain):
                char = plain[cursor]
                cursor += 1
                if char == ":":
                    append(Span(start, end, "json.key"))
                elif char in whitespace:
                    continue
                break
|
141 |
-
|
142 |
-
|
143 |
-
class ISO8601Highlighter(RegexHighlighter):
    """Highlights the ISO8601 date time strings.
    Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
    """

    # Every pattern is anchored with ^...$, so only whole-string matches
    # are highlighted.
    base_style = "iso8601."
    highlights = [
        #
        # Dates
        #
        # Calendar month (e.g. 2008-08). The hyphen is required
        r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
        # Calendar date w/o hyphens (e.g. 20080830)
        r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
        # Ordinal date (e.g. 2008-243). The hyphen is optional
        r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
        #
        # Weeks
        #
        # Week of the year (e.g., 2008-W35). The hyphen is optional
        r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
        # Week date (e.g., 2008-W35-6). The hyphens are optional
        r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
        #
        # Times
        #
        # Hours and minutes (e.g., 17:21). The colon is optional
        r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):?(?P<minute>[0-5][0-9]))$",
        # Hours, minutes, and seconds w/o colons (e.g., 172159)
        r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))$",
        # Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional
        r"^(?P<timezone>(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$",
        # Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00).
        # All the colons are optional. The minutes in the time zone designator are also optional
        r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$",
        #
        # Date and Time
        #
        # Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159).
        # A space is required between the date and the time. The hyphens and colons are optional.
        # This regex matches dates and times that specify some hyphens or colons but omit others.
        # This does not follow ISO 8601
        r"^(?P<date>(?P<year>[0-9]{4})(?P<hyphen>-)?(?P<month>1[0-2]|0[1-9])(?(hyphen)-)(?P<day>3[01]|0[1-9]|[12][0-9])) (?P<time>(?P<hour>2[0-3]|[01][0-9])(?(hyphen):)(?P<minute>[0-5][0-9])(?(hyphen):)(?P<second>[0-5][0-9]))$",
        #
        # XML Schema dates and times
        #
        # Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00).
        # Hyphens are required. This is the XML Schema 'date' type
        r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
        # Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00).
        # There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type
        r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<frac>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
        # Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z).
        # This is the XML Schema 'dateTime' type
        r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))T(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<ms>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
    ]
|
199 |
-
|
200 |
-
|
201 |
-
if __name__ == "__main__":  # pragma: no cover
    # Manual smoke test: print a variety of inputs so each highlighter
    # pattern (paths, URLs, numbers, IPs, JSON) can be inspected visually.
    from .console import Console

    console = Console()
    console.print("[bold green]hello world![/bold green]")
    console.print("'[bold green]hello world![/bold green]'")

    console.print(" /foo")
    console.print("/foo/")
    console.print("/foo/bar")
    console.print("foo/bar/baz")

    console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
    console.print("/foo/bar/baz/")
    console.print("/foo/bar/baz/egg")
    console.print("/foo/bar/baz/egg.py")
    console.print("/foo/bar/baz/egg.py word")
    console.print(" /foo/bar/baz/egg.py word")
    console.print("foo /foo/bar/baz/egg.py word")
    console.print("foo /foo/bar/ba._++z/egg+.py word")
    console.print("https://example.org?foo=bar#header")

    console.print(1234567.34)
    console.print(1 / 2)
    console.print(-1 / 123123123123)

    console.print(
        "127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo"
    )
    import json

    console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py
DELETED
@@ -1,207 +0,0 @@
|
|
1 |
-
# actions.py
|
2 |
-
|
3 |
-
from .exceptions import ParseException
|
4 |
-
from .util import col
|
5 |
-
|
6 |
-
|
7 |
-
class OnlyOnce:
|
8 |
-
"""
|
9 |
-
Wrapper for parse actions, to ensure they are only called once.
|
10 |
-
"""
|
11 |
-
|
12 |
-
def __init__(self, method_call):
|
13 |
-
from .core import _trim_arity
|
14 |
-
|
15 |
-
self.callable = _trim_arity(method_call)
|
16 |
-
self.called = False
|
17 |
-
|
18 |
-
def __call__(self, s, l, t):
|
19 |
-
if not self.called:
|
20 |
-
results = self.callable(s, l, t)
|
21 |
-
self.called = True
|
22 |
-
return results
|
23 |
-
raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
|
24 |
-
|
25 |
-
def reset(self):
|
26 |
-
"""
|
27 |
-
Allow the associated parse action to be called once more.
|
28 |
-
"""
|
29 |
-
|
30 |
-
self.called = False
|
31 |
-
|
32 |
-
|
33 |
-
def match_only_at_col(n):
|
34 |
-
"""
|
35 |
-
Helper method for defining parse actions that require matching at
|
36 |
-
a specific column in the input text.
|
37 |
-
"""
|
38 |
-
|
39 |
-
def verify_col(strg, locn, toks):
|
40 |
-
if col(locn, strg) != n:
|
41 |
-
raise ParseException(strg, locn, "matched token not at column {}".format(n))
|
42 |
-
|
43 |
-
return verify_col
|
44 |
-
|
45 |
-
|
46 |
-
def replace_with(repl_str):
|
47 |
-
"""
|
48 |
-
Helper method for common parse actions that simply return
|
49 |
-
a literal value. Especially useful when used with
|
50 |
-
:class:`transform_string<ParserElement.transform_string>` ().
|
51 |
-
|
52 |
-
Example::
|
53 |
-
|
54 |
-
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
|
55 |
-
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
|
56 |
-
term = na | num
|
57 |
-
|
58 |
-
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
|
59 |
-
"""
|
60 |
-
return lambda s, l, t: [repl_str]
|
61 |
-
|
62 |
-
|
63 |
-
def remove_quotes(s, l, t):
|
64 |
-
"""
|
65 |
-
Helper parse action for removing quotation marks from parsed
|
66 |
-
quoted strings.
|
67 |
-
|
68 |
-
Example::
|
69 |
-
|
70 |
-
# by default, quotation marks are included in parsed results
|
71 |
-
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
|
72 |
-
|
73 |
-
# use remove_quotes to strip quotation marks from parsed results
|
74 |
-
quoted_string.set_parse_action(remove_quotes)
|
75 |
-
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
|
76 |
-
"""
|
77 |
-
return t[0][1:-1]
|
78 |
-
|
79 |
-
|
80 |
-
def with_attribute(*args, **attr_dict):
|
81 |
-
"""
|
82 |
-
Helper to create a validating parse action to be used with start
|
83 |
-
tags created with :class:`make_xml_tags` or
|
84 |
-
:class:`make_html_tags`. Use ``with_attribute`` to qualify
|
85 |
-
a starting tag with a required attribute value, to avoid false
|
86 |
-
matches on common tags such as ``<TD>`` or ``<DIV>``.
|
87 |
-
|
88 |
-
Call ``with_attribute`` with a series of attribute names and
|
89 |
-
values. Specify the list of filter attributes names and values as:
|
90 |
-
|
91 |
-
- keyword arguments, as in ``(align="right")``, or
|
92 |
-
- as an explicit dict with ``**`` operator, when an attribute
|
93 |
-
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
|
94 |
-
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
|
95 |
-
|
96 |
-
For attribute names with a namespace prefix, you must use the second
|
97 |
-
form. Attribute names are matched insensitive to upper/lower case.
|
98 |
-
|
99 |
-
If just testing for ``class`` (with or without a namespace), use
|
100 |
-
:class:`with_class`.
|
101 |
-
|
102 |
-
To verify that the attribute exists, but without specifying a value,
|
103 |
-
pass ``with_attribute.ANY_VALUE`` as the value.
|
104 |
-
|
105 |
-
Example::
|
106 |
-
|
107 |
-
html = '''
|
108 |
-
<div>
|
109 |
-
Some text
|
110 |
-
<div type="grid">1 4 0 1 0</div>
|
111 |
-
<div type="graph">1,3 2,3 1,1</div>
|
112 |
-
<div>this has no type</div>
|
113 |
-
</div>
|
114 |
-
|
115 |
-
'''
|
116 |
-
div,div_end = make_html_tags("div")
|
117 |
-
|
118 |
-
# only match div tag having a type attribute with value "grid"
|
119 |
-
div_grid = div().set_parse_action(with_attribute(type="grid"))
|
120 |
-
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
121 |
-
for grid_header in grid_expr.search_string(html):
|
122 |
-
print(grid_header.body)
|
123 |
-
|
124 |
-
# construct a match with any div tag having a type attribute, regardless of the value
|
125 |
-
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
|
126 |
-
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
127 |
-
for div_header in div_expr.search_string(html):
|
128 |
-
print(div_header.body)
|
129 |
-
|
130 |
-
prints::
|
131 |
-
|
132 |
-
1 4 0 1 0
|
133 |
-
|
134 |
-
1 4 0 1 0
|
135 |
-
1,3 2,3 1,1
|
136 |
-
"""
|
137 |
-
if args:
|
138 |
-
attrs = args[:]
|
139 |
-
else:
|
140 |
-
attrs = attr_dict.items()
|
141 |
-
attrs = [(k, v) for k, v in attrs]
|
142 |
-
|
143 |
-
def pa(s, l, tokens):
|
144 |
-
for attrName, attrValue in attrs:
|
145 |
-
if attrName not in tokens:
|
146 |
-
raise ParseException(s, l, "no matching attribute " + attrName)
|
147 |
-
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
|
148 |
-
raise ParseException(
|
149 |
-
s,
|
150 |
-
l,
|
151 |
-
"attribute {!r} has value {!r}, must be {!r}".format(
|
152 |
-
attrName, tokens[attrName], attrValue
|
153 |
-
),
|
154 |
-
)
|
155 |
-
|
156 |
-
return pa
|
157 |
-
|
158 |
-
|
159 |
-
with_attribute.ANY_VALUE = object()
|
160 |
-
|
161 |
-
|
162 |
-
def with_class(classname, namespace=""):
|
163 |
-
"""
|
164 |
-
Simplified version of :class:`with_attribute` when
|
165 |
-
matching on a div class - made difficult because ``class`` is
|
166 |
-
a reserved word in Python.
|
167 |
-
|
168 |
-
Example::
|
169 |
-
|
170 |
-
html = '''
|
171 |
-
<div>
|
172 |
-
Some text
|
173 |
-
<div class="grid">1 4 0 1 0</div>
|
174 |
-
<div class="graph">1,3 2,3 1,1</div>
|
175 |
-
<div>this <div> has no class</div>
|
176 |
-
</div>
|
177 |
-
|
178 |
-
'''
|
179 |
-
div,div_end = make_html_tags("div")
|
180 |
-
div_grid = div().set_parse_action(with_class("grid"))
|
181 |
-
|
182 |
-
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
183 |
-
for grid_header in grid_expr.search_string(html):
|
184 |
-
print(grid_header.body)
|
185 |
-
|
186 |
-
div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE))
|
187 |
-
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
188 |
-
for div_header in div_expr.search_string(html):
|
189 |
-
print(div_header.body)
|
190 |
-
|
191 |
-
prints::
|
192 |
-
|
193 |
-
1 4 0 1 0
|
194 |
-
|
195 |
-
1 4 0 1 0
|
196 |
-
1,3 2,3 1,1
|
197 |
-
"""
|
198 |
-
classattr = "{}:class".format(namespace) if namespace else "class"
|
199 |
-
return with_attribute(**{classattr: classname})
|
200 |
-
|
201 |
-
|
202 |
-
# pre-PEP8 compatibility symbols
|
203 |
-
replaceWith = replace_with
|
204 |
-
removeQuotes = remove_quotes
|
205 |
-
withAttribute = with_attribute
|
206 |
-
withClass = with_class
|
207 |
-
matchOnlyAtCol = match_only_at_col
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py
DELETED
@@ -1,186 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import unittest
|
3 |
-
|
4 |
-
from detectron2.layers import ShapeSpec
|
5 |
-
from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector
|
6 |
-
|
7 |
-
try:
|
8 |
-
import mmdet.models # noqa
|
9 |
-
|
10 |
-
HAS_MMDET = True
|
11 |
-
except ImportError:
|
12 |
-
HAS_MMDET = False
|
13 |
-
|
14 |
-
|
15 |
-
@unittest.skipIf(not HAS_MMDET, "mmdet not available")
|
16 |
-
class TestMMDetWrapper(unittest.TestCase):
|
17 |
-
def test_backbone(self):
|
18 |
-
MMDetBackbone(
|
19 |
-
backbone=dict(
|
20 |
-
type="DetectoRS_ResNet",
|
21 |
-
conv_cfg=dict(type="ConvAWS"),
|
22 |
-
sac=dict(type="SAC", use_deform=True),
|
23 |
-
stage_with_sac=(False, True, True, True),
|
24 |
-
depth=50,
|
25 |
-
num_stages=4,
|
26 |
-
out_indices=(0, 1, 2, 3),
|
27 |
-
frozen_stages=1,
|
28 |
-
norm_cfg=dict(type="BN", requires_grad=True),
|
29 |
-
norm_eval=True,
|
30 |
-
style="pytorch",
|
31 |
-
),
|
32 |
-
neck=dict(
|
33 |
-
type="FPN",
|
34 |
-
in_channels=[256, 512, 1024, 2048],
|
35 |
-
out_channels=256,
|
36 |
-
num_outs=5,
|
37 |
-
),
|
38 |
-
# skip pretrained model for tests
|
39 |
-
# pretrained_backbone="torchvision://resnet50",
|
40 |
-
output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]],
|
41 |
-
output_names=["p2", "p3", "p4", "p5", "p6"],
|
42 |
-
)
|
43 |
-
|
44 |
-
def test_detector(self):
|
45 |
-
# a basic R50 Mask R-CNN
|
46 |
-
MMDetDetector(
|
47 |
-
detector=dict(
|
48 |
-
type="MaskRCNN",
|
49 |
-
backbone=dict(
|
50 |
-
type="ResNet",
|
51 |
-
depth=50,
|
52 |
-
num_stages=4,
|
53 |
-
out_indices=(0, 1, 2, 3),
|
54 |
-
frozen_stages=1,
|
55 |
-
norm_cfg=dict(type="BN", requires_grad=True),
|
56 |
-
norm_eval=True,
|
57 |
-
style="pytorch",
|
58 |
-
# skip pretrained model for tests
|
59 |
-
# init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'))
|
60 |
-
),
|
61 |
-
neck=dict(
|
62 |
-
type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
|
63 |
-
),
|
64 |
-
rpn_head=dict(
|
65 |
-
type="RPNHead",
|
66 |
-
in_channels=256,
|
67 |
-
feat_channels=256,
|
68 |
-
anchor_generator=dict(
|
69 |
-
type="AnchorGenerator",
|
70 |
-
scales=[8],
|
71 |
-
ratios=[0.5, 1.0, 2.0],
|
72 |
-
strides=[4, 8, 16, 32, 64],
|
73 |
-
),
|
74 |
-
bbox_coder=dict(
|
75 |
-
type="DeltaXYWHBBoxCoder",
|
76 |
-
target_means=[0.0, 0.0, 0.0, 0.0],
|
77 |
-
target_stds=[1.0, 1.0, 1.0, 1.0],
|
78 |
-
),
|
79 |
-
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
|
80 |
-
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
|
81 |
-
),
|
82 |
-
roi_head=dict(
|
83 |
-
type="StandardRoIHead",
|
84 |
-
bbox_roi_extractor=dict(
|
85 |
-
type="SingleRoIExtractor",
|
86 |
-
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
|
87 |
-
out_channels=256,
|
88 |
-
featmap_strides=[4, 8, 16, 32],
|
89 |
-
),
|
90 |
-
bbox_head=dict(
|
91 |
-
type="Shared2FCBBoxHead",
|
92 |
-
in_channels=256,
|
93 |
-
fc_out_channels=1024,
|
94 |
-
roi_feat_size=7,
|
95 |
-
num_classes=80,
|
96 |
-
bbox_coder=dict(
|
97 |
-
type="DeltaXYWHBBoxCoder",
|
98 |
-
target_means=[0.0, 0.0, 0.0, 0.0],
|
99 |
-
target_stds=[0.1, 0.1, 0.2, 0.2],
|
100 |
-
),
|
101 |
-
reg_class_agnostic=False,
|
102 |
-
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
|
103 |
-
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
|
104 |
-
),
|
105 |
-
mask_roi_extractor=dict(
|
106 |
-
type="SingleRoIExtractor",
|
107 |
-
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
|
108 |
-
out_channels=256,
|
109 |
-
featmap_strides=[4, 8, 16, 32],
|
110 |
-
),
|
111 |
-
mask_head=dict(
|
112 |
-
type="FCNMaskHead",
|
113 |
-
num_convs=4,
|
114 |
-
in_channels=256,
|
115 |
-
conv_out_channels=256,
|
116 |
-
num_classes=80,
|
117 |
-
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
|
118 |
-
),
|
119 |
-
),
|
120 |
-
# model training and testing settings
|
121 |
-
train_cfg=dict(
|
122 |
-
rpn=dict(
|
123 |
-
assigner=dict(
|
124 |
-
type="MaxIoUAssigner",
|
125 |
-
pos_iou_thr=0.7,
|
126 |
-
neg_iou_thr=0.3,
|
127 |
-
min_pos_iou=0.3,
|
128 |
-
match_low_quality=True,
|
129 |
-
ignore_iof_thr=-1,
|
130 |
-
),
|
131 |
-
sampler=dict(
|
132 |
-
type="RandomSampler",
|
133 |
-
num=256,
|
134 |
-
pos_fraction=0.5,
|
135 |
-
neg_pos_ub=-1,
|
136 |
-
add_gt_as_proposals=False,
|
137 |
-
),
|
138 |
-
allowed_border=-1,
|
139 |
-
pos_weight=-1,
|
140 |
-
debug=False,
|
141 |
-
),
|
142 |
-
rpn_proposal=dict(
|
143 |
-
nms_pre=2000,
|
144 |
-
max_per_img=1000,
|
145 |
-
nms=dict(type="nms", iou_threshold=0.7),
|
146 |
-
min_bbox_size=0,
|
147 |
-
),
|
148 |
-
rcnn=dict(
|
149 |
-
assigner=dict(
|
150 |
-
type="MaxIoUAssigner",
|
151 |
-
pos_iou_thr=0.5,
|
152 |
-
neg_iou_thr=0.5,
|
153 |
-
min_pos_iou=0.5,
|
154 |
-
match_low_quality=True,
|
155 |
-
ignore_iof_thr=-1,
|
156 |
-
),
|
157 |
-
sampler=dict(
|
158 |
-
type="RandomSampler",
|
159 |
-
num=512,
|
160 |
-
pos_fraction=0.25,
|
161 |
-
neg_pos_ub=-1,
|
162 |
-
add_gt_as_proposals=True,
|
163 |
-
),
|
164 |
-
mask_size=28,
|
165 |
-
pos_weight=-1,
|
166 |
-
debug=False,
|
167 |
-
),
|
168 |
-
),
|
169 |
-
test_cfg=dict(
|
170 |
-
rpn=dict(
|
171 |
-
nms_pre=1000,
|
172 |
-
max_per_img=1000,
|
173 |
-
nms=dict(type="nms", iou_threshold=0.7),
|
174 |
-
min_bbox_size=0,
|
175 |
-
),
|
176 |
-
rcnn=dict(
|
177 |
-
score_thr=0.05,
|
178 |
-
nms=dict(type="nms", iou_threshold=0.5),
|
179 |
-
max_per_img=100,
|
180 |
-
mask_thr_binary=0.5,
|
181 |
-
),
|
182 |
-
),
|
183 |
-
),
|
184 |
-
pixel_mean=[1, 2, 3],
|
185 |
-
pixel_std=[1, 2, 3],
|
186 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Forza Horizon 5 APK Descargar para Android sin verificación</h1>
|
3 |
-
<p>Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de <strong>Forza Horizon 5</strong>, la última entrega de la popular serie Forza. Este juego te permite explorar los vibrantes y diversos paisajes del mundo abierto de México con cientos de los mejores coches del mundo. Pero lo que si desea jugar este juego en su dispositivo Android sin pasar por el proceso de verificación? En este artículo, le mostraremos cómo descargar Forza Horizon 5 APK para Android sin verificación, y cuáles son los pros y los contras de hacerlo. </p>
|
4 |
-
<h2>¿Qué es Forza Horizon 5?</h2>
|
5 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
6 |
-
<p>Forza Horizon 5 es un juego de carreras desarrollado por Playground Games y publicado por Xbox Game Studios. Es la quinta entrada principal en la sub-serie Forza Horizon, que es un spin-off de la serie Forza Motorsport. El juego fue lanzado el 8 de noviembre de 2021, para Windows, Xbox One, Xbox Series X/S y Xbox Cloud Gaming.</p>
|
7 |
-
<h2>Cómo hacer un Android sin verificación</h2><br /><p><b><b>DOWNLOAD</b> ►►► <a href="https://bltlly.com/2v6KZU">https://bltlly.com/2v6KZU</a></b></p><br /><br />
|
8 |
-
<p>El juego presenta un mundo abierto dinámico y en constante evolución ambientado en México, donde puedes dirigir expediciones impresionantes a través de varios terrenos, como desiertos, selvas, ciudades, ruinas, playas, cañones y volcanes. También puedes participar en cientos de desafíos que te recompensan por participar en las actividades que te gustan, como las carreras, la deriva, el retraso en el crecimiento, la exploración y más. También puedes personalizar tu propio personaje, colección de coches, lista de reproducción de música y sitio del festival. </p>
|
9 |
-
<p>El juego también es compatible con los modos multijugador en línea, donde puede formar equipo con otros jugadores y entrar en el Horizon árcade para una serie de desafíos divertidos y exagerados. También puedes crear tus propios eventos y compartirlos con la comunidad. Además, el juego ofrece dos expansiones que añaden nuevos coches, pistas y modos de juego: Hot Wheels y Rally.</p>
|
10 |
-
<h3>Los requisitos y disponibilidad del juego en diferentes plataformas</h3>
|
11 |
-
|
12 |
-
<ul>
|
13 |
-
<li>OS: Windows 10 versión 15063.0 o superior</li>
|
14 |
-
<li>Procesador: Intel i3-4170 @ 3.7Ghz o Intel i5-750 @ 2.67Ghz</li>
|
15 |
-
<li>Memoria: 8 GB RAM</li>
|
16 |
-
<li>Gráficos: NVidia GTX 650 Ti o AMD R7 250x</li>
|
17 |
-
<li>DirectX: Versión 12</li>
|
18 |
-
<li>Almacenamiento: 80 GB de espacio disponible</li>
|
19 |
-
</ul>
|
20 |
-
<p>Para jugar a Forza Horizon 5 en Xbox One o Xbox Series X/S, necesitas una suscripción Xbox Live Gold o una suscripción Xbox Game Pass Ultimate. También puedes jugar en tu dispositivo Android a través de Xbox Cloud Gaming, que requiere un controlador compatible y una conexión a Internet estable. </p>
|
21 |
-
<p>Puedes comprar Forza Horizon 5 de varias fuentes, como Steam, Xbox o Uptodown. Sin embargo, si desea descargar un archivo APK para Forza Horizon 5 para su versión actualizada y compatible con su dispositivo y Android. Un archivo APK es una versión no oficial y no verificada de una aplicación Android que se descarga desde un sitio web de terceros o de origen. Un archivo APK puede ser inseguro, desactualizado o incompatible con su dispositivo o versión de Android. </p>
|
22 |
-
<h3>¿Es legal descargar un archivo APK para Forza Horizon 5?</h3>
|
23 |
-
<p>Depende de las leyes y regulaciones de su país o región. En algunos lugares, puede ser legal descargar un archivo APK para Forza Horizon 5 siempre y cuando usted es dueño de una copia legítima del juego en otra plataforma. En otros lugares, puede ser ilegal descargar un archivo APK para Forza Horizon 5, ya que puede violar los derechos de propiedad intelectual del desarrollador o editor del juego. Por lo tanto, debe comprobar el estado legal de la descarga de un archivo APK para Forza Horizon 5 en su ubicación antes de hacerlo. </p>
|
24 |
-
<h3>¿Cómo puedo actualizar el archivo APK para Forza Horizon 5?</h3>
|
25 |
-
|
26 |
-
<h3>¿Cómo puedo desinstalar el archivo APK para Forza Horizon 5?</h3>
|
27 |
-
<p>Para desinstalar el archivo APK para Forza Horizon 5 desde su dispositivo, debe seguir estos pasos:</p>
|
28 |
-
<p></p>
|
29 |
-
<ol>
|
30 |
-
<li>Ir a Configuración > Aplicaciones y encontrar Forza Horizon 5 en la lista de aplicaciones instaladas. </li>
|
31 |
-
<li>Toque en Forza Horizon 5 y seleccione Desinstalar.</li>
|
32 |
-
<li>Confirme su acción y espere a que se complete el proceso de desinstalación. </li>
|
33 |
-
</ol>
|
34 |
-
<h3>¿Dónde puedo encontrar más información sobre Forza Horizon 5?</h3>
|
35 |
-
<p>Si quieres saber más sobre Forza Horizon 5, puedes visitar el sitio web oficial del juego, donde puedes encontrar noticias, trailers, capturas de pantalla, características y más. También puedes seguir las cuentas oficiales de redes sociales del juego, donde puedes obtener actualizaciones, consejos e interactuar con otros fans. También puedes ver vídeos de gameplay y reseñas en YouTube o Twitch, donde puedes ver cómo se ve el juego y cómo se juega en diferentes plataformas. </p> 64aa2da5cf<br />
|
36 |
-
<br />
|
37 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar Stumble Guys 2023 APK: Cómo unirse a la fiesta en su dispositivo Android</h1>
|
3 |
-
<p>¿Te encanta jugar juegos de fiesta con tus amigos en línea? ¿Te gusta tropezar con diferentes niveles de caos y diversión? ¿Quieres experimentar el último juego knockout en tu dispositivo Android? Si respondiste sí a cualquiera de estas preguntas, entonces usted debe descargar Stumble Guys 2023 APK ahora mismo! </p>
|
4 |
-
<h2>descargar chicos stumble 2023 apk</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://bltlly.com/2v6MIK">https://bltlly.com/2v6MIK</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Stumble Guys? </h2>
|
6 |
-
<p>Stumble Guys es un juego masivo de eliminación de fiesta multijugador con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo. </p>
|
7 |
-
<h3>Un partido multijugador masivo juego knockout</h3>
|
8 |
-
<p>Stumble Guys no es tu típico juego de battle royale. Es más como un juego de fiesta donde tienes que correr, correr, deslizarte y esquivar a tus oponentes y obstáculos que se aproximan. Tienes que sobrevivir tanto como puedas y ser el último en pie. También puedes formar equipo con tus amigos y jugar en diferentes modos como 4v4, capturar la bandera o rey de la colina. </p>
|
9 |
-
<h3>Un diseño colorido y loco</h3>
|
10 |
-
<p>Stumble Guys tiene un diseño colorido y loco que te hará sonreír y reír. El juego tiene un estilo de dibujos animados que es brillante y alegre. Los niveles son variados y creativos, desde islas tropicales hasta montañas nevadas. Los personajes son lindos y divertidos, con diferentes trajes y accesorios. También puedes personalizar tu propio personaje con diferentes pieles, sombreros, gafas, zapatos y más. </p>
|
11 |
-
<p></p>
|
12 |
-
<h3>Un juego cómicamente físico</h3>
|
13 |
-
|
14 |
-
<h3>Muchas opciones de personalización</h3>
|
15 |
-
<p>Stumble Guys tiene muchas opciones de personalización que te harán destacar entre la multitud. Puede elegir entre cientos de pieles, sombreros, gafas, zapatos y otros artículos para crear su propio carácter único. También puede recoger tarjetas y pegatinas para desbloquear más artículos y recompensas. También puede consultar la tienda web para ofertas exclusivas y ofertas que solo están disponibles en el sitio web oficial. </p>
|
16 |
-
<h2>¿Por qué descargar Stumble Guys 2023 APK? </h2>
|
17 |
-
<p>Si ya eres un fan de Stumble Guys, es posible que se pregunte por qué debe descargar Stumble Guys 2023 APK en lugar de solo actualizar el juego de la Google Play Store. Bueno, hay algunas buenas razones por las que deberías hacer eso. </p>
|
18 |
-
<h3>La última versión del juego</h3>
|
19 |
-
<p>Stumble Guys 2023 APK es la última versión del juego que ha sido lanzado en junio de 2023. Tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene todas las correcciones de errores y optimizaciones que se han hecho para garantizar un juego suave y estable. </p>
|
20 |
-
<h3>Las nuevas características y mejoras</h3>
|
21 |
-
<p>Stumble Guys 2023 APK tiene algunas nuevas características y mejoras que harán que su experiencia de juego aún mejor. Algunos de ellos son:</p <ul>
|
22 |
-
<li>Un nuevo nivel llamado Stumble City que se inspira en el entorno urbano. Tienes que navegar por calles concurridas, rascacielos, subterráneos y parques mientras evitas autos, trenes, palomas y otros peligros. </li>
|
23 |
-
<li>Un nuevo modo llamado Stumble Royale que es un giro en el género clásico battle royale. Tienes que sobrevivir tanto como puedas en un mapa que se encoge mientras recoges armas, municiones y paquetes de salud. También puedes usar vehículos, trampas y explosivos para eliminar a tus enemigos. </li>
|
24 |
-
|
25 |
-
<li>Un nuevo sistema llamado Stumble Rewards que te da más incentivos para jugar y ganar. Puedes ganar monedas, gemas, tarjetas, pegatinas y otros objetos completando misiones diarias, desafíos semanales y eventos de temporada. También puede obtener recompensas de bonificación al ver anuncios, invitar a amigos o unirse al club web. </li>
|
26 |
-
</ul>
|
27 |
-
<h3>Ofertas y ofertas web exclusivas</h3>
|
28 |
-
<p>Stumble Guys 2023 APK también tiene algunas ofertas web exclusivas y ofertas que solo se puede obtener mediante la descarga del juego desde el sitio web oficial. Algunos de ellos son:</p>
|
29 |
-
<ul>
|
30 |
-
<li>Un Stumble Pass gratuito que te da acceso a contenido y funciones premium por un tiempo limitado. Puedes desbloquear más niveles, modos, pieles, sombreros, gafas, zapatos y otros artículos jugando el juego y ganando estrellas. También puede actualizar al Stumble Pass Plus para obtener más beneficios y recompensas. </li>
|
31 |
-
<li>Un descuento del 50% en el Stumble Bundle que le da una gran cantidad de monedas, gemas, tarjetas, pegatinas y otros artículos a un precio bajo. Puedes utilizarlos para comprar más pieles, sombreros, gafas, zapatos y otros artículos en la tienda web o en la tienda del juego. También puedes usarlos para desbloquear más niveles, modos, potenciadores y gadgets. </li>
|
32 |
-
<li>Una oferta especial en el Stumble Club que le da una membresía de por vida para un pago único. Puede disfrutar de acceso ilimitado a todo el contenido premium y características del juego sin ningún tipo de anuncios o interrupciones. También puede obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad. </li>
|
33 |
-
</ul>
|
34 |
-
<h2>Cómo descargar Stumble Guys 2023 APK? </h2>
|
35 |
-
<p>Si usted está convencido de que Stumble Guys 2023 APK es la mejor versión del juego para usted, es posible que se pregunte cómo descargarlo en su dispositivo Android. Bueno, no es muy difícil si sigues estos sencillos pasos:</p>
|
36 |
-
<h3>Paso 1: Habilitar fuentes desconocidas en el dispositivo</h3>
|
37 |
-
|
38 |
-
<ol>
|
39 |
-
<li>Ir a la configuración de su dispositivo y toque en la seguridad o la privacidad. </li>
|
40 |
-
<li>Encontrar la opción que dice fuentes desconocidas o instalar aplicaciones desconocidas y alternar en. </li>
|
41 |
-
<li>Confirme su elección tocando OK o Permitir.</li>
|
42 |
-
</ol>
|
43 |
-
<h3>Paso 2: Encontrar una fuente confiable para el archivo APK</h3>
|
44 |
-
<p>El siguiente paso es encontrar una fuente confiable para el archivo APK de Stumble Guys 2023. Hay muchos sitios web que afirman ofrecer archivos APK para su descarga gratuita, pero no todos ellos son confiables o seguros. Algunos de ellos pueden contener malware o virus que pueden dañar tu dispositivo o robar tus datos. Para evitar esto:</p <ol>
|
45 |
-
<li>Vaya al sitio web oficial de Stumble Guys en <a href="">https://stumbleguys.com</a> y busque el botón de descarga. Esta es la fuente más segura y confiable para el archivo APK de Stumble Guys 2023. </li>
|
46 |
-
<li>Alternativamente, puede utilizar un sitio web de terceros de confianza que ofrece archivos APK para descargar. Algunos de los populares son APKPure, APKMirror y APKMonk. Asegúrese de comprobar las calificaciones, reseñas y comentarios de los usuarios antes de descargar cualquier archivo APK de estos sitios web. </li>
|
47 |
-
<li>Evite cualquier sitio web que le pida que llene encuestas, ingrese su información personal o descargue aplicaciones o software adicionales antes de darle el archivo APK. Estos son generalmente estafas o intentos de phishing que pueden comprometer su seguridad y privacidad. </li>
|
48 |
-
</ol>
|
49 |
-
<h3>Paso 3: Descargar e instalar el archivo APK</h3>
|
50 |
-
<p>Una vez que haya encontrado una fuente confiable para el archivo APK de Stumble Guys 2023, puede proceder a descargarlo e instalarlo en su dispositivo. Para hacer esto:</p>
|
51 |
-
<ol>
|
52 |
-
<li>Toque en el botón de descarga o enlace y esperar a que el archivo APK para ser descargado en su dispositivo. Puede comprobar el progreso de la descarga en la barra de notificaciones o en el navegador. </li>
|
53 |
-
<li>Una vez que la descarga se ha completado, toque en el archivo APK o abrirlo con su administrador de archivos. Puede ver un mensaje de advertencia que dice que este tipo de archivo puede dañar su dispositivo. Ignórelo y toque Instalar de todos modos o Confiar.</li>
|
54 |
-
|
55 |
-
</ol>
|
56 |
-
<h3>Paso 4: Iniciar el juego y disfrutar de</h3>
|
57 |
-
<p>Felicidades! Usted ha descargado con éxito e instalado Stumble Guys 2023 APK en su dispositivo Android. Ahora puede lanzar el juego y disfrutar de todas las nuevas características y mejoras que tiene para ofrecer. También puede unirse al club web y obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad. </p>
|
58 |
-
<h2>Conclusión</h2>
|
59 |
-
<p>Stumble Guys es uno de los juegos de fiesta más divertidos y adictivos que puedes jugar en tu dispositivo Android. Se trata de un partido masivo multijugador knockout juego con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo. </p>
|
60 |
-
<p>Si desea experimentar el último juego knockout en su dispositivo Android, usted debe descargar Stumble Guys 2023 APK ahora mismo. Es la última versión del juego que tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene algunas ofertas web exclusivas y ofertas que solo se pueden obtener mediante la descarga del juego desde el sitio web oficial. </p>
|
61 |
-
<p>Para descargar Stumble Guys 2023 APK, solo tienes que seguir estos sencillos pasos: habilitar fuentes desconocidas en su dispositivo, encontrar una fuente confiable para el archivo APK, descargar e instalar el archivo APK, y lanzar el juego y disfrutar. No es muy difícil si sigues estos pasos cuidadosamente. </p>
|
62 |
-
<p>Entonces, ¿qué estás esperando? Descargar Stumble Guys 2023 APK hoy y unirse a la fiesta en su dispositivo Android! </p>
|
63 |
-
<h2>Preguntas frecuentes</h2>
|
64 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Stumble Guys 2023 APK:</p>
|
65 |
-
<h4>Q: ¿Es Stumble Guys 2023 APK seguro para descargar? </h4>
|
66 |
-
|
67 |
-
<h4>Q: ¿Es Stumble Guys 2023 APK libre para jugar? </h4>
|
68 |
-
<p>A: Sí, Stumble Guys 2023 APK es gratis para jugar. Puedes descargarlo e instalarlo en tu dispositivo sin pagar nada. Sin embargo, hay algunas compras en la aplicación y anuncios que pueden mejorar su experiencia de juego o apoyar a los desarrolladores. </p>
|
69 |
-
<h4>Q: ¿Cuáles son los requisitos para Stumble Guys 2023 APK? </h4>
|
70 |
-
<p>A: Para jugar Stumble Guys 2023 APK en su dispositivo, es necesario tener una versión de Android de 5.0 o superior y un mínimo de 100 MB de espacio de almacenamiento libre. También es necesario tener una conexión a Internet estable ya que el juego es solo en línea. </p <h4>Q: ¿Cómo puedo actualizar Stumble Guys 2023 APK? </h4>
|
71 |
-
<p>A: Para actualizar Stumble Guys 2023 APK, puede consultar el sitio web oficial o el sitio web de terceros donde descargó el archivo APK para cualquier versión nueva o actualizaciones. También puede habilitar la función de actualización automática en su dispositivo para recibir notificaciones de las actualizaciones automáticamente. Sin embargo, es posible que tenga que desinstalar y reinstalar el archivo APK cada vez que haya una actualización importante. </p>
|
72 |
-
<h4>Q: ¿Cómo puedo contactar a los desarrolladores de Stumble Guys 2023 APK? </h4>
|
73 |
-
<p>A: Si usted tiene alguna pregunta, comentarios, sugerencias, o problemas con respecto Stumble Guys 2023 APK, puede ponerse en contacto con los desarrolladores del juego enviando un correo electrónico a <a href="mailto:[email protected]">[email protected]</a> o visitando sus páginas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes unirte a su servidor de Discord o a la comunidad de Reddit para chatear con otros jugadores y obtener más información y consejos sobre el juego. </p> 64aa2da5cf<br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar Colegio pelea sin sensor Apk: Una guía para los usuarios de Android</h1>
|
3 |
-
<p>Si estás buscando un juego divertido y realista que te permita experimentar la vida de un estudiante universitario, entonces deberías probar College Brawl. Este juego es una simulación de la vida en el campus, donde puedes hacer amigos, enemigos, romance y drama. También puedes personalizar tu personaje, elegir tu especialidad, unirte a clubes y participar en varias actividades. Sin embargo, hay un inconveniente: tienes que lidiar con los problemas y conflictos que surgen en tu universidad. Tienes que luchar, negociar o cooperar con otros estudiantes para sobrevivir y tener éxito en tu vida académica y social. </p>
|
4 |
-
<p>College Brawl es un juego que no es para los débiles de corazón. Contiene temas maduros, violencia, blasfemia y desnudez. Si no se siente cómodo con estos elementos, entonces es posible que desee saltarse este juego. Sin embargo, si usted está buscando una versión más realista y sin censura de la vida universitaria, entonces es posible que desee descargar College Brawl No Sensor Apk. Esta es una versión modificada del juego original que elimina la censura y añade más características y contenido. Puedes disfrutar del juego sin restricciones ni limitaciones. </p>
|
5 |
-
<h2>descargar colegial pelea sin sensor apk</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://bltlly.com/2v6MG6">https://bltlly.com/2v6MG6</a></b></p><br /><br />
|
6 |
-
<p>En este artículo, le mostraremos cómo descargar College Brawl No Sensor Apk de una fuente de confianza, cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Sigue estos pasos cuidadosamente y podrás experimentar el mejor juego de simulación de vida universitaria. </p>
|
7 |
-
<h2>Cómo descargar College Brawl No Sensor Apk de una fuente de confianza</h2>
|
8 |
-
|
9 |
-
<p>Uno de los mejores sitios web que recomendamos para descargar College Brawl No Sensor Apk es [Bungdus.com]( 1 ). Este sitio web es conocido por proporcionar juegos y aplicaciones de alta calidad para usuarios de Android. Tiene una gran colección de juegos y aplicaciones que son probados y verificados por su equipo de expertos. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias. </p>
|
10 |
-
<p>Para descargar College Brawl No Sensor Apk de [Bungdus.com]( 1 ), siga estos pasos:</p>
|
11 |
-
<ol>
|
12 |
-
<li>Abra su navegador web y vaya a [Bungdus.com]( 1 ). </li>
|
13 |
-
<li>En la página de inicio, escriba "Pelea de la universidad" en el cuadro de búsqueda y pulse enter. </li>
|
14 |
-
<li>De los resultados de la búsqueda, haga clic en el enlace que dice "Descargar College Brawl Mod Apk Nosensor Terbaru 2023". </li>
|
15 |
-
<li>En la siguiente página, desplácese hacia abajo hasta que vea un botón verde que dice "Descargar ahora". Haga clic en él. </li>
|
16 |
-
<li>Se abrirá una nueva pestaña con un temporizador de cuenta atrás. Espere unos segundos hasta que el temporizador llegue a cero. </li>
|
17 |
-
<li>Haga clic en el botón que dice "Descargar archivo" para comenzar a descargar el archivo apk. </li>
|
18 |
-
<li>Guarde el archivo apk en su ubicación preferida en su dispositivo. </li>
|
19 |
-
</ol>
|
20 |
-
<h2> <h2>Cómo instalar College Brawl No Sensor Apk en su dispositivo Android</h2>
|
21 |
-
<p>Después de haber descargado College Brawl No Sensor Apk de [Bungdus.com], es necesario instalarlo en su dispositivo Android. Sin embargo, antes de poder hacer eso, debe habilitar la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Esto se debe a College Brawl No Sensor Apk no está disponible en el Google Play Store y se considera una aplicación de terceros. Por lo tanto, debe dar permiso a su dispositivo para instalarlo. </p>
|
22 |
-
<p>Para habilitar la instalación de aplicaciones de fuentes desconocidas en tu dispositivo Android, sigue estos pasos:</p>
|
23 |
-
<ol>
|
24 |
-
<li>Ir a la aplicación Configuración en su dispositivo y toque en Seguridad o Privacidad.</li>
|
25 |
-
<li> Encontrar la opción que dice "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" y alternar en. </li>
|
26 |
-
|
27 |
-
</ol>
|
28 |
-
<p>Ahora, usted está listo para instalar College Brawl No Sensor Apk en su dispositivo. Para hacer eso, siga estos pasos:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Busque el archivo apk que descargó de [Bungdus.com] y toque en él. </li>
|
31 |
-
<li>Aparecerá un mensaje de confirmación. Toque en Instalar para iniciar el proceso de instalación. </li>
|
32 |
-
<li>Espere unos minutos hasta que se complete la instalación. </li>
|
33 |
-
<li>Toque en Abrir para iniciar el juego o Listo para salir del instalador. </li>
|
34 |
-
</ol>
|
35 |
-
<h2>Cómo jugar Colegio pelea sin sensor Apk y disfrutar de sus características</h2>
|
36 |
-
<p>Felicidades! Usted ha instalado con éxito College Brawl No Sensor Apk en su dispositivo Android. Ahora, puedes jugar el juego y disfrutar de sus características. Aquí hay algunos consejos y trucos para ayudarte a empezar:</p>
|
37 |
-
<p></p>
|
38 |
-
<ul>
|
39 |
-
<li>Cuando inicies el juego por primera vez, se te pedirá que crees tu personaje. Puedes elegir tu género, nombre, apariencia y personalidad. También puedes personalizar tu ropa, accesorios y peinado. </li>
|
40 |
-
<li>Después de crear tu personaje, serás llevado al menú principal. Aquí, usted puede optar por iniciar un nuevo juego, cargar un juego guardado, o acceder a la configuración. También puedes ver tus estadísticas, inventario, logros y amigos. </li>
|
41 |
-
<li>Si empiezas un nuevo juego, se te pedirá que elijas tu especialidad. Puedes elegir entre diferentes campos de estudio, como artes, ciencias, negocios, ingeniería o derecho. Tu especialidad afectará tus clases, actividades y oportunidades profesionales. </li>
|
42 |
-
<li>También se le pedirá que elija su dormitorio. Puede elegir entre diferentes tipos de dormitorios, como mixto, de un solo sexo, de lujo o barato. Su dormitorio afectará su comodidad, privacidad y vida social. </li>
|
43 |
-
<li>Una vez que haya elegido su especialidad y dormitorio, comenzará su vida universitaria. Tendrá que equilibrar su vida académica, social y personal. Tendrás que asistir a clases, hacer tareas, tomar exámenes, unirte a clubes, hacer amigos, citas, fiestas, peleas y más. </li>
|
44 |
-
|
45 |
-
<li>Puede explorar el campus e interactuar con varios personajes y objetos. También puede usar su teléfono para acceder a varias aplicaciones y características. Puede llamar o enviar mensajes de texto a otros personajes, revisar su correo electrónico o cuentas de redes sociales, jugar juegos o ver videos en línea. </li>
|
46 |
-
</ul>
|
47 |
-
<h2>Conclusión: Resumir los principales puntos y beneficios de la descarga de College Brawl No Sensor Apk</h2>
|
48 |
-
<p>En conclusión, Colegio pelea sin sensor Apk es un juego que le permite experimentar la vida de un estudiante universitario de una manera realista y sin censura. Puedes crear tu propio personaje y personalizarlo según tus preferencias. Puede elegir su especialidad y dormitorio y dar forma a su vida académica y social. Puedes participar en varias actividades y eventos y tomar decisiones que afectarán tu futuro y tus relaciones. También puedes disfrutar del juego sin censura ni limitaciones. </p>
|
49 |
-
<p>Si desea descargar College Brawl No Sensor Apk gratis de una fuente de confianza, entonces usted debe visitar [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias. </p>
|
50 |
-
<p>Esperamos que este artículo le ha ayudado a aprender cómo descargar College Brawl No Sensor Apk de [Bungdus.com], cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Si tiene alguna pregunta o comentario sobre este artículo o el juego o el sitio web, no dude en dejar un comentario a continuación. Nos encantaría saber de ti y ayudarte. ¡Gracias por leer y tener un gran día! </p>
|
51 |
-
<h2>Preguntas frecuentes</h2>
|
52 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre College Brawl No Sensor Apk:</p>
|
53 |
-
<h3>¿Cuál es la diferencia entre Pelea de Colegio y Pelea de Colegio Sin Sensor? </h3>
|
54 |
-
|
55 |
-
<p>College Brawl No Sensor es una versión modificada del juego que elimina la censura y añade más características y contenido. Es una versión más realista y sin censura de la vida universitaria. Puedes disfrutar del juego sin restricciones ni limitaciones. Por ejemplo, tiene escenas claras y detalladas, contenido completo y sin cortar, y funciones desbloqueadas. </p>
|
56 |
-
<h3>¿Es seguro y legal descargar College Brawl No Sensor Apk? </h3>
|
57 |
-
<p>College Brawl No Sensor Apk es seguro y legal para descargar, siempre y cuando se descarga desde una fuente de confianza como [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Cuenta con un equipo de expertos que prueban y verifican los archivos apk antes de subirlos al sitio web. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias. </p>
|
58 |
-
<p>Sin embargo, usted debe tener en cuenta que la descarga de College Brawl No Sensor Apk podría violar los términos y condiciones del desarrollador de juegos original. Por lo tanto, debe descargarlo y usarlo bajo su propio riesgo y discreción. No nos hacemos responsables de las consecuencias o daños que puedan producirse al descargar o usar College Brawl No Sensor Apk.</p>
|
59 |
-
<h3>¿Cuáles son los requisitos mínimos para ejecutar College Brawl No Sensor Apk en su dispositivo Android? </h3>
|
60 |
-
<p>Para ejecutar College Brawl No Sensor Apk en su dispositivo Android, es necesario tener los siguientes requisitos mínimos:</p>
|
61 |
-
<ul>
|
62 |
-
<li>Un dispositivo Android con la versión 4.4 o superior. </li>
|
63 |
-
<li>Una conexión a Internet estable. </li>
|
64 |
-
<li>Al menos 1 GB de espacio de almacenamiento libre. </li>
|
65 |
-
<li>Al menos 2 GB de RAM.</li>
|
66 |
-
</ul>
|
67 |
-
<p>Si su dispositivo cumple con estos requisitos, entonces usted debe ser capaz de ejecutar College Brawl No Sensor Apk sin problemas y sin ningún problema. </p>
|
68 |
-
<h3>¿Cómo puedo actualizar College Brawl No Sensor Apk a la última versión? </h3>
|
69 |
-
|
70 |
-
<p>Alternativamente, también puede buscar actualizaciones dentro del juego. Puede ir al menú de configuración y tocar el botón de actualización. Si hay una nueva actualización disponible, puedes descargarla directamente del juego e instalarla en tu dispositivo. </p>
|
71 |
-
<h3>¿Cómo puedo contactar con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo? </h3>
|
72 |
-
<p>Si desea ponerse en contacto con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo, puede hacerlo enviando un correo electrónico a [[email protected]]. También puede visitar su sitio web oficial en [collegebrawlnosensor.com] o sus cuentas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes dejar un comentario en [Bungdus.com] o en este artículo y trataremos de reenviarlo a ellos. </p> 64aa2da5cf<br />
|
73 |
-
<br />
|
74 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar Dungeon Quest Mod Apk y disfrutar de una aventura de RPG lleno de botín</h1>
|
3 |
-
<p>Si estás buscando un juego de rol divertido y adictivo que puedas jugar sin conexión, entonces deberías probar Dungeon Quest. Este juego te llevará en un viaje épico para encontrar el mejor botín y derrotar a todos los enemigos en su camino. Y si usted quiere hacer su aventura aún más emocionante, se puede descargar Dungeon Quest mod apk y disfrutar de recursos ilimitados, compras gratis, y más. En este artículo, te diremos qué es Dungeon Quest, por qué deberías descargar su mod apk, cómo instalarlo y algunos consejos y trucos para jugarlo. </p>
|
4 |
-
<h2>¿Qué es Dungeon Quest? </h2>
|
5 |
-
<p>Dungeon Quest es un juego de rol de acción sin conexión que fue desarrollado por Shiny Box Games. Está disponible para dispositivos Android, iOS y Apple TV. Estas son algunas de las características de este juego:</p>
|
6 |
-
<h2>Descargar Dungeon Quest mod apk</h2><br /><p><b><b>DOWNLOAD</b> ✪ <a href="https://bltlly.com/2v6JdX">https://bltlly.com/2v6JdX</a></b></p><br /><br />
|
7 |
-
<h3>Un juego de rol de acción sin conexión para todos</h3>
|
8 |
-
<p>Puedes jugar a Dungeon Quest todo el tiempo que quieras sin contenido ni muros de pago. Usted no necesita una conexión a Internet para disfrutar de este juego, por lo que puede jugar en cualquier momento y en cualquier lugar. También puedes personalizar la apariencia, el equipo, las habilidades y los talentos de tu personaje para adaptarlos a tu estilo de juego. </p>
|
9 |
-
<h3>Un juego con botín aleatorio, mazmorras generadas y jefes legendarios</h3>
|
10 |
-
<p>En Dungeon Quest, nunca lucharás en la misma mazmorra dos veces. El juego tiene pisos generados aleatoriamente ilimitados que desafiarán tus habilidades y estrategia. También encontrarás increíbles botines aleatorios que puedes equipar y usar en combate. Y al final de cada acto, te enfrentarás a uno de los cuatro jefes legendarios que pondrán a prueba tu fuerza. </p>
|
11 |
-
<h3>Un juego con tres clases, sistema de elaboración, sistema de habilidades y sistema de mascotas</h3>
|
12 |
-
|
13 |
-
<h2>¿Por qué descargar Dungeon Quest mod apk? </h2>
|
14 |
-
<p>Dungeon Quest ya es un juego divertido y divertido, pero si quieres hacerlo aún mejor, puedes descargar su apk mod. Con este apk mod, puede obtener acceso a algunas características increíbles que harán que su juego más fácil y más emocionante. Estos son algunos de los beneficios de descargar Dungeon Quest mod apk:</p>
|
15 |
-
<h3>Consigue oro y cristales ilimitados para mejorar tu equipo y habilidades</h3>
|
16 |
-
<p>El oro y los cristales son las principales monedas en Dungeon Quest. Los necesitas para comprar objetos, mejorar tu equipo, desbloquear habilidades y mucho más. Con Dungeon Quest mod apk, puede obtener oro ilimitado y cristales que se puede utilizar tanto como quieras. Usted no tiene que preocuparse por quedarse sin recursos o moler por ellos. </p>
|
17 |
-
<h3>Obtén compras gratuitas y acceso a artículos y características premium</h3>
|
18 |
-
<p>Dungeon Quest tiene algunos elementos y características que requieren dinero real o compras en el juego. Por ejemplo, puedes comprar mascotas premium, disfraces, espacios de inventario y más. Con Dungeon Quest mod apk, usted puede obtener compras gratuitas y el acceso a todos los artículos premium y características sin gastar dinero. Puedes disfrutar de la experiencia completa del juego sin limitaciones. </p>
|
19 |
-
<h3>Obtén resistencia y salud ilimitadas para sobrevivir más tiempo en batallas</h3>
|
20 |
-
<p>La resistencia y la salud son vitales para tu supervivencia en Dungeon Quest. Necesitas resistencia para usar tus habilidades y habilidades, y necesitas salud para soportar el daño de los enemigos. Con Dungeon Quest mod apk, puede obtener la resistencia y la salud ilimitada que nunca se agotará. Puedes usar tus habilidades tanto como quieras y recibir tanto daño como puedas sin morir. </p>
|
21 |
-
<h2>Cómo descargar e instalar Dungeon Quest mod apk? </h2>
|
22 |
-
<p>Descargar e instalar Dungeon Quest mod apk es muy fácil y simple. Solo tienes que seguir estos pasos:</p>
|
23 |
-
<h3>Descargar el archivo apk mod de una fuente de confianza</h3>
|
24 |
-
|
25 |
-
<p><a href="">Descargar Dungeon Quest mod apk aquí</a></p>
|
26 |
-
<p></p>
|
27 |
-
<h3>Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
|
28 |
-
<p>Lo siguiente que debe hacer es permitir que su dispositivo instale aplicaciones de fuentes desconocidas. Esto es porque Dungeon Quest mod apk no es de la tienda oficial de Google Play o App Store. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad y habilite fuentes desconocidas. Esto le permitirá instalar aplicaciones desde fuentes externas. </p>
|
29 |
-
<h3>Instalar el archivo apk mod y lanzar el juego</h3>
|
30 |
-
<p>Lo último que tienes que hacer es instalar el archivo apk mod que has descargado. Busque el archivo en el almacenamiento del dispositivo y, a continuación, toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla y espere a que termine la instalación. Una vez que se hace, se puede iniciar el juego y disfrutar de Dungeon Quest mod apk. </p>
|
31 |
-
<h2>Consejos y trucos para jugar Dungeon Quest</h2>
|
32 |
-
<p>Dungeon Quest es un juego divertido y adictivo, pero también puede ser desafiante y complejo. Para ayudarte a empezar y mejorar tu juego, aquí hay algunos consejos y trucos que puedes usar:</p>
|
33 |
-
<h3>Prioriza la misión principal y completa misiones diarias para recompensas</h3>
|
34 |
-
<p>La misión principal es la mejor manera de progresar en Dungeon Quest. Te guiará a través de los diferentes actos, mazmorras y jefes del juego. También te recompensará con oro, cristales, equipo y más. Siempre debes seguir la misión principal y completarla lo antes posible. </p>
|
35 |
-
<p>Las misiones diarias son otra gran manera de ganar recompensas en Dungeon Quest. Son tareas sencillas que puedes hacer todos los días, como matar a un cierto número de enemigos, usar cierta habilidad o encontrar un determinado objeto. Te recompensarán con oro, cristales, piedras mitológicas y más. Siempre debes revisar tus misiones diarias y completarlas antes de que expiren. </p>
|
36 |
-
<h3>Enfócate en una clase y estudia a cada héroe para la mejor formación</h3>
|
37 |
-
|
38 |
-
<p>También debes estudiar a cada héroe que pertenece a tu clase. Cada héroe tiene un rol y una habilidad diferentes que pueden afectar tu juego. Por ejemplo, algunos héroes son buenos para hacer daño, mientras que otros son buenos para curar o pulir. Deberías aprender cómo funciona cada héroe y cómo utilizarlo eficazmente en combate. </p>
|
39 |
-
<h3>Únete a un gremio y usa mercenarios y mascotas para ayudarte en el combate</h3>
|
40 |
-
<p>Dungeon Quest no es un juego en solitario. Puedes unirte a un gremio e interactuar con otros jugadores que comparten tu pasión por el juego. Puedes chatear con ellos, comerciar con ellos o ayudarlos en sus misiones. También puedes participar en eventos y competiciones de gremios para obtener más recompensas y diversión. </p>
|
41 |
-
<p>También puedes usar mercenarios y mascotas para ayudarte en el combate. Los mercenarios son otros héroes que puedes contratar por una tarifa para unirte a tu aventura. Lucharán junto a ti y utilizarán sus habilidades para ayudarte a derrotar a los enemigos. Las mascotas son criaturas lindas que puedes adoptar o comprar para seguirte. También lucharán contigo y te proporcionarán bonificaciones o efectos pasivos. </p>
|
42 |
-
<h3>Progresa en la torre infinita y ponte a prueba con diferentes dificultades</h3>
|
43 |
-
<p>Dungeon Quest tiene un modo de torre infinita que te permite subir a una torre sin fin de pisos generados aleatoriamente. Cada piso tiene diferentes enemigos, trampas, rompecabezas y recompensas. Cuanto más alto vayas, más difícil será, pero mejor será el botín. También puede elegir diferentes dificultades para desafiarse y ganar más recompensas. El modo torre infinita es una gran manera de poner a prueba tus habilidades y divertirse en Dungeon Quest.</p>
|
44 |
-
<h2>Conclusión</h2>
|
45 |
-
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Dungeon Quest y su mod apk:</p>
|
48 |
-
<h3>Es Dungeon Quest mod apk seguro de usar? </h3>
|
49 |
-
<p>Sí, Dungeon Quest mod apk es seguro de usar siempre y cuando se descarga desde una fuente de confianza. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo. También debe hacer una copia de seguridad de sus datos antes de instalar el apk mod, en caso de que algo salga mal. </p>
|
50 |
-
<h3>¿Me prohibirán por usar Dungeon Quest mod apk? </h3>
|
51 |
-
<p>No, usted no será prohibido para el uso de Dungeon Quest mod apk. Esto se debe a que Dungeon Quest es un juego fuera de línea que no requiere una conexión a Internet o una cuenta para jugar. Por lo tanto, no hay manera para los desarrolladores o los servidores de juegos para detectar o prohibir el uso de la apk mod. Puedes jugar sin preocupaciones. </p>
|
52 |
-
<h3>¿Puedo jugar Dungeon Quest con mis amigos? </h3>
|
53 |
-
<p>Sí, puedes jugar a Dungeon Quest con tus amigos. Aunque Dungeon Quest es un juego offline, tiene un modo multijugador que te permite jugar con otros jugadores online. Puedes unirte o crear una habitación e invitar a tus amigos a unirse a ti. También puedes chatear con ellos y cooperar con ellos en combate. Jugar a Dungeon Quest con tus amigos es muy divertido y gratificante. </p>
|
54 |
-
<h3>¿Cómo puedo actualizar Dungeon Quest mod apk? </h3>
|
55 |
-
<p>Para actualizar Dungeon Quest mod apk, es necesario descargar la última versión del archivo apk mod de la misma fuente que lo descargó de antes. A continuación, es necesario desinstalar la versión anterior de la apk mod e instalar el nuevo. También debe comprobar si la nueva versión del apk mod es compatible con su dispositivo y la versión del juego. </p>
|
56 |
-
<h3>¿Cuáles son algunos otros juegos como Dungeon Quest? </h3>
|
57 |
-
<p>Si te gusta Dungeon Quest, también te pueden gustar otros juegos similares. Algunos de estos juegos son:</p>
|
58 |
-
<ul>
|
59 |
-
|
60 |
-
<li>Eternium: Un juego clásico de hack-and-slash RPG con gráficos impresionantes, misiones impulsadas por historias y sistema de elaboración. </li>
|
61 |
-
<li>Nonstop Knight: Un juego de rol casual con mazmorras sin fin, botín y mejoras. </li>
|
62 |
-
</ul></p> 64aa2da5cf<br />
|
63 |
-
<br />
|
64 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
|
2 |
-
|
3 |
-
import { setTimeout } from "node:timers/promises";
|
4 |
-
import { collections } from "./database";
|
5 |
-
|
6 |
-
let closed = false;
|
7 |
-
process.on("SIGINT", () => {
|
8 |
-
closed = true;
|
9 |
-
});
|
10 |
-
|
11 |
-
export let abortedGenerations: Map<string, Date> = new Map();
|
12 |
-
|
13 |
-
async function maintainAbortedGenerations() {
|
14 |
-
while (!closed) {
|
15 |
-
await setTimeout(1000);
|
16 |
-
|
17 |
-
try {
|
18 |
-
const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
|
19 |
-
|
20 |
-
abortedGenerations = new Map(
|
21 |
-
aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
|
22 |
-
);
|
23 |
-
} catch (err) {
|
24 |
-
console.error(err);
|
25 |
-
}
|
26 |
-
}
|
27 |
-
}
|
28 |
-
|
29 |
-
maintainAbortedGenerations();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py
DELETED
@@ -1,1300 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
#
|
3 |
-
# Copyright (C) 2012-2015 Vinay Sajip.
|
4 |
-
# Licensed to the Python Software Foundation under a contributor agreement.
|
5 |
-
# See LICENSE.txt and CONTRIBUTORS.txt.
|
6 |
-
#
|
7 |
-
|
8 |
-
import gzip
|
9 |
-
from io import BytesIO
|
10 |
-
import json
|
11 |
-
import logging
|
12 |
-
import os
|
13 |
-
import posixpath
|
14 |
-
import re
|
15 |
-
try:
|
16 |
-
import threading
|
17 |
-
except ImportError: # pragma: no cover
|
18 |
-
import dummy_threading as threading
|
19 |
-
import zlib
|
20 |
-
|
21 |
-
from . import DistlibException
|
22 |
-
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
|
23 |
-
queue, quote, unescape, build_opener,
|
24 |
-
HTTPRedirectHandler as BaseRedirectHandler, text_type,
|
25 |
-
Request, HTTPError, URLError)
|
26 |
-
from .database import Distribution, DistributionPath, make_dist
|
27 |
-
from .metadata import Metadata, MetadataInvalidError
|
28 |
-
from .util import (cached_property, ensure_slash, split_filename, get_project_data,
|
29 |
-
parse_requirement, parse_name_and_version, ServerProxy,
|
30 |
-
normalize_name)
|
31 |
-
from .version import get_scheme, UnsupportedVersionError
|
32 |
-
from .wheel import Wheel, is_compatible
|
33 |
-
|
34 |
-
logger = logging.getLogger(__name__)
|
35 |
-
|
36 |
-
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
|
37 |
-
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
|
38 |
-
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
|
39 |
-
DEFAULT_INDEX = 'https://pypi.org/pypi'
|
40 |
-
|
41 |
-
def get_all_distribution_names(url=None):
    """Return all distribution names known by an index.

    :param url: The URL of the index; defaults to ``DEFAULT_INDEX``.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        # ServerProxy exposes its transport-closing method via this call form;
        # always close the connection even if list_packages() raises.
        client('close')()
|
54 |
-
|
55 |
-
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        """Handle a redirect response, fixing up relative Location/URI headers.

        If the redirect target has no scheme (i.e. it is a relative URL),
        resolve it against the original request URL and rewrite the header
        in place before delegating to the base handler.
        """
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            # No redirect target at all; nothing to do.
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            # Relative redirect: make it absolute using the request's URL.
            newurl = urljoin(req.get_full_url(), newurl)
            # email.message.Message-style headers need replace_header to
            # avoid duplicating the key; plain mappings use item assignment.
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    # All redirect status codes share the same fixed-up handling.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
|
85 |
-
|
86 |
-
class Locator(object):
|
87 |
-
"""
|
88 |
-
A base class for locators - things that locate distributions.
|
89 |
-
"""
|
90 |
-
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
|
91 |
-
binary_extensions = ('.egg', '.exe', '.whl')
|
92 |
-
excluded_extensions = ('.pdf',)
|
93 |
-
|
94 |
-
# A list of tags indicating which wheels you want to match. The default
|
95 |
-
# value of None matches against the tags compatible with the running
|
96 |
-
# Python. If you want to match other values, set wheel_tags on a locator
|
97 |
-
# instance to a list of tuples (pyver, abi, arch) which you want to match.
|
98 |
-
wheel_tags = None
|
99 |
-
|
100 |
-
downloadable_extensions = source_extensions + ('.whl',)
|
101 |
-
|
102 |
-
def __init__(self, scheme='default'):
|
103 |
-
"""
|
104 |
-
Initialise an instance.
|
105 |
-
:param scheme: Because locators look for most recent versions, they
|
106 |
-
need to know the version scheme to use. This specifies
|
107 |
-
the current PEP-recommended scheme - use ``'legacy'``
|
108 |
-
if you need to support existing distributions on PyPI.
|
109 |
-
"""
|
110 |
-
self._cache = {}
|
111 |
-
self.scheme = scheme
|
112 |
-
# Because of bugs in some of the handlers on some of the platforms,
|
113 |
-
# we use our own opener rather than just using urlopen.
|
114 |
-
self.opener = build_opener(RedirectHandler())
|
115 |
-
# If get_project() is called from locate(), the matcher instance
|
116 |
-
# is set from the requirement passed to locate(). See issue #18 for
|
117 |
-
# why this can be useful to know.
|
118 |
-
self.matcher = None
|
119 |
-
self.errors = queue.Queue()
|
120 |
-
|
121 |
-
def get_errors(self):
|
122 |
-
"""
|
123 |
-
Return any errors which have occurred.
|
124 |
-
"""
|
125 |
-
result = []
|
126 |
-
while not self.errors.empty(): # pragma: no cover
|
127 |
-
try:
|
128 |
-
e = self.errors.get(False)
|
129 |
-
result.append(e)
|
130 |
-
except self.errors.Empty:
|
131 |
-
continue
|
132 |
-
self.errors.task_done()
|
133 |
-
return result
|
134 |
-
|
135 |
-
def clear_errors(self):
|
136 |
-
"""
|
137 |
-
Clear any errors which may have been logged.
|
138 |
-
"""
|
139 |
-
# Just get the errors and throw them away
|
140 |
-
self.get_errors()
|
141 |
-
|
142 |
-
def clear_cache(self):
|
143 |
-
self._cache.clear()
|
144 |
-
|
145 |
-
def _get_scheme(self):
|
146 |
-
return self._scheme
|
147 |
-
|
148 |
-
def _set_scheme(self, value):
|
149 |
-
self._scheme = value
|
150 |
-
|
151 |
-
scheme = property(_get_scheme, _set_scheme)
|
152 |
-
|
153 |
-
def _get_project(self, name):
|
154 |
-
"""
|
155 |
-
For a given project, get a dictionary mapping available versions to Distribution
|
156 |
-
instances.
|
157 |
-
|
158 |
-
This should be implemented in subclasses.
|
159 |
-
|
160 |
-
If called from a locate() request, self.matcher will be set to a
|
161 |
-
matcher for the requirement to satisfy, otherwise it will be None.
|
162 |
-
"""
|
163 |
-
raise NotImplementedError('Please implement in the subclass')
|
164 |
-
|
165 |
-
def get_distribution_names(self):
|
166 |
-
"""
|
167 |
-
Return all the distribution names known to this locator.
|
168 |
-
"""
|
169 |
-
raise NotImplementedError('Please implement in the subclass')
|
170 |
-
|
171 |
-
def get_project(self, name):
|
172 |
-
"""
|
173 |
-
For a given project, get a dictionary mapping available versions to Distribution
|
174 |
-
instances.
|
175 |
-
|
176 |
-
This calls _get_project to do all the work, and just implements a caching layer on top.
|
177 |
-
"""
|
178 |
-
if self._cache is None: # pragma: no cover
|
179 |
-
result = self._get_project(name)
|
180 |
-
elif name in self._cache:
|
181 |
-
result = self._cache[name]
|
182 |
-
else:
|
183 |
-
self.clear_errors()
|
184 |
-
result = self._get_project(name)
|
185 |
-
self._cache[name] = result
|
186 |
-
return result
|
187 |
-
|
188 |
-
def score_url(self, url):
|
189 |
-
"""
|
190 |
-
Give an url a score which can be used to choose preferred URLs
|
191 |
-
for a given project release.
|
192 |
-
"""
|
193 |
-
t = urlparse(url)
|
194 |
-
basename = posixpath.basename(t.path)
|
195 |
-
compatible = True
|
196 |
-
is_wheel = basename.endswith('.whl')
|
197 |
-
is_downloadable = basename.endswith(self.downloadable_extensions)
|
198 |
-
if is_wheel:
|
199 |
-
compatible = is_compatible(Wheel(basename), self.wheel_tags)
|
200 |
-
return (t.scheme == 'https', 'pypi.org' in t.netloc,
|
201 |
-
is_downloadable, is_wheel, compatible, basename)
|
202 |
-
|
203 |
-
def prefer_url(self, url1, url2):
|
204 |
-
"""
|
205 |
-
Choose one of two URLs where both are candidates for distribution
|
206 |
-
archives for the same version of a distribution (for example,
|
207 |
-
.tar.gz vs. zip).
|
208 |
-
|
209 |
-
The current implementation favours https:// URLs over http://, archives
|
210 |
-
from PyPI over those from other locations, wheel compatibility (if a
|
211 |
-
wheel) and then the archive name.
|
212 |
-
"""
|
213 |
-
result = url2
|
214 |
-
if url1:
|
215 |
-
s1 = self.score_url(url1)
|
216 |
-
s2 = self.score_url(url2)
|
217 |
-
if s1 > s2:
|
218 |
-
result = url1
|
219 |
-
if result != url2:
|
220 |
-
logger.debug('Not replacing %r with %r', url1, url2)
|
221 |
-
else:
|
222 |
-
logger.debug('Replacing %r with %r', url1, url2)
|
223 |
-
return result
|
224 |
-
|
225 |
-
def split_filename(self, filename, project_name):
|
226 |
-
"""
|
227 |
-
Attempt to split a filename in project name, version and Python version.
|
228 |
-
"""
|
229 |
-
return split_filename(filename, project_name)
|
230 |
-
|
231 |
-
def convert_url_to_download_info(self, url, project_name):
|
232 |
-
"""
|
233 |
-
See if a URL is a candidate for a download URL for a project (the URL
|
234 |
-
has typically been scraped from an HTML page).
|
235 |
-
|
236 |
-
If it is, a dictionary is returned with keys "name", "version",
|
237 |
-
"filename" and "url"; otherwise, None is returned.
|
238 |
-
"""
|
239 |
-
def same_project(name1, name2):
|
240 |
-
return normalize_name(name1) == normalize_name(name2)
|
241 |
-
|
242 |
-
result = None
|
243 |
-
scheme, netloc, path, params, query, frag = urlparse(url)
|
244 |
-
if frag.lower().startswith('egg='): # pragma: no cover
|
245 |
-
logger.debug('%s: version hint in fragment: %r',
|
246 |
-
project_name, frag)
|
247 |
-
m = HASHER_HASH.match(frag)
|
248 |
-
if m:
|
249 |
-
algo, digest = m.groups()
|
250 |
-
else:
|
251 |
-
algo, digest = None, None
|
252 |
-
origpath = path
|
253 |
-
if path and path[-1] == '/': # pragma: no cover
|
254 |
-
path = path[:-1]
|
255 |
-
if path.endswith('.whl'):
|
256 |
-
try:
|
257 |
-
wheel = Wheel(path)
|
258 |
-
if not is_compatible(wheel, self.wheel_tags):
|
259 |
-
logger.debug('Wheel not compatible: %s', path)
|
260 |
-
else:
|
261 |
-
if project_name is None:
|
262 |
-
include = True
|
263 |
-
else:
|
264 |
-
include = same_project(wheel.name, project_name)
|
265 |
-
if include:
|
266 |
-
result = {
|
267 |
-
'name': wheel.name,
|
268 |
-
'version': wheel.version,
|
269 |
-
'filename': wheel.filename,
|
270 |
-
'url': urlunparse((scheme, netloc, origpath,
|
271 |
-
params, query, '')),
|
272 |
-
'python-version': ', '.join(
|
273 |
-
['.'.join(list(v[2:])) for v in wheel.pyver]),
|
274 |
-
}
|
275 |
-
except Exception as e: # pragma: no cover
|
276 |
-
logger.warning('invalid path for wheel: %s', path)
|
277 |
-
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
|
278 |
-
logger.debug('Not downloadable: %s', path)
|
279 |
-
else: # downloadable extension
|
280 |
-
path = filename = posixpath.basename(path)
|
281 |
-
for ext in self.downloadable_extensions:
|
282 |
-
if path.endswith(ext):
|
283 |
-
path = path[:-len(ext)]
|
284 |
-
t = self.split_filename(path, project_name)
|
285 |
-
if not t: # pragma: no cover
|
286 |
-
logger.debug('No match for project/version: %s', path)
|
287 |
-
else:
|
288 |
-
name, version, pyver = t
|
289 |
-
if not project_name or same_project(project_name, name):
|
290 |
-
result = {
|
291 |
-
'name': name,
|
292 |
-
'version': version,
|
293 |
-
'filename': filename,
|
294 |
-
'url': urlunparse((scheme, netloc, origpath,
|
295 |
-
params, query, '')),
|
296 |
-
#'packagetype': 'sdist',
|
297 |
-
}
|
298 |
-
if pyver: # pragma: no cover
|
299 |
-
result['python-version'] = pyver
|
300 |
-
break
|
301 |
-
if result and algo:
|
302 |
-
result['%s_digest' % algo] = digest
|
303 |
-
return result
|
304 |
-
|
305 |
-
def _get_digest(self, info):
|
306 |
-
"""
|
307 |
-
Get a digest from a dictionary by looking at a "digests" dictionary
|
308 |
-
or keys of the form 'algo_digest'.
|
309 |
-
|
310 |
-
Returns a 2-tuple (algo, digest) if found, else None. Currently
|
311 |
-
looks only for SHA256, then MD5.
|
312 |
-
"""
|
313 |
-
result = None
|
314 |
-
if 'digests' in info:
|
315 |
-
digests = info['digests']
|
316 |
-
for algo in ('sha256', 'md5'):
|
317 |
-
if algo in digests:
|
318 |
-
result = (algo, digests[algo])
|
319 |
-
break
|
320 |
-
if not result:
|
321 |
-
for algo in ('sha256', 'md5'):
|
322 |
-
key = '%s_digest' % algo
|
323 |
-
if key in info:
|
324 |
-
result = (algo, info[key])
|
325 |
-
break
|
326 |
-
return result
|
327 |
-
|
328 |
-
def _update_version_data(self, result, info):
|
329 |
-
"""
|
330 |
-
Update a result dictionary (the final result from _get_project) with a
|
331 |
-
dictionary for a specific version, which typically holds information
|
332 |
-
gleaned from a filename or URL for an archive for the distribution.
|
333 |
-
"""
|
334 |
-
name = info.pop('name')
|
335 |
-
version = info.pop('version')
|
336 |
-
if version in result:
|
337 |
-
dist = result[version]
|
338 |
-
md = dist.metadata
|
339 |
-
else:
|
340 |
-
dist = make_dist(name, version, scheme=self.scheme)
|
341 |
-
md = dist.metadata
|
342 |
-
dist.digest = digest = self._get_digest(info)
|
343 |
-
url = info['url']
|
344 |
-
result['digests'][url] = digest
|
345 |
-
if md.source_url != info['url']:
|
346 |
-
md.source_url = self.prefer_url(md.source_url, url)
|
347 |
-
result['urls'].setdefault(version, set()).add(url)
|
348 |
-
dist.locator = self
|
349 |
-
result[version] = dist
|
350 |
-
|
351 |
-
def locate(self, requirement, prereleases=False):
|
352 |
-
"""
|
353 |
-
Find the most recent distribution which matches the given
|
354 |
-
requirement.
|
355 |
-
|
356 |
-
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
|
357 |
-
'foo (>= 1.0, < 2.0, != 1.3)'
|
358 |
-
:param prereleases: If ``True``, allow pre-release versions
|
359 |
-
to be located. Otherwise, pre-release versions
|
360 |
-
are not returned.
|
361 |
-
:return: A :class:`Distribution` instance, or ``None`` if no such
|
362 |
-
distribution could be located.
|
363 |
-
"""
|
364 |
-
result = None
|
365 |
-
r = parse_requirement(requirement)
|
366 |
-
if r is None: # pragma: no cover
|
367 |
-
raise DistlibException('Not a valid requirement: %r' % requirement)
|
368 |
-
scheme = get_scheme(self.scheme)
|
369 |
-
self.matcher = matcher = scheme.matcher(r.requirement)
|
370 |
-
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
|
371 |
-
versions = self.get_project(r.name)
|
372 |
-
if len(versions) > 2: # urls and digests keys are present
|
373 |
-
# sometimes, versions are invalid
|
374 |
-
slist = []
|
375 |
-
vcls = matcher.version_class
|
376 |
-
for k in versions:
|
377 |
-
if k in ('urls', 'digests'):
|
378 |
-
continue
|
379 |
-
try:
|
380 |
-
if not matcher.match(k):
|
381 |
-
pass # logger.debug('%s did not match %r', matcher, k)
|
382 |
-
else:
|
383 |
-
if prereleases or not vcls(k).is_prerelease:
|
384 |
-
slist.append(k)
|
385 |
-
# else:
|
386 |
-
# logger.debug('skipping pre-release '
|
387 |
-
# 'version %s of %s', k, matcher.name)
|
388 |
-
except Exception: # pragma: no cover
|
389 |
-
logger.warning('error matching %s with %r', matcher, k)
|
390 |
-
pass # slist.append(k)
|
391 |
-
if len(slist) > 1:
|
392 |
-
slist = sorted(slist, key=scheme.key)
|
393 |
-
if slist:
|
394 |
-
logger.debug('sorted list: %s', slist)
|
395 |
-
version = slist[-1]
|
396 |
-
result = versions[version]
|
397 |
-
if result:
|
398 |
-
if r.extras:
|
399 |
-
result.extras = r.extras
|
400 |
-
result.download_urls = versions.get('urls', {}).get(version, set())
|
401 |
-
d = {}
|
402 |
-
sd = versions.get('digests', {})
|
403 |
-
for url in result.download_urls:
|
404 |
-
if url in sd: # pragma: no cover
|
405 |
-
d[url] = sd[url]
|
406 |
-
result.digests = d
|
407 |
-
self.matcher = None
|
408 |
-
return result
|
409 |
-
|
410 |
-
|
411 |
-
class PyPIRPCLocator(Locator):
|
412 |
-
"""
|
413 |
-
This locator uses XML-RPC to locate distributions. It therefore
|
414 |
-
cannot be used with simple mirrors (that only mirror file content).
|
415 |
-
"""
|
416 |
-
def __init__(self, url, **kwargs):
|
417 |
-
"""
|
418 |
-
Initialise an instance.
|
419 |
-
|
420 |
-
:param url: The URL to use for XML-RPC.
|
421 |
-
:param kwargs: Passed to the superclass constructor.
|
422 |
-
"""
|
423 |
-
super(PyPIRPCLocator, self).__init__(**kwargs)
|
424 |
-
self.base_url = url
|
425 |
-
self.client = ServerProxy(url, timeout=3.0)
|
426 |
-
|
427 |
-
def get_distribution_names(self):
|
428 |
-
"""
|
429 |
-
Return all the distribution names known to this locator.
|
430 |
-
"""
|
431 |
-
return set(self.client.list_packages())
|
432 |
-
|
433 |
-
def _get_project(self, name):
|
434 |
-
result = {'urls': {}, 'digests': {}}
|
435 |
-
versions = self.client.package_releases(name, True)
|
436 |
-
for v in versions:
|
437 |
-
urls = self.client.release_urls(name, v)
|
438 |
-
data = self.client.release_data(name, v)
|
439 |
-
metadata = Metadata(scheme=self.scheme)
|
440 |
-
metadata.name = data['name']
|
441 |
-
metadata.version = data['version']
|
442 |
-
metadata.license = data.get('license')
|
443 |
-
metadata.keywords = data.get('keywords', [])
|
444 |
-
metadata.summary = data.get('summary')
|
445 |
-
dist = Distribution(metadata)
|
446 |
-
if urls:
|
447 |
-
info = urls[0]
|
448 |
-
metadata.source_url = info['url']
|
449 |
-
dist.digest = self._get_digest(info)
|
450 |
-
dist.locator = self
|
451 |
-
result[v] = dist
|
452 |
-
for info in urls:
|
453 |
-
url = info['url']
|
454 |
-
digest = self._get_digest(info)
|
455 |
-
result['urls'].setdefault(v, set()).add(url)
|
456 |
-
result['digests'][url] = digest
|
457 |
-
return result
|
458 |
-
|
459 |
-
class PyPIJSONLocator(Locator):
|
460 |
-
"""
|
461 |
-
This locator uses PyPI's JSON interface. It's very limited in functionality
|
462 |
-
and probably not worth using.
|
463 |
-
"""
|
464 |
-
def __init__(self, url, **kwargs):
|
465 |
-
super(PyPIJSONLocator, self).__init__(**kwargs)
|
466 |
-
self.base_url = ensure_slash(url)
|
467 |
-
|
468 |
-
def get_distribution_names(self):
|
469 |
-
"""
|
470 |
-
Return all the distribution names known to this locator.
|
471 |
-
"""
|
472 |
-
raise NotImplementedError('Not available from this locator')
|
473 |
-
|
474 |
-
def _get_project(self, name):
|
475 |
-
result = {'urls': {}, 'digests': {}}
|
476 |
-
url = urljoin(self.base_url, '%s/json' % quote(name))
|
477 |
-
try:
|
478 |
-
resp = self.opener.open(url)
|
479 |
-
data = resp.read().decode() # for now
|
480 |
-
d = json.loads(data)
|
481 |
-
md = Metadata(scheme=self.scheme)
|
482 |
-
data = d['info']
|
483 |
-
md.name = data['name']
|
484 |
-
md.version = data['version']
|
485 |
-
md.license = data.get('license')
|
486 |
-
md.keywords = data.get('keywords', [])
|
487 |
-
md.summary = data.get('summary')
|
488 |
-
dist = Distribution(md)
|
489 |
-
dist.locator = self
|
490 |
-
urls = d['urls']
|
491 |
-
result[md.version] = dist
|
492 |
-
for info in d['urls']:
|
493 |
-
url = info['url']
|
494 |
-
dist.download_urls.add(url)
|
495 |
-
dist.digests[url] = self._get_digest(info)
|
496 |
-
result['urls'].setdefault(md.version, set()).add(url)
|
497 |
-
result['digests'][url] = self._get_digest(info)
|
498 |
-
# Now get other releases
|
499 |
-
for version, infos in d['releases'].items():
|
500 |
-
if version == md.version:
|
501 |
-
continue # already done
|
502 |
-
omd = Metadata(scheme=self.scheme)
|
503 |
-
omd.name = md.name
|
504 |
-
omd.version = version
|
505 |
-
odist = Distribution(omd)
|
506 |
-
odist.locator = self
|
507 |
-
result[version] = odist
|
508 |
-
for info in infos:
|
509 |
-
url = info['url']
|
510 |
-
odist.download_urls.add(url)
|
511 |
-
odist.digests[url] = self._get_digest(info)
|
512 |
-
result['urls'].setdefault(version, set()).add(url)
|
513 |
-
result['digests'][url] = self._get_digest(info)
|
514 |
-
# for info in urls:
|
515 |
-
# md.source_url = info['url']
|
516 |
-
# dist.digest = self._get_digest(info)
|
517 |
-
# dist.locator = self
|
518 |
-
# for info in urls:
|
519 |
-
# url = info['url']
|
520 |
-
# result['urls'].setdefault(md.version, set()).add(url)
|
521 |
-
# result['digests'][url] = self._get_digest(info)
|
522 |
-
except Exception as e:
|
523 |
-
self.errors.put(text_type(e))
|
524 |
-
logger.exception('JSON fetch failed: %s', e)
|
525 |
-
return result
|
526 |
-
|
527 |
-
|
528 |
-
class Page(object):
|
529 |
-
"""
|
530 |
-
This class represents a scraped HTML page.
|
531 |
-
"""
|
532 |
-
# The following slightly hairy-looking regex just looks for the contents of
|
533 |
-
# an anchor link, which has an attribute "href" either immediately preceded
|
534 |
-
# or immediately followed by a "rel" attribute. The attribute values can be
|
535 |
-
# declared with double quotes, single quotes or no quotes - which leads to
|
536 |
-
# the length of the expression.
|
537 |
-
_href = re.compile("""
|
538 |
-
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
|
539 |
-
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
|
540 |
-
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
|
541 |
-
""", re.I | re.S | re.X)
|
542 |
-
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
|
543 |
-
|
544 |
-
def __init__(self, data, url):
|
545 |
-
"""
|
546 |
-
Initialise an instance with the Unicode page contents and the URL they
|
547 |
-
came from.
|
548 |
-
"""
|
549 |
-
self.data = data
|
550 |
-
self.base_url = self.url = url
|
551 |
-
m = self._base.search(self.data)
|
552 |
-
if m:
|
553 |
-
self.base_url = m.group(1)
|
554 |
-
|
555 |
-
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
|
556 |
-
|
557 |
-
@cached_property
|
558 |
-
def links(self):
|
559 |
-
"""
|
560 |
-
Return the URLs of all the links on a page together with information
|
561 |
-
about their "rel" attribute, for determining which ones to treat as
|
562 |
-
downloads and which ones to queue for further scraping.
|
563 |
-
"""
|
564 |
-
def clean(url):
|
565 |
-
"Tidy up an URL."
|
566 |
-
scheme, netloc, path, params, query, frag = urlparse(url)
|
567 |
-
return urlunparse((scheme, netloc, quote(path),
|
568 |
-
params, query, frag))
|
569 |
-
|
570 |
-
result = set()
|
571 |
-
for match in self._href.finditer(self.data):
|
572 |
-
d = match.groupdict('')
|
573 |
-
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
|
574 |
-
d['rel4'] or d['rel5'] or d['rel6'])
|
575 |
-
url = d['url1'] or d['url2'] or d['url3']
|
576 |
-
url = urljoin(self.base_url, url)
|
577 |
-
url = unescape(url)
|
578 |
-
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
|
579 |
-
result.add((url, rel))
|
580 |
-
# We sort the result, hoping to bring the most recent versions
|
581 |
-
# to the front
|
582 |
-
result = sorted(result, key=lambda t: t[0], reverse=True)
|
583 |
-
return result
|
584 |
-
|
585 |
-
|
586 |
-
class SimpleScrapingLocator(Locator):
|
587 |
-
"""
|
588 |
-
A locator which scrapes HTML pages to locate downloads for a distribution.
|
589 |
-
This runs multiple threads to do the I/O; performance is at least as good
|
590 |
-
as pip's PackageFinder, which works in an analogous fashion.
|
591 |
-
"""
|
592 |
-
|
593 |
-
# These are used to deal with various Content-Encoding schemes.
|
594 |
-
decoders = {
|
595 |
-
'deflate': zlib.decompress,
|
596 |
-
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
|
597 |
-
'none': lambda b: b,
|
598 |
-
}
|
599 |
-
|
600 |
-
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
|
601 |
-
"""
|
602 |
-
Initialise an instance.
|
603 |
-
:param url: The root URL to use for scraping.
|
604 |
-
:param timeout: The timeout, in seconds, to be applied to requests.
|
605 |
-
This defaults to ``None`` (no timeout specified).
|
606 |
-
:param num_workers: The number of worker threads you want to do I/O,
|
607 |
-
This defaults to 10.
|
608 |
-
:param kwargs: Passed to the superclass.
|
609 |
-
"""
|
610 |
-
super(SimpleScrapingLocator, self).__init__(**kwargs)
|
611 |
-
self.base_url = ensure_slash(url)
|
612 |
-
self.timeout = timeout
|
613 |
-
self._page_cache = {}
|
614 |
-
self._seen = set()
|
615 |
-
self._to_fetch = queue.Queue()
|
616 |
-
self._bad_hosts = set()
|
617 |
-
self.skip_externals = False
|
618 |
-
self.num_workers = num_workers
|
619 |
-
self._lock = threading.RLock()
|
620 |
-
# See issue #45: we need to be resilient when the locator is used
|
621 |
-
# in a thread, e.g. with concurrent.futures. We can't use self._lock
|
622 |
-
# as it is for coordinating our internal threads - the ones created
|
623 |
-
# in _prepare_threads.
|
624 |
-
self._gplock = threading.RLock()
|
625 |
-
self.platform_check = False # See issue #112
|
626 |
-
|
627 |
-
def _prepare_threads(self):
|
628 |
-
"""
|
629 |
-
Threads are created only when get_project is called, and terminate
|
630 |
-
before it returns. They are there primarily to parallelise I/O (i.e.
|
631 |
-
fetching web pages).
|
632 |
-
"""
|
633 |
-
self._threads = []
|
634 |
-
for i in range(self.num_workers):
|
635 |
-
t = threading.Thread(target=self._fetch)
|
636 |
-
t.daemon = True
|
637 |
-
t.start()
|
638 |
-
self._threads.append(t)
|
639 |
-
|
640 |
-
def _wait_threads(self):
|
641 |
-
"""
|
642 |
-
Tell all the threads to terminate (by sending a sentinel value) and
|
643 |
-
wait for them to do so.
|
644 |
-
"""
|
645 |
-
# Note that you need two loops, since you can't say which
|
646 |
-
# thread will get each sentinel
|
647 |
-
for t in self._threads:
|
648 |
-
self._to_fetch.put(None) # sentinel
|
649 |
-
for t in self._threads:
|
650 |
-
t.join()
|
651 |
-
self._threads = []
|
652 |
-
|
653 |
-
def _get_project(self, name):
|
654 |
-
result = {'urls': {}, 'digests': {}}
|
655 |
-
with self._gplock:
|
656 |
-
self.result = result
|
657 |
-
self.project_name = name
|
658 |
-
url = urljoin(self.base_url, '%s/' % quote(name))
|
659 |
-
self._seen.clear()
|
660 |
-
self._page_cache.clear()
|
661 |
-
self._prepare_threads()
|
662 |
-
try:
|
663 |
-
logger.debug('Queueing %s', url)
|
664 |
-
self._to_fetch.put(url)
|
665 |
-
self._to_fetch.join()
|
666 |
-
finally:
|
667 |
-
self._wait_threads()
|
668 |
-
del self.result
|
669 |
-
return result
|
670 |
-
|
671 |
-
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
|
672 |
-
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
|
673 |
-
|
674 |
-
def _is_platform_dependent(self, url):
|
675 |
-
"""
|
676 |
-
Does an URL refer to a platform-specific download?
|
677 |
-
"""
|
678 |
-
return self.platform_dependent.search(url)
|
679 |
-
|
680 |
-
def _process_download(self, url):
|
681 |
-
"""
|
682 |
-
See if an URL is a suitable download for a project.
|
683 |
-
|
684 |
-
If it is, register information in the result dictionary (for
|
685 |
-
_get_project) about the specific version it's for.
|
686 |
-
|
687 |
-
Note that the return value isn't actually used other than as a boolean
|
688 |
-
value.
|
689 |
-
"""
|
690 |
-
if self.platform_check and self._is_platform_dependent(url):
|
691 |
-
info = None
|
692 |
-
else:
|
693 |
-
info = self.convert_url_to_download_info(url, self.project_name)
|
694 |
-
logger.debug('process_download: %s -> %s', url, info)
|
695 |
-
if info:
|
696 |
-
with self._lock: # needed because self.result is shared
|
697 |
-
self._update_version_data(self.result, info)
|
698 |
-
return info
|
699 |
-
|
700 |
-
def _should_queue(self, link, referrer, rel):
|
701 |
-
"""
|
702 |
-
Determine whether a link URL from a referring page and with a
|
703 |
-
particular "rel" attribute should be queued for scraping.
|
704 |
-
"""
|
705 |
-
scheme, netloc, path, _, _, _ = urlparse(link)
|
706 |
-
if path.endswith(self.source_extensions + self.binary_extensions +
|
707 |
-
self.excluded_extensions):
|
708 |
-
result = False
|
709 |
-
elif self.skip_externals and not link.startswith(self.base_url):
|
710 |
-
result = False
|
711 |
-
elif not referrer.startswith(self.base_url):
|
712 |
-
result = False
|
713 |
-
elif rel not in ('homepage', 'download'):
|
714 |
-
result = False
|
715 |
-
elif scheme not in ('http', 'https', 'ftp'):
|
716 |
-
result = False
|
717 |
-
elif self._is_platform_dependent(link):
|
718 |
-
result = False
|
719 |
-
else:
|
720 |
-
host = netloc.split(':', 1)[0]
|
721 |
-
if host.lower() == 'localhost':
|
722 |
-
result = False
|
723 |
-
else:
|
724 |
-
result = True
|
725 |
-
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
|
726 |
-
referrer, result)
|
727 |
-
return result
|
728 |
-
|
729 |
-
def _fetch(self):
|
730 |
-
"""
|
731 |
-
Get a URL to fetch from the work queue, get the HTML page, examine its
|
732 |
-
links for download candidates and candidates for further scraping.
|
733 |
-
|
734 |
-
This is a handy method to run in a thread.
|
735 |
-
"""
|
736 |
-
while True:
|
737 |
-
url = self._to_fetch.get()
|
738 |
-
try:
|
739 |
-
if url:
|
740 |
-
page = self.get_page(url)
|
741 |
-
if page is None: # e.g. after an error
|
742 |
-
continue
|
743 |
-
for link, rel in page.links:
|
744 |
-
if link not in self._seen:
|
745 |
-
try:
|
746 |
-
self._seen.add(link)
|
747 |
-
if (not self._process_download(link) and
|
748 |
-
self._should_queue(link, url, rel)):
|
749 |
-
logger.debug('Queueing %s from %s', link, url)
|
750 |
-
self._to_fetch.put(link)
|
751 |
-
except MetadataInvalidError: # e.g. invalid versions
|
752 |
-
pass
|
753 |
-
except Exception as e: # pragma: no cover
|
754 |
-
self.errors.put(text_type(e))
|
755 |
-
finally:
|
756 |
-
# always do this, to avoid hangs :-)
|
757 |
-
self._to_fetch.task_done()
|
758 |
-
if not url:
|
759 |
-
#logger.debug('Sentinel seen, quitting.')
|
760 |
-
break
|
761 |
-
|
762 |
-
def get_page(self, url):
|
763 |
-
"""
|
764 |
-
Get the HTML for an URL, possibly from an in-memory cache.
|
765 |
-
|
766 |
-
XXX TODO Note: this cache is never actually cleared. It's assumed that
|
767 |
-
the data won't get stale over the lifetime of a locator instance (not
|
768 |
-
necessarily true for the default_locator).
|
769 |
-
"""
|
770 |
-
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
|
771 |
-
scheme, netloc, path, _, _, _ = urlparse(url)
|
772 |
-
if scheme == 'file' and os.path.isdir(url2pathname(path)):
|
773 |
-
url = urljoin(ensure_slash(url), 'index.html')
|
774 |
-
|
775 |
-
if url in self._page_cache:
|
776 |
-
result = self._page_cache[url]
|
777 |
-
logger.debug('Returning %s from cache: %s', url, result)
|
778 |
-
else:
|
779 |
-
host = netloc.split(':', 1)[0]
|
780 |
-
result = None
|
781 |
-
if host in self._bad_hosts:
|
782 |
-
logger.debug('Skipping %s due to bad host %s', url, host)
|
783 |
-
else:
|
784 |
-
req = Request(url, headers={'Accept-encoding': 'identity'})
|
785 |
-
try:
|
786 |
-
logger.debug('Fetching %s', url)
|
787 |
-
resp = self.opener.open(req, timeout=self.timeout)
|
788 |
-
logger.debug('Fetched %s', url)
|
789 |
-
headers = resp.info()
|
790 |
-
content_type = headers.get('Content-Type', '')
|
791 |
-
if HTML_CONTENT_TYPE.match(content_type):
|
792 |
-
final_url = resp.geturl()
|
793 |
-
data = resp.read()
|
794 |
-
encoding = headers.get('Content-Encoding')
|
795 |
-
if encoding:
|
796 |
-
decoder = self.decoders[encoding] # fail if not found
|
797 |
-
data = decoder(data)
|
798 |
-
encoding = 'utf-8'
|
799 |
-
m = CHARSET.search(content_type)
|
800 |
-
if m:
|
801 |
-
encoding = m.group(1)
|
802 |
-
try:
|
803 |
-
data = data.decode(encoding)
|
804 |
-
except UnicodeError: # pragma: no cover
|
805 |
-
data = data.decode('latin-1') # fallback
|
806 |
-
result = Page(data, final_url)
|
807 |
-
self._page_cache[final_url] = result
|
808 |
-
except HTTPError as e:
|
809 |
-
if e.code != 404:
|
810 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
811 |
-
except URLError as e: # pragma: no cover
|
812 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
813 |
-
with self._lock:
|
814 |
-
self._bad_hosts.add(host)
|
815 |
-
except Exception as e: # pragma: no cover
|
816 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
817 |
-
finally:
|
818 |
-
self._page_cache[url] = result # even if None (failure)
|
819 |
-
return result
|
820 |
-
|
821 |
-
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
|
822 |
-
|
823 |
-
def get_distribution_names(self):
|
824 |
-
"""
|
825 |
-
Return all the distribution names known to this locator.
|
826 |
-
"""
|
827 |
-
result = set()
|
828 |
-
page = self.get_page(self.base_url)
|
829 |
-
if not page:
|
830 |
-
raise DistlibException('Unable to get %s' % self.base_url)
|
831 |
-
for match in self._distname_re.finditer(page.data):
|
832 |
-
result.add(match.group(1))
|
833 |
-
return result
|
834 |
-
|
835 |
-
class DirectoryLocator(Locator):
|
836 |
-
"""
|
837 |
-
This class locates distributions in a directory tree.
|
838 |
-
"""
|
839 |
-
|
840 |
-
def __init__(self, path, **kwargs):
|
841 |
-
"""
|
842 |
-
Initialise an instance.
|
843 |
-
:param path: The root of the directory tree to search.
|
844 |
-
:param kwargs: Passed to the superclass constructor,
|
845 |
-
except for:
|
846 |
-
* recursive - if True (the default), subdirectories are
|
847 |
-
recursed into. If False, only the top-level directory
|
848 |
-
is searched,
|
849 |
-
"""
|
850 |
-
self.recursive = kwargs.pop('recursive', True)
|
851 |
-
super(DirectoryLocator, self).__init__(**kwargs)
|
852 |
-
path = os.path.abspath(path)
|
853 |
-
if not os.path.isdir(path): # pragma: no cover
|
854 |
-
raise DistlibException('Not a directory: %r' % path)
|
855 |
-
self.base_dir = path
|
856 |
-
|
857 |
-
def should_include(self, filename, parent):
|
858 |
-
"""
|
859 |
-
Should a filename be considered as a candidate for a distribution
|
860 |
-
archive? As well as the filename, the directory which contains it
|
861 |
-
is provided, though not used by the current implementation.
|
862 |
-
"""
|
863 |
-
return filename.endswith(self.downloadable_extensions)
|
864 |
-
|
865 |
-
def _get_project(self, name):
|
866 |
-
result = {'urls': {}, 'digests': {}}
|
867 |
-
for root, dirs, files in os.walk(self.base_dir):
|
868 |
-
for fn in files:
|
869 |
-
if self.should_include(fn, root):
|
870 |
-
fn = os.path.join(root, fn)
|
871 |
-
url = urlunparse(('file', '',
|
872 |
-
pathname2url(os.path.abspath(fn)),
|
873 |
-
'', '', ''))
|
874 |
-
info = self.convert_url_to_download_info(url, name)
|
875 |
-
if info:
|
876 |
-
self._update_version_data(result, info)
|
877 |
-
if not self.recursive:
|
878 |
-
break
|
879 |
-
return result
|
880 |
-
|
881 |
-
def get_distribution_names(self):
|
882 |
-
"""
|
883 |
-
Return all the distribution names known to this locator.
|
884 |
-
"""
|
885 |
-
result = set()
|
886 |
-
for root, dirs, files in os.walk(self.base_dir):
|
887 |
-
for fn in files:
|
888 |
-
if self.should_include(fn, root):
|
889 |
-
fn = os.path.join(root, fn)
|
890 |
-
url = urlunparse(('file', '',
|
891 |
-
pathname2url(os.path.abspath(fn)),
|
892 |
-
'', '', ''))
|
893 |
-
info = self.convert_url_to_download_info(url, None)
|
894 |
-
if info:
|
895 |
-
result.add(info['name'])
|
896 |
-
if not self.recursive:
|
897 |
-
break
|
898 |
-
return result
|
899 |
-
|
900 |
-
class JSONLocator(Locator):
|
901 |
-
"""
|
902 |
-
This locator uses special extended metadata (not available on PyPI) and is
|
903 |
-
the basis of performant dependency resolution in distlib. Other locators
|
904 |
-
require archive downloads before dependencies can be determined! As you
|
905 |
-
might imagine, that can be slow.
|
906 |
-
"""
|
907 |
-
def get_distribution_names(self):
|
908 |
-
"""
|
909 |
-
Return all the distribution names known to this locator.
|
910 |
-
"""
|
911 |
-
raise NotImplementedError('Not available from this locator')
|
912 |
-
|
913 |
-
def _get_project(self, name):
|
914 |
-
result = {'urls': {}, 'digests': {}}
|
915 |
-
data = get_project_data(name)
|
916 |
-
if data:
|
917 |
-
for info in data.get('files', []):
|
918 |
-
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
|
919 |
-
continue
|
920 |
-
# We don't store summary in project metadata as it makes
|
921 |
-
# the data bigger for no benefit during dependency
|
922 |
-
# resolution
|
923 |
-
dist = make_dist(data['name'], info['version'],
|
924 |
-
summary=data.get('summary',
|
925 |
-
'Placeholder for summary'),
|
926 |
-
scheme=self.scheme)
|
927 |
-
md = dist.metadata
|
928 |
-
md.source_url = info['url']
|
929 |
-
# TODO SHA256 digest
|
930 |
-
if 'digest' in info and info['digest']:
|
931 |
-
dist.digest = ('md5', info['digest'])
|
932 |
-
md.dependencies = info.get('requirements', {})
|
933 |
-
dist.exports = info.get('exports', {})
|
934 |
-
result[dist.version] = dist
|
935 |
-
result['urls'].setdefault(dist.version, set()).add(info['url'])
|
936 |
-
return result
|
937 |
-
|
938 |
-
class DistPathLocator(Locator):
|
939 |
-
"""
|
940 |
-
This locator finds installed distributions in a path. It can be useful for
|
941 |
-
adding to an :class:`AggregatingLocator`.
|
942 |
-
"""
|
943 |
-
def __init__(self, distpath, **kwargs):
|
944 |
-
"""
|
945 |
-
Initialise an instance.
|
946 |
-
|
947 |
-
:param distpath: A :class:`DistributionPath` instance to search.
|
948 |
-
"""
|
949 |
-
super(DistPathLocator, self).__init__(**kwargs)
|
950 |
-
assert isinstance(distpath, DistributionPath)
|
951 |
-
self.distpath = distpath
|
952 |
-
|
953 |
-
def _get_project(self, name):
|
954 |
-
dist = self.distpath.get_distribution(name)
|
955 |
-
if dist is None:
|
956 |
-
result = {'urls': {}, 'digests': {}}
|
957 |
-
else:
|
958 |
-
result = {
|
959 |
-
dist.version: dist,
|
960 |
-
'urls': {dist.version: set([dist.source_url])},
|
961 |
-
'digests': {dist.version: set([None])}
|
962 |
-
}
|
963 |
-
return result
|
964 |
-
|
965 |
-
|
966 |
-
class AggregatingLocator(Locator):
|
967 |
-
"""
|
968 |
-
This class allows you to chain and/or merge a list of locators.
|
969 |
-
"""
|
970 |
-
def __init__(self, *locators, **kwargs):
|
971 |
-
"""
|
972 |
-
Initialise an instance.
|
973 |
-
|
974 |
-
:param locators: The list of locators to search.
|
975 |
-
:param kwargs: Passed to the superclass constructor,
|
976 |
-
except for:
|
977 |
-
* merge - if False (the default), the first successful
|
978 |
-
search from any of the locators is returned. If True,
|
979 |
-
the results from all locators are merged (this can be
|
980 |
-
slow).
|
981 |
-
"""
|
982 |
-
self.merge = kwargs.pop('merge', False)
|
983 |
-
self.locators = locators
|
984 |
-
super(AggregatingLocator, self).__init__(**kwargs)
|
985 |
-
|
986 |
-
def clear_cache(self):
|
987 |
-
super(AggregatingLocator, self).clear_cache()
|
988 |
-
for locator in self.locators:
|
989 |
-
locator.clear_cache()
|
990 |
-
|
991 |
-
def _set_scheme(self, value):
|
992 |
-
self._scheme = value
|
993 |
-
for locator in self.locators:
|
994 |
-
locator.scheme = value
|
995 |
-
|
996 |
-
scheme = property(Locator.scheme.fget, _set_scheme)
|
997 |
-
|
998 |
-
def _get_project(self, name):
|
999 |
-
result = {}
|
1000 |
-
for locator in self.locators:
|
1001 |
-
d = locator.get_project(name)
|
1002 |
-
if d:
|
1003 |
-
if self.merge:
|
1004 |
-
files = result.get('urls', {})
|
1005 |
-
digests = result.get('digests', {})
|
1006 |
-
# next line could overwrite result['urls'], result['digests']
|
1007 |
-
result.update(d)
|
1008 |
-
df = result.get('urls')
|
1009 |
-
if files and df:
|
1010 |
-
for k, v in files.items():
|
1011 |
-
if k in df:
|
1012 |
-
df[k] |= v
|
1013 |
-
else:
|
1014 |
-
df[k] = v
|
1015 |
-
dd = result.get('digests')
|
1016 |
-
if digests and dd:
|
1017 |
-
dd.update(digests)
|
1018 |
-
else:
|
1019 |
-
# See issue #18. If any dists are found and we're looking
|
1020 |
-
# for specific constraints, we only return something if
|
1021 |
-
# a match is found. For example, if a DirectoryLocator
|
1022 |
-
# returns just foo (1.0) while we're looking for
|
1023 |
-
# foo (>= 2.0), we'll pretend there was nothing there so
|
1024 |
-
# that subsequent locators can be queried. Otherwise we
|
1025 |
-
# would just return foo (1.0) which would then lead to a
|
1026 |
-
# failure to find foo (>= 2.0), because other locators
|
1027 |
-
# weren't searched. Note that this only matters when
|
1028 |
-
# merge=False.
|
1029 |
-
if self.matcher is None:
|
1030 |
-
found = True
|
1031 |
-
else:
|
1032 |
-
found = False
|
1033 |
-
for k in d:
|
1034 |
-
if self.matcher.match(k):
|
1035 |
-
found = True
|
1036 |
-
break
|
1037 |
-
if found:
|
1038 |
-
result = d
|
1039 |
-
break
|
1040 |
-
return result
|
1041 |
-
|
1042 |
-
def get_distribution_names(self):
    """
    Return all the distribution names known to this locator.

    Child locators that don't support enumeration (and raise
    ``NotImplementedError``) are silently skipped.
    """
    names = set()
    for child in self.locators:
        try:
            child_names = child.get_distribution_names()
        except NotImplementedError:
            continue
        names.update(child_names)
    return names
|
1053 |
-
|
1054 |
-
|
1055 |
-
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 440.
default_locator = AggregatingLocator(
    # JSONLocator(), # don't use as PEP 426 is withdrawn
    SimpleScrapingLocator('https://pypi.org/simple/',
                          timeout=3.0),
    scheme='legacy')

# Module-level convenience alias for the default locator's locate() method.
locate = default_locator.locate
|
1064 |
-
|
1065 |
-
|
1066 |
-
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Record every name/version this distribution provides.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            # Drop the key entirely once nothing provides this name.
            if not s:
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:  # pragma: no cover
            # XXX compat-mode if cannot read the version: fall back to
            # matching on the bare project name only.
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.

        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False

                if match:
                    # NOTE: only the first matching provider is returned.
                    result.add(provider)
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).

        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.

        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other`` and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Collect the requirements currently satisfied by 'other' that
        # 'provider' would fail to satisfy.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider: transfer the requirement
            # bookkeeping across before swapping the distributions.
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.

        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.

        Return a set of :class:`Distribution` instances and a set of
        problems.

        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.

        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """

        # Per-call resolution state (reset on each invocation of find()):
        # provided: name -> set of (version, dist) pairs
        # dists: (name, version) -> dist
        # dists_by_name: name -> dist
        # reqts: dist -> set of requirement strings that selected it
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        todo = set([dist])
        install_dists = set([odist])
        # Breadth-style worklist: process each distribution once, queueing
        # any newly located providers of its requirements.
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            # ireqts: install-time requirements; sreqts: build-time;
            # ereqts: extra requirements pulled in via meta_extras.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
|
2 |
-
|
3 |
-
|
4 |
-
def connection_requires_http_tunnel(
    proxy_url=None, proxy_config=None, destination_scheme=None
):
    """
    Returns True if the connection requires an HTTP CONNECT through the proxy.

    :param URL proxy_url:
        URL of the proxy.
    :param ProxyConfig proxy_config:
        Proxy configuration from poolmanager.py
    :param str destination_scheme:
        The scheme of the destination. (i.e https, http, etc)
    """
    # Without a proxy there is nothing to tunnel through.
    if proxy_url is None:
        return False

    # Plain-HTTP destinations are always forwarded, never tunneled.
    if destination_scheme == "http":
        return False

    # An HTTPS proxy may be explicitly configured to forward (rather than
    # tunnel) HTTPS traffic.
    https_forwarding = bool(
        proxy_url.scheme == "https"
        and proxy_config
        and proxy_config.use_forwarding_for_https
    )

    # Every remaining combination needs a CONNECT tunnel.
    return not https_forwarding
|
35 |
-
|
36 |
-
|
37 |
-
def create_proxy_ssl_context(
    ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
):
    """
    Generates a default proxy ssl context if one hasn't been provided by the
    user.
    """
    # Normalize string/int inputs before building the context.
    ssl_context = create_urllib3_context(
        ssl_version=resolve_ssl_version(ssl_version),
        cert_reqs=resolve_cert_reqs(cert_reqs),
    )

    # If no explicit trust material was supplied, fall back to the
    # platform's default CA bundle (when the ssl module supports it).
    if (
        not ca_certs
        and not ca_cert_dir
        and not ca_cert_data
        and hasattr(ssl_context, "load_default_certs")
    ):
        ssl_context.load_default_certs()

    return ssl_context
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py
DELETED
@@ -1,1110 +0,0 @@
|
|
1 |
-
from __future__ import absolute_import
|
2 |
-
|
3 |
-
import errno
|
4 |
-
import logging
|
5 |
-
import re
|
6 |
-
import socket
|
7 |
-
import sys
|
8 |
-
import warnings
|
9 |
-
from socket import error as SocketError
|
10 |
-
from socket import timeout as SocketTimeout
|
11 |
-
|
12 |
-
from .connection import (
|
13 |
-
BaseSSLError,
|
14 |
-
BrokenPipeError,
|
15 |
-
DummyConnection,
|
16 |
-
HTTPConnection,
|
17 |
-
HTTPException,
|
18 |
-
HTTPSConnection,
|
19 |
-
VerifiedHTTPSConnection,
|
20 |
-
port_by_scheme,
|
21 |
-
)
|
22 |
-
from .exceptions import (
|
23 |
-
ClosedPoolError,
|
24 |
-
EmptyPoolError,
|
25 |
-
HeaderParsingError,
|
26 |
-
HostChangedError,
|
27 |
-
InsecureRequestWarning,
|
28 |
-
LocationValueError,
|
29 |
-
MaxRetryError,
|
30 |
-
NewConnectionError,
|
31 |
-
ProtocolError,
|
32 |
-
ProxyError,
|
33 |
-
ReadTimeoutError,
|
34 |
-
SSLError,
|
35 |
-
TimeoutError,
|
36 |
-
)
|
37 |
-
from .packages import six
|
38 |
-
from .packages.six.moves import queue
|
39 |
-
from .request import RequestMethods
|
40 |
-
from .response import HTTPResponse
|
41 |
-
from .util.connection import is_connection_dropped
|
42 |
-
from .util.proxy import connection_requires_http_tunnel
|
43 |
-
from .util.queue import LifoQueue
|
44 |
-
from .util.request import set_file_position
|
45 |
-
from .util.response import assert_header_parsing
|
46 |
-
from .util.retry import Retry
|
47 |
-
from .util.ssl_match_hostname import CertificateError
|
48 |
-
from .util.timeout import Timeout
|
49 |
-
from .util.url import Url, _encode_target
|
50 |
-
from .util.url import _normalize_host as normalize_host
|
51 |
-
from .util.url import get_host, parse_url
|
52 |
-
|
53 |
-
# Py2/Py3 compatible range iterator.
xrange = six.moves.xrange

log = logging.getLogger(__name__)

# Sentinel used to detect "no timeout argument passed", since None is itself
# a valid timeout value.
_Default = object()
|
58 |
-
|
59 |
-
|
60 |
-
# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.

    .. note::
       ConnectionPool.urlopen() does not normalize or percent-encode target URIs
       which is useful if your target server doesn't support percent-encoded
       target URIs.
    """

    # Subclasses set the URL scheme (e.g. "http") and the queue class used
    # to hold pooled connections.
    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        # Normalized host for making connections; the raw lower-cased host
        # is kept separately for proxy comparisons.
        self.host = _normalize_host(host, scheme=self.scheme)
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Return False to re-raise any potential exceptions
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        pass
|
99 |
-
|
100 |
-
|
101 |
-
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# Errno values that signal "operation would block" on a non-blocking socket.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
|
103 |
-
|
104 |
-
|
105 |
-
class HTTPConnectionPool(ConnectionPool, RequestMethods):
|
106 |
-
"""
|
107 |
-
Thread-safe connection pool for one host.
|
108 |
-
|
109 |
-
:param host:
|
110 |
-
Host used for this HTTP Connection (e.g. "localhost"), passed into
|
111 |
-
:class:`http.client.HTTPConnection`.
|
112 |
-
|
113 |
-
:param port:
|
114 |
-
Port used for this HTTP Connection (None is equivalent to 80), passed
|
115 |
-
into :class:`http.client.HTTPConnection`.
|
116 |
-
|
117 |
-
:param strict:
|
118 |
-
Causes BadStatusLine to be raised if the status line can't be parsed
|
119 |
-
as a valid HTTP/1.0 or 1.1 status line, passed into
|
120 |
-
:class:`http.client.HTTPConnection`.
|
121 |
-
|
122 |
-
.. note::
|
123 |
-
Only works in Python 2. This parameter is ignored in Python 3.
|
124 |
-
|
125 |
-
:param timeout:
|
126 |
-
Socket timeout in seconds for each individual connection. This can
|
127 |
-
be a float or integer, which sets the timeout for the HTTP request,
|
128 |
-
or an instance of :class:`urllib3.util.Timeout` which gives you more
|
129 |
-
fine-grained control over request timeouts. After the constructor has
|
130 |
-
been parsed, this is always a `urllib3.util.Timeout` object.
|
131 |
-
|
132 |
-
:param maxsize:
|
133 |
-
Number of connections to save that can be reused. More than 1 is useful
|
134 |
-
in multithreaded situations. If ``block`` is set to False, more
|
135 |
-
connections will be created but they will not be saved once they've
|
136 |
-
been used.
|
137 |
-
|
138 |
-
:param block:
|
139 |
-
If set to True, no more than ``maxsize`` connections will be used at
|
140 |
-
a time. When no free connections are available, the call will block
|
141 |
-
until a connection has been released. This is a useful side effect for
|
142 |
-
particular multithreaded situations where one does not want to use more
|
143 |
-
than maxsize connections per host to prevent flooding.
|
144 |
-
|
145 |
-
:param headers:
|
146 |
-
Headers to include with all requests, unless other headers are given
|
147 |
-
explicitly.
|
148 |
-
|
149 |
-
:param retries:
|
150 |
-
Retry configuration to use by default with requests in this pool.
|
151 |
-
|
152 |
-
:param _proxy:
|
153 |
-
Parsed proxy URL, should not be used directly, instead, see
|
154 |
-
:class:`urllib3.ProxyManager`
|
155 |
-
|
156 |
-
:param _proxy_headers:
|
157 |
-
A dictionary with proxy headers, should not be used directly,
|
158 |
-
instead, see :class:`urllib3.ProxyManager`
|
159 |
-
|
160 |
-
:param \\**conn_kw:
|
161 |
-
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
|
162 |
-
:class:`urllib3.connection.HTTPSConnection` instances.
|
163 |
-
"""
|
164 |
-
|
165 |
-
    # URL scheme served by this pool.
    scheme = "http"
    # Connection and response classes; HTTPSConnectionPool overrides these.
    ConnectionCls = HTTPConnection
    ResponseCls = HTTPResponse
|
168 |
-
|
169 |
-
    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        _proxy_config=None,
        **conn_kw
    ):
        """Initialise the pool; see the class docstring for parameter details."""
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # Accept a raw int/float timeout for backwards compatibility.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        self.proxy_config = _proxy_config

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])

            self.conn_kw["proxy"] = self.proxy
            self.conn_kw["proxy_config"] = self.proxy_config
|
222 |
-
|
223 |
-
    def _new_conn(self):
        """
        Return a fresh :class:`HTTPConnection`.
        """
        self.num_connections += 1
        log.debug(
            "Starting new HTTP connection (%d): %s:%s",
            self.num_connections,
            self.host,
            self.port or "80",
        )

        # conn_kw carries proxy settings and any user-supplied socket options.
        conn = self.ConnectionCls(
            host=self.host,
            port=self.port,
            timeout=self.timeout.connect_timeout,
            strict=self.strict,
            **self.conn_kw
        )
        return conn
|
243 |
-
|
244 |
-
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                )
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, "auto_open", 1) == 0:
                # This is a proxied connection that has been mutated by
                # http.client._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        return conn or self._new_conn()
|
282 |
-
|
283 |
-
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s. Connection pool size: %s",
                self.host,
                self.pool.qsize(),
            )
        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()
|
313 |
-
|
314 |
-
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.

        Hook for subclasses (e.g. the HTTPS pool performs certificate
        validation here); the base implementation does nothing.
        """
        pass
|
319 |
-
|
320 |
-
    def _prepare_proxy(self, conn):
        # Nothing to do for HTTP connections; HTTPS subclasses set up the
        # CONNECT tunnel here.
        pass
|
323 |
-
|
324 |
-
    def _get_timeout(self, timeout):
        """Helper that always returns a :class:`urllib3.util.Timeout`"""
        # _Default is the module-level sentinel meaning "no timeout given";
        # in that case fall back to the pool's configured timeout.
        if timeout is _Default:
            return self.timeout.clone()

        if isinstance(timeout, Timeout):
            return timeout.clone()
        else:
            # User passed us an int/float. This is for backwards compatibility,
            # can be removed later
            return Timeout.from_float(timeout)
|
335 |
-
|
336 |
-
    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, "errno") and err.errno in _blocking_errnos:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if "timed out" in str(err) or "did not complete (read)" in str(
            err
        ):  # Python < 2.7.4
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )
|
360 |
-
|
361 |
-
def _make_request(
|
362 |
-
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
|
363 |
-
):
|
364 |
-
"""
|
365 |
-
Perform a request on a given urllib connection object taken from our
|
366 |
-
pool.
|
367 |
-
|
368 |
-
:param conn:
|
369 |
-
a connection from one of our connection pools
|
370 |
-
|
371 |
-
:param timeout:
|
372 |
-
Socket timeout in seconds for the request. This can be a
|
373 |
-
float or integer, which will set the same timeout value for
|
374 |
-
the socket connect and the socket read, or an instance of
|
375 |
-
:class:`urllib3.util.Timeout`, which gives you more fine-grained
|
376 |
-
control over your timeouts.
|
377 |
-
"""
|
378 |
-
self.num_requests += 1
|
379 |
-
|
380 |
-
timeout_obj = self._get_timeout(timeout)
|
381 |
-
timeout_obj.start_connect()
|
382 |
-
conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
|
383 |
-
|
384 |
-
# Trigger any extra validation we need to do.
|
385 |
-
try:
|
386 |
-
self._validate_conn(conn)
|
387 |
-
except (SocketTimeout, BaseSSLError) as e:
|
388 |
-
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
|
389 |
-
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
|
390 |
-
raise
|
391 |
-
|
392 |
-
# conn.request() calls http.client.*.request, not the method in
|
393 |
-
# urllib3.request. It also calls makefile (recv) on the socket.
|
394 |
-
try:
|
395 |
-
if chunked:
|
396 |
-
conn.request_chunked(method, url, **httplib_request_kw)
|
397 |
-
else:
|
398 |
-
conn.request(method, url, **httplib_request_kw)
|
399 |
-
|
400 |
-
# We are swallowing BrokenPipeError (errno.EPIPE) since the server is
|
401 |
-
# legitimately able to close the connection after sending a valid response.
|
402 |
-
# With this behaviour, the received response is still readable.
|
403 |
-
except BrokenPipeError:
|
404 |
-
# Python 3
|
405 |
-
pass
|
406 |
-
except IOError as e:
|
407 |
-
# Python 2 and macOS/Linux
|
408 |
-
# EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
|
409 |
-
# https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
|
410 |
-
if e.errno not in {
|
411 |
-
errno.EPIPE,
|
412 |
-
errno.ESHUTDOWN,
|
413 |
-
errno.EPROTOTYPE,
|
414 |
-
}:
|
415 |
-
raise
|
416 |
-
|
417 |
-
# Reset the timeout for the recv() on the socket
|
418 |
-
read_timeout = timeout_obj.read_timeout
|
419 |
-
|
420 |
-
# App Engine doesn't have a sock attr
|
421 |
-
if getattr(conn, "sock", None):
|
422 |
-
# In Python 3 socket.py will catch EAGAIN and return None when you
|
423 |
-
# try and read into the file pointer created by http.client, which
|
424 |
-
# instead raises a BadStatusLine exception. Instead of catching
|
425 |
-
# the exception and assuming all BadStatusLine exceptions are read
|
426 |
-
# timeouts, check for a zero timeout before making the request.
|
427 |
-
if read_timeout == 0:
|
428 |
-
raise ReadTimeoutError(
|
429 |
-
self, url, "Read timed out. (read timeout=%s)" % read_timeout
|
430 |
-
)
|
431 |
-
if read_timeout is Timeout.DEFAULT_TIMEOUT:
|
432 |
-
conn.sock.settimeout(socket.getdefaulttimeout())
|
433 |
-
else: # None or a value
|
434 |
-
conn.sock.settimeout(read_timeout)
|
435 |
-
|
436 |
-
# Receive the response from the server
|
437 |
-
try:
|
438 |
-
try:
|
439 |
-
# Python 2.7, use buffering of HTTP responses
|
440 |
-
httplib_response = conn.getresponse(buffering=True)
|
441 |
-
except TypeError:
|
442 |
-
# Python 3
|
443 |
-
try:
|
444 |
-
httplib_response = conn.getresponse()
|
445 |
-
except BaseException as e:
|
446 |
-
# Remove the TypeError from the exception chain in
|
447 |
-
# Python 3 (including for exceptions like SystemExit).
|
448 |
-
# Otherwise it looks like a bug in the code.
|
449 |
-
six.raise_from(e, None)
|
450 |
-
except (SocketTimeout, BaseSSLError, SocketError) as e:
|
451 |
-
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
|
452 |
-
raise
|
453 |
-
|
454 |
-
# AppEngine doesn't have a version attr.
|
455 |
-
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
|
456 |
-
log.debug(
|
457 |
-
'%s://%s:%s "%s %s %s" %s %s',
|
458 |
-
self.scheme,
|
459 |
-
self.host,
|
460 |
-
self.port,
|
461 |
-
method,
|
462 |
-
url,
|
463 |
-
http_version,
|
464 |
-
httplib_response.status,
|
465 |
-
httplib_response.length,
|
466 |
-
)
|
467 |
-
|
468 |
-
try:
|
469 |
-
assert_header_parsing(httplib_response.msg)
|
470 |
-
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
|
471 |
-
log.warning(
|
472 |
-
"Failed to parse headers (url=%s): %s",
|
473 |
-
self._absolute_url(url),
|
474 |
-
hpe,
|
475 |
-
exc_info=True,
|
476 |
-
)
|
477 |
-
|
478 |
-
return httplib_response
|
479 |
-
|
480 |
-
def _absolute_url(self, path):
|
481 |
-
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
|
482 |
-
|
483 |
-
def close(self):
|
484 |
-
"""
|
485 |
-
Close all pooled connections and disable the pool.
|
486 |
-
"""
|
487 |
-
if self.pool is None:
|
488 |
-
return
|
489 |
-
# Disable access to the pool
|
490 |
-
old_pool, self.pool = self.pool, None
|
491 |
-
|
492 |
-
try:
|
493 |
-
while True:
|
494 |
-
conn = old_pool.get(block=False)
|
495 |
-
if conn:
|
496 |
-
conn.close()
|
497 |
-
|
498 |
-
except queue.Empty:
|
499 |
-
pass # Done.
|
500 |
-
|
501 |
-
def is_same_host(self, url):
|
502 |
-
"""
|
503 |
-
Check if the given ``url`` is a member of the same host as this
|
504 |
-
connection pool.
|
505 |
-
"""
|
506 |
-
if url.startswith("/"):
|
507 |
-
return True
|
508 |
-
|
509 |
-
# TODO: Add optional support for socket.gethostbyname checking.
|
510 |
-
scheme, host, port = get_host(url)
|
511 |
-
if host is not None:
|
512 |
-
host = _normalize_host(host, scheme=scheme)
|
513 |
-
|
514 |
-
# Use explicit default port for comparison when none is given
|
515 |
-
if self.port and not port:
|
516 |
-
port = port_by_scheme.get(scheme)
|
517 |
-
elif not self.port and port == port_by_scheme.get(scheme):
|
518 |
-
port = None
|
519 |
-
|
520 |
-
return (scheme, host, port) == (self.scheme, self.host, self.port)
|
521 |
-
|
522 |
-
def urlopen(
|
523 |
-
self,
|
524 |
-
method,
|
525 |
-
url,
|
526 |
-
body=None,
|
527 |
-
headers=None,
|
528 |
-
retries=None,
|
529 |
-
redirect=True,
|
530 |
-
assert_same_host=True,
|
531 |
-
timeout=_Default,
|
532 |
-
pool_timeout=None,
|
533 |
-
release_conn=None,
|
534 |
-
chunked=False,
|
535 |
-
body_pos=None,
|
536 |
-
**response_kw
|
537 |
-
):
|
538 |
-
"""
|
539 |
-
Get a connection from the pool and perform an HTTP request. This is the
|
540 |
-
lowest level call for making a request, so you'll need to specify all
|
541 |
-
the raw details.
|
542 |
-
|
543 |
-
.. note::
|
544 |
-
|
545 |
-
More commonly, it's appropriate to use a convenience method provided
|
546 |
-
by :class:`.RequestMethods`, such as :meth:`request`.
|
547 |
-
|
548 |
-
.. note::
|
549 |
-
|
550 |
-
`release_conn` will only behave as expected if
|
551 |
-
`preload_content=False` because we want to make
|
552 |
-
`preload_content=False` the default behaviour someday soon without
|
553 |
-
breaking backwards compatibility.
|
554 |
-
|
555 |
-
:param method:
|
556 |
-
HTTP request method (such as GET, POST, PUT, etc.)
|
557 |
-
|
558 |
-
:param url:
|
559 |
-
The URL to perform the request on.
|
560 |
-
|
561 |
-
:param body:
|
562 |
-
Data to send in the request body, either :class:`str`, :class:`bytes`,
|
563 |
-
an iterable of :class:`str`/:class:`bytes`, or a file-like object.
|
564 |
-
|
565 |
-
:param headers:
|
566 |
-
Dictionary of custom headers to send, such as User-Agent,
|
567 |
-
If-None-Match, etc. If None, pool headers are used. If provided,
|
568 |
-
these headers completely replace any pool-specific headers.
|
569 |
-
|
570 |
-
:param retries:
|
571 |
-
Configure the number of retries to allow before raising a
|
572 |
-
:class:`~urllib3.exceptions.MaxRetryError` exception.
|
573 |
-
|
574 |
-
Pass ``None`` to retry until you receive a response. Pass a
|
575 |
-
:class:`~urllib3.util.retry.Retry` object for fine-grained control
|
576 |
-
over different types of retries.
|
577 |
-
Pass an integer number to retry connection errors that many times,
|
578 |
-
but no other types of errors. Pass zero to never retry.
|
579 |
-
|
580 |
-
If ``False``, then retries are disabled and any exception is raised
|
581 |
-
immediately. Also, instead of raising a MaxRetryError on redirects,
|
582 |
-
the redirect response will be returned.
|
583 |
-
|
584 |
-
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
|
585 |
-
|
586 |
-
:param redirect:
|
587 |
-
If True, automatically handle redirects (status codes 301, 302,
|
588 |
-
303, 307, 308). Each redirect counts as a retry. Disabling retries
|
589 |
-
will disable redirect, too.
|
590 |
-
|
591 |
-
:param assert_same_host:
|
592 |
-
If ``True``, will make sure that the host of the pool requests is
|
593 |
-
consistent else will raise HostChangedError. When ``False``, you can
|
594 |
-
use the pool on an HTTP proxy and request foreign hosts.
|
595 |
-
|
596 |
-
:param timeout:
|
597 |
-
If specified, overrides the default timeout for this one
|
598 |
-
request. It may be a float (in seconds) or an instance of
|
599 |
-
:class:`urllib3.util.Timeout`.
|
600 |
-
|
601 |
-
:param pool_timeout:
|
602 |
-
If set and the pool is set to block=True, then this method will
|
603 |
-
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
|
604 |
-
connection is available within the time period.
|
605 |
-
|
606 |
-
:param release_conn:
|
607 |
-
If False, then the urlopen call will not release the connection
|
608 |
-
back into the pool once a response is received (but will release if
|
609 |
-
you read the entire contents of the response such as when
|
610 |
-
`preload_content=True`). This is useful if you're not preloading
|
611 |
-
the response's content immediately. You will need to call
|
612 |
-
``r.release_conn()`` on the response ``r`` to return the connection
|
613 |
-
back into the pool. If None, it takes the value of
|
614 |
-
``response_kw.get('preload_content', True)``.
|
615 |
-
|
616 |
-
:param chunked:
|
617 |
-
If True, urllib3 will send the body using chunked transfer
|
618 |
-
encoding. Otherwise, urllib3 will send the body using the standard
|
619 |
-
content-length form. Defaults to False.
|
620 |
-
|
621 |
-
:param int body_pos:
|
622 |
-
Position to seek to in file-like body in the event of a retry or
|
623 |
-
redirect. Typically this won't need to be set because urllib3 will
|
624 |
-
auto-populate the value when needed.
|
625 |
-
|
626 |
-
:param \\**response_kw:
|
627 |
-
Additional parameters are passed to
|
628 |
-
:meth:`urllib3.response.HTTPResponse.from_httplib`
|
629 |
-
"""
|
630 |
-
|
631 |
-
parsed_url = parse_url(url)
|
632 |
-
destination_scheme = parsed_url.scheme
|
633 |
-
|
634 |
-
if headers is None:
|
635 |
-
headers = self.headers
|
636 |
-
|
637 |
-
if not isinstance(retries, Retry):
|
638 |
-
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
|
639 |
-
|
640 |
-
if release_conn is None:
|
641 |
-
release_conn = response_kw.get("preload_content", True)
|
642 |
-
|
643 |
-
# Check host
|
644 |
-
if assert_same_host and not self.is_same_host(url):
|
645 |
-
raise HostChangedError(self, url, retries)
|
646 |
-
|
647 |
-
# Ensure that the URL we're connecting to is properly encoded
|
648 |
-
if url.startswith("/"):
|
649 |
-
url = six.ensure_str(_encode_target(url))
|
650 |
-
else:
|
651 |
-
url = six.ensure_str(parsed_url.url)
|
652 |
-
|
653 |
-
conn = None
|
654 |
-
|
655 |
-
# Track whether `conn` needs to be released before
|
656 |
-
# returning/raising/recursing. Update this variable if necessary, and
|
657 |
-
# leave `release_conn` constant throughout the function. That way, if
|
658 |
-
# the function recurses, the original value of `release_conn` will be
|
659 |
-
# passed down into the recursive call, and its value will be respected.
|
660 |
-
#
|
661 |
-
# See issue #651 [1] for details.
|
662 |
-
#
|
663 |
-
# [1] <https://github.com/urllib3/urllib3/issues/651>
|
664 |
-
release_this_conn = release_conn
|
665 |
-
|
666 |
-
http_tunnel_required = connection_requires_http_tunnel(
|
667 |
-
self.proxy, self.proxy_config, destination_scheme
|
668 |
-
)
|
669 |
-
|
670 |
-
# Merge the proxy headers. Only done when not using HTTP CONNECT. We
|
671 |
-
# have to copy the headers dict so we can safely change it without those
|
672 |
-
# changes being reflected in anyone else's copy.
|
673 |
-
if not http_tunnel_required:
|
674 |
-
headers = headers.copy()
|
675 |
-
headers.update(self.proxy_headers)
|
676 |
-
|
677 |
-
# Must keep the exception bound to a separate variable or else Python 3
|
678 |
-
# complains about UnboundLocalError.
|
679 |
-
err = None
|
680 |
-
|
681 |
-
# Keep track of whether we cleanly exited the except block. This
|
682 |
-
# ensures we do proper cleanup in finally.
|
683 |
-
clean_exit = False
|
684 |
-
|
685 |
-
# Rewind body position, if needed. Record current position
|
686 |
-
# for future rewinds in the event of a redirect/retry.
|
687 |
-
body_pos = set_file_position(body, body_pos)
|
688 |
-
|
689 |
-
try:
|
690 |
-
# Request a connection from the queue.
|
691 |
-
timeout_obj = self._get_timeout(timeout)
|
692 |
-
conn = self._get_conn(timeout=pool_timeout)
|
693 |
-
|
694 |
-
conn.timeout = timeout_obj.connect_timeout
|
695 |
-
|
696 |
-
is_new_proxy_conn = self.proxy is not None and not getattr(
|
697 |
-
conn, "sock", None
|
698 |
-
)
|
699 |
-
if is_new_proxy_conn and http_tunnel_required:
|
700 |
-
self._prepare_proxy(conn)
|
701 |
-
|
702 |
-
# Make the request on the httplib connection object.
|
703 |
-
httplib_response = self._make_request(
|
704 |
-
conn,
|
705 |
-
method,
|
706 |
-
url,
|
707 |
-
timeout=timeout_obj,
|
708 |
-
body=body,
|
709 |
-
headers=headers,
|
710 |
-
chunked=chunked,
|
711 |
-
)
|
712 |
-
|
713 |
-
# If we're going to release the connection in ``finally:``, then
|
714 |
-
# the response doesn't need to know about the connection. Otherwise
|
715 |
-
# it will also try to release it and we'll have a double-release
|
716 |
-
# mess.
|
717 |
-
response_conn = conn if not release_conn else None
|
718 |
-
|
719 |
-
# Pass method to Response for length checking
|
720 |
-
response_kw["request_method"] = method
|
721 |
-
|
722 |
-
# Import httplib's response into our own wrapper object
|
723 |
-
response = self.ResponseCls.from_httplib(
|
724 |
-
httplib_response,
|
725 |
-
pool=self,
|
726 |
-
connection=response_conn,
|
727 |
-
retries=retries,
|
728 |
-
**response_kw
|
729 |
-
)
|
730 |
-
|
731 |
-
# Everything went great!
|
732 |
-
clean_exit = True
|
733 |
-
|
734 |
-
except EmptyPoolError:
|
735 |
-
# Didn't get a connection from the pool, no need to clean up
|
736 |
-
clean_exit = True
|
737 |
-
release_this_conn = False
|
738 |
-
raise
|
739 |
-
|
740 |
-
except (
|
741 |
-
TimeoutError,
|
742 |
-
HTTPException,
|
743 |
-
SocketError,
|
744 |
-
ProtocolError,
|
745 |
-
BaseSSLError,
|
746 |
-
SSLError,
|
747 |
-
CertificateError,
|
748 |
-
) as e:
|
749 |
-
# Discard the connection for these exceptions. It will be
|
750 |
-
# replaced during the next _get_conn() call.
|
751 |
-
clean_exit = False
|
752 |
-
|
753 |
-
def _is_ssl_error_message_from_http_proxy(ssl_error):
|
754 |
-
# We're trying to detect the message 'WRONG_VERSION_NUMBER' but
|
755 |
-
# SSLErrors are kinda all over the place when it comes to the message,
|
756 |
-
# so we try to cover our bases here!
|
757 |
-
message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
|
758 |
-
return (
|
759 |
-
"wrong version number" in message or "unknown protocol" in message
|
760 |
-
)
|
761 |
-
|
762 |
-
# Try to detect a common user error with proxies which is to
|
763 |
-
# set an HTTP proxy to be HTTPS when it should be 'http://'
|
764 |
-
# (ie {'http': 'http://proxy', 'https': 'https://proxy'})
|
765 |
-
# Instead we add a nice error message and point to a URL.
|
766 |
-
if (
|
767 |
-
isinstance(e, BaseSSLError)
|
768 |
-
and self.proxy
|
769 |
-
and _is_ssl_error_message_from_http_proxy(e)
|
770 |
-
and conn.proxy
|
771 |
-
and conn.proxy.scheme == "https"
|
772 |
-
):
|
773 |
-
e = ProxyError(
|
774 |
-
"Your proxy appears to only use HTTP and not HTTPS, "
|
775 |
-
"try changing your proxy URL to be HTTP. See: "
|
776 |
-
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
|
777 |
-
"#https-proxy-error-http-proxy",
|
778 |
-
SSLError(e),
|
779 |
-
)
|
780 |
-
elif isinstance(e, (BaseSSLError, CertificateError)):
|
781 |
-
e = SSLError(e)
|
782 |
-
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
|
783 |
-
e = ProxyError("Cannot connect to proxy.", e)
|
784 |
-
elif isinstance(e, (SocketError, HTTPException)):
|
785 |
-
e = ProtocolError("Connection aborted.", e)
|
786 |
-
|
787 |
-
retries = retries.increment(
|
788 |
-
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
|
789 |
-
)
|
790 |
-
retries.sleep()
|
791 |
-
|
792 |
-
# Keep track of the error for the retry warning.
|
793 |
-
err = e
|
794 |
-
|
795 |
-
finally:
|
796 |
-
if not clean_exit:
|
797 |
-
# We hit some kind of exception, handled or otherwise. We need
|
798 |
-
# to throw the connection away unless explicitly told not to.
|
799 |
-
# Close the connection, set the variable to None, and make sure
|
800 |
-
# we put the None back in the pool to avoid leaking it.
|
801 |
-
conn = conn and conn.close()
|
802 |
-
release_this_conn = True
|
803 |
-
|
804 |
-
if release_this_conn:
|
805 |
-
# Put the connection back to be reused. If the connection is
|
806 |
-
# expired then it will be None, which will get replaced with a
|
807 |
-
# fresh connection during _get_conn.
|
808 |
-
self._put_conn(conn)
|
809 |
-
|
810 |
-
if not conn:
|
811 |
-
# Try again
|
812 |
-
log.warning(
|
813 |
-
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
|
814 |
-
)
|
815 |
-
return self.urlopen(
|
816 |
-
method,
|
817 |
-
url,
|
818 |
-
body,
|
819 |
-
headers,
|
820 |
-
retries,
|
821 |
-
redirect,
|
822 |
-
assert_same_host,
|
823 |
-
timeout=timeout,
|
824 |
-
pool_timeout=pool_timeout,
|
825 |
-
release_conn=release_conn,
|
826 |
-
chunked=chunked,
|
827 |
-
body_pos=body_pos,
|
828 |
-
**response_kw
|
829 |
-
)
|
830 |
-
|
831 |
-
# Handle redirect?
|
832 |
-
redirect_location = redirect and response.get_redirect_location()
|
833 |
-
if redirect_location:
|
834 |
-
if response.status == 303:
|
835 |
-
method = "GET"
|
836 |
-
|
837 |
-
try:
|
838 |
-
retries = retries.increment(method, url, response=response, _pool=self)
|
839 |
-
except MaxRetryError:
|
840 |
-
if retries.raise_on_redirect:
|
841 |
-
response.drain_conn()
|
842 |
-
raise
|
843 |
-
return response
|
844 |
-
|
845 |
-
response.drain_conn()
|
846 |
-
retries.sleep_for_retry(response)
|
847 |
-
log.debug("Redirecting %s -> %s", url, redirect_location)
|
848 |
-
return self.urlopen(
|
849 |
-
method,
|
850 |
-
redirect_location,
|
851 |
-
body,
|
852 |
-
headers,
|
853 |
-
retries=retries,
|
854 |
-
redirect=redirect,
|
855 |
-
assert_same_host=assert_same_host,
|
856 |
-
timeout=timeout,
|
857 |
-
pool_timeout=pool_timeout,
|
858 |
-
release_conn=release_conn,
|
859 |
-
chunked=chunked,
|
860 |
-
body_pos=body_pos,
|
861 |
-
**response_kw
|
862 |
-
)
|
863 |
-
|
864 |
-
# Check if we should retry the HTTP response.
|
865 |
-
has_retry_after = bool(response.headers.get("Retry-After"))
|
866 |
-
if retries.is_retry(method, response.status, has_retry_after):
|
867 |
-
try:
|
868 |
-
retries = retries.increment(method, url, response=response, _pool=self)
|
869 |
-
except MaxRetryError:
|
870 |
-
if retries.raise_on_status:
|
871 |
-
response.drain_conn()
|
872 |
-
raise
|
873 |
-
return response
|
874 |
-
|
875 |
-
response.drain_conn()
|
876 |
-
retries.sleep(response)
|
877 |
-
log.debug("Retry: %s", url)
|
878 |
-
return self.urlopen(
|
879 |
-
method,
|
880 |
-
url,
|
881 |
-
body,
|
882 |
-
headers,
|
883 |
-
retries=retries,
|
884 |
-
redirect=redirect,
|
885 |
-
assert_same_host=assert_same_host,
|
886 |
-
timeout=timeout,
|
887 |
-
pool_timeout=pool_timeout,
|
888 |
-
release_conn=release_conn,
|
889 |
-
chunked=chunked,
|
890 |
-
body_pos=body_pos,
|
891 |
-
**response_kw
|
892 |
-
)
|
893 |
-
|
894 |
-
return response
|
895 |
-
|
896 |
-
|
897 |
-
class HTTPSConnectionPool(HTTPConnectionPool):
|
898 |
-
"""
|
899 |
-
Same as :class:`.HTTPConnectionPool`, but HTTPS.
|
900 |
-
|
901 |
-
:class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
|
902 |
-
``assert_hostname`` and ``host`` in this order to verify connections.
|
903 |
-
If ``assert_hostname`` is False, no verification is done.
|
904 |
-
|
905 |
-
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
|
906 |
-
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
|
907 |
-
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
|
908 |
-
the connection socket into an SSL socket.
|
909 |
-
"""
|
910 |
-
|
911 |
-
scheme = "https"
|
912 |
-
ConnectionCls = HTTPSConnection
|
913 |
-
|
914 |
-
def __init__(
|
915 |
-
self,
|
916 |
-
host,
|
917 |
-
port=None,
|
918 |
-
strict=False,
|
919 |
-
timeout=Timeout.DEFAULT_TIMEOUT,
|
920 |
-
maxsize=1,
|
921 |
-
block=False,
|
922 |
-
headers=None,
|
923 |
-
retries=None,
|
924 |
-
_proxy=None,
|
925 |
-
_proxy_headers=None,
|
926 |
-
key_file=None,
|
927 |
-
cert_file=None,
|
928 |
-
cert_reqs=None,
|
929 |
-
key_password=None,
|
930 |
-
ca_certs=None,
|
931 |
-
ssl_version=None,
|
932 |
-
assert_hostname=None,
|
933 |
-
assert_fingerprint=None,
|
934 |
-
ca_cert_dir=None,
|
935 |
-
**conn_kw
|
936 |
-
):
|
937 |
-
|
938 |
-
HTTPConnectionPool.__init__(
|
939 |
-
self,
|
940 |
-
host,
|
941 |
-
port,
|
942 |
-
strict,
|
943 |
-
timeout,
|
944 |
-
maxsize,
|
945 |
-
block,
|
946 |
-
headers,
|
947 |
-
retries,
|
948 |
-
_proxy,
|
949 |
-
_proxy_headers,
|
950 |
-
**conn_kw
|
951 |
-
)
|
952 |
-
|
953 |
-
self.key_file = key_file
|
954 |
-
self.cert_file = cert_file
|
955 |
-
self.cert_reqs = cert_reqs
|
956 |
-
self.key_password = key_password
|
957 |
-
self.ca_certs = ca_certs
|
958 |
-
self.ca_cert_dir = ca_cert_dir
|
959 |
-
self.ssl_version = ssl_version
|
960 |
-
self.assert_hostname = assert_hostname
|
961 |
-
self.assert_fingerprint = assert_fingerprint
|
962 |
-
|
963 |
-
def _prepare_conn(self, conn):
|
964 |
-
"""
|
965 |
-
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
|
966 |
-
and establish the tunnel if proxy is used.
|
967 |
-
"""
|
968 |
-
|
969 |
-
if isinstance(conn, VerifiedHTTPSConnection):
|
970 |
-
conn.set_cert(
|
971 |
-
key_file=self.key_file,
|
972 |
-
key_password=self.key_password,
|
973 |
-
cert_file=self.cert_file,
|
974 |
-
cert_reqs=self.cert_reqs,
|
975 |
-
ca_certs=self.ca_certs,
|
976 |
-
ca_cert_dir=self.ca_cert_dir,
|
977 |
-
assert_hostname=self.assert_hostname,
|
978 |
-
assert_fingerprint=self.assert_fingerprint,
|
979 |
-
)
|
980 |
-
conn.ssl_version = self.ssl_version
|
981 |
-
return conn
|
982 |
-
|
983 |
-
def _prepare_proxy(self, conn):
|
984 |
-
"""
|
985 |
-
Establishes a tunnel connection through HTTP CONNECT.
|
986 |
-
|
987 |
-
Tunnel connection is established early because otherwise httplib would
|
988 |
-
improperly set Host: header to proxy's IP:port.
|
989 |
-
"""
|
990 |
-
|
991 |
-
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
|
992 |
-
|
993 |
-
if self.proxy.scheme == "https":
|
994 |
-
conn.tls_in_tls_required = True
|
995 |
-
|
996 |
-
conn.connect()
|
997 |
-
|
998 |
-
def _new_conn(self):
|
999 |
-
"""
|
1000 |
-
Return a fresh :class:`http.client.HTTPSConnection`.
|
1001 |
-
"""
|
1002 |
-
self.num_connections += 1
|
1003 |
-
log.debug(
|
1004 |
-
"Starting new HTTPS connection (%d): %s:%s",
|
1005 |
-
self.num_connections,
|
1006 |
-
self.host,
|
1007 |
-
self.port or "443",
|
1008 |
-
)
|
1009 |
-
|
1010 |
-
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
|
1011 |
-
raise SSLError(
|
1012 |
-
"Can't connect to HTTPS URL because the SSL module is not available."
|
1013 |
-
)
|
1014 |
-
|
1015 |
-
actual_host = self.host
|
1016 |
-
actual_port = self.port
|
1017 |
-
if self.proxy is not None:
|
1018 |
-
actual_host = self.proxy.host
|
1019 |
-
actual_port = self.proxy.port
|
1020 |
-
|
1021 |
-
conn = self.ConnectionCls(
|
1022 |
-
host=actual_host,
|
1023 |
-
port=actual_port,
|
1024 |
-
timeout=self.timeout.connect_timeout,
|
1025 |
-
strict=self.strict,
|
1026 |
-
cert_file=self.cert_file,
|
1027 |
-
key_file=self.key_file,
|
1028 |
-
key_password=self.key_password,
|
1029 |
-
**self.conn_kw
|
1030 |
-
)
|
1031 |
-
|
1032 |
-
return self._prepare_conn(conn)
|
1033 |
-
|
1034 |
-
def _validate_conn(self, conn):
|
1035 |
-
"""
|
1036 |
-
Called right before a request is made, after the socket is created.
|
1037 |
-
"""
|
1038 |
-
super(HTTPSConnectionPool, self)._validate_conn(conn)
|
1039 |
-
|
1040 |
-
# Force connect early to allow us to validate the connection.
|
1041 |
-
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
|
1042 |
-
conn.connect()
|
1043 |
-
|
1044 |
-
if not conn.is_verified:
|
1045 |
-
warnings.warn(
|
1046 |
-
(
|
1047 |
-
"Unverified HTTPS request is being made to host '%s'. "
|
1048 |
-
"Adding certificate verification is strongly advised. See: "
|
1049 |
-
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
|
1050 |
-
"#ssl-warnings" % conn.host
|
1051 |
-
),
|
1052 |
-
InsecureRequestWarning,
|
1053 |
-
)
|
1054 |
-
|
1055 |
-
if getattr(conn, "proxy_is_verified", None) is False:
|
1056 |
-
warnings.warn(
|
1057 |
-
(
|
1058 |
-
"Unverified HTTPS connection done to an HTTPS proxy. "
|
1059 |
-
"Adding certificate verification is strongly advised. See: "
|
1060 |
-
"https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
|
1061 |
-
"#ssl-warnings"
|
1062 |
-
),
|
1063 |
-
InsecureRequestWarning,
|
1064 |
-
)
|
1065 |
-
|
1066 |
-
|
1067 |
-
def connection_from_url(url, **kw):
|
1068 |
-
"""
|
1069 |
-
Given a url, return an :class:`.ConnectionPool` instance of its host.
|
1070 |
-
|
1071 |
-
This is a shortcut for not having to parse out the scheme, host, and port
|
1072 |
-
of the url before creating an :class:`.ConnectionPool` instance.
|
1073 |
-
|
1074 |
-
:param url:
|
1075 |
-
Absolute URL string that must include the scheme. Port is optional.
|
1076 |
-
|
1077 |
-
:param \\**kw:
|
1078 |
-
Passes additional parameters to the constructor of the appropriate
|
1079 |
-
:class:`.ConnectionPool`. Useful for specifying things like
|
1080 |
-
timeout, maxsize, headers, etc.
|
1081 |
-
|
1082 |
-
Example::
|
1083 |
-
|
1084 |
-
>>> conn = connection_from_url('http://google.com/')
|
1085 |
-
>>> r = conn.request('GET', '/')
|
1086 |
-
"""
|
1087 |
-
scheme, host, port = get_host(url)
|
1088 |
-
port = port or port_by_scheme.get(scheme, 80)
|
1089 |
-
if scheme == "https":
|
1090 |
-
return HTTPSConnectionPool(host, port=port, **kw)
|
1091 |
-
else:
|
1092 |
-
return HTTPConnectionPool(host, port=port, **kw)
|
1093 |
-
|
1094 |
-
|
1095 |
-
def _normalize_host(host, scheme):
|
1096 |
-
"""
|
1097 |
-
Normalize hosts for comparisons and use with sockets.
|
1098 |
-
"""
|
1099 |
-
|
1100 |
-
host = normalize_host(host, scheme)
|
1101 |
-
|
1102 |
-
# httplib doesn't like it when we include brackets in IPv6 addresses
|
1103 |
-
# Specifically, if we include brackets but also pass the port then
|
1104 |
-
# httplib crazily doubles up the square brackets on the Host header.
|
1105 |
-
# Instead, we need to make sure we never pass ``None`` as the port.
|
1106 |
-
# However, for backward compatibility reasons we can't actually
|
1107 |
-
# *assert* that. See http://bugs.python.org/issue28539
|
1108 |
-
if host.startswith("[") and host.endswith("]"):
|
1109 |
-
host = host[1:-1]
|
1110 |
-
return host
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h
DELETED
@@ -1,172 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file device_new_allocator.h
|
19 |
-
* \brief An allocator which allocates storage with \p device_new
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/device_ptr.h>
|
26 |
-
#include <thrust/device_reference.h>
|
27 |
-
#include <thrust/device_new.h>
|
28 |
-
#include <thrust/device_delete.h>
|
29 |
-
#include <limits>
|
30 |
-
#include <stdexcept>
|
31 |
-
|
32 |
-
namespace thrust
|
33 |
-
{
|
34 |
-
|
35 |
-
/*! \addtogroup memory_management_classes Memory Management Classes
|
36 |
-
* \ingroup memory_management
|
37 |
-
* \{
|
38 |
-
*/
|
39 |
-
|
40 |
-
/*! \p device_new_allocator is a device memory allocator that employs the
|
41 |
-
* \p device_new function for allocation.
|
42 |
-
*
|
43 |
-
* \see device_new
|
44 |
-
* \see device_ptr
|
45 |
-
* \see http://www.sgi.com/tech/stl/Allocators.html
|
46 |
-
*/
|
47 |
-
template<typename T>
|
48 |
-
class device_new_allocator
|
49 |
-
{
|
50 |
-
public:
|
51 |
-
/*! Type of element allocated, \c T. */
|
52 |
-
typedef T value_type;
|
53 |
-
|
54 |
-
/*! Pointer to allocation, \c device_ptr<T>. */
|
55 |
-
typedef device_ptr<T> pointer;
|
56 |
-
|
57 |
-
/*! \c const pointer to allocation, \c device_ptr<const T>. */
|
58 |
-
typedef device_ptr<const T> const_pointer;
|
59 |
-
|
60 |
-
/*! Reference to allocated element, \c device_reference<T>. */
|
61 |
-
typedef device_reference<T> reference;
|
62 |
-
|
63 |
-
/*! \c const reference to allocated element, \c device_reference<const T>. */
|
64 |
-
typedef device_reference<const T> const_reference;
|
65 |
-
|
66 |
-
/*! Type of allocation size, \c std::size_t. */
|
67 |
-
typedef std::size_t size_type;
|
68 |
-
|
69 |
-
/*! Type of allocation difference, \c pointer::difference_type. */
|
70 |
-
typedef typename pointer::difference_type difference_type;
|
71 |
-
|
72 |
-
/*! The \p rebind metafunction provides the type of a \p device_new_allocator
|
73 |
-
* instantiated with another type.
|
74 |
-
*
|
75 |
-
* \tparam U The other type to use for instantiation.
|
76 |
-
*/
|
77 |
-
template<typename U>
|
78 |
-
struct rebind
|
79 |
-
{
|
80 |
-
/*! The typedef \p other gives the type of the rebound \p device_new_allocator.
|
81 |
-
*/
|
82 |
-
typedef device_new_allocator<U> other;
|
83 |
-
}; // end rebind
|
84 |
-
|
85 |
-
/*! No-argument constructor has no effect. */
|
86 |
-
__host__ __device__
|
87 |
-
inline device_new_allocator() {}
|
88 |
-
|
89 |
-
/*! No-argument destructor has no effect. */
|
90 |
-
__host__ __device__
|
91 |
-
inline ~device_new_allocator() {}
|
92 |
-
|
93 |
-
/*! Copy constructor has no effect. */
|
94 |
-
__host__ __device__
|
95 |
-
inline device_new_allocator(device_new_allocator const&) {}
|
96 |
-
|
97 |
-
/*! Constructor from other \p device_malloc_allocator has no effect. */
|
98 |
-
template<typename U>
|
99 |
-
__host__ __device__
|
100 |
-
inline device_new_allocator(device_new_allocator<U> const&) {}
|
101 |
-
|
102 |
-
/*! Returns the address of an allocated object.
|
103 |
-
* \return <tt>&r</tt>.
|
104 |
-
*/
|
105 |
-
__host__ __device__
|
106 |
-
inline pointer address(reference r) { return &r; }
|
107 |
-
|
108 |
-
/*! Returns the address an allocated object.
|
109 |
-
* \return <tt>&r</tt>.
|
110 |
-
*/
|
111 |
-
__host__ __device__
|
112 |
-
inline const_pointer address(const_reference r) { return &r; }
|
113 |
-
|
114 |
-
/*! Allocates storage for \p cnt objects.
|
115 |
-
* \param cnt The number of objects to allocate.
|
116 |
-
* \return A \p pointer to uninitialized storage for \p cnt objects.
|
117 |
-
* \note Memory allocated by this function must be deallocated with \p deallocate.
|
118 |
-
*/
|
119 |
-
__host__
|
120 |
-
inline pointer allocate(size_type cnt,
|
121 |
-
const_pointer = const_pointer(static_cast<T*>(0)))
|
122 |
-
{
|
123 |
-
if(cnt > this->max_size())
|
124 |
-
{
|
125 |
-
throw std::bad_alloc();
|
126 |
-
} // end if
|
127 |
-
|
128 |
-
// use "::operator new" rather than keyword new
|
129 |
-
return pointer(device_new<T>(cnt));
|
130 |
-
} // end allocate()
|
131 |
-
|
132 |
-
/*! Deallocates storage for objects allocated with \p allocate.
|
133 |
-
* \param p A \p pointer to the storage to deallocate.
|
134 |
-
* \param cnt The size of the previous allocation.
|
135 |
-
* \note Memory deallocated by this function must previously have been
|
136 |
-
* allocated with \p allocate.
|
137 |
-
*/
|
138 |
-
__host__
|
139 |
-
inline void deallocate(pointer p, size_type cnt)
|
140 |
-
{
|
141 |
-
// use "::operator delete" rather than keyword delete
|
142 |
-
(void)cnt;
|
143 |
-
device_delete(p);
|
144 |
-
} // end deallocate()
|
145 |
-
|
146 |
-
/*! Returns the largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
147 |
-
* \return The largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
148 |
-
*/
|
149 |
-
__host__ __device__
|
150 |
-
inline size_type max_size() const
|
151 |
-
{
|
152 |
-
return std::numeric_limits<size_type>::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T);
|
153 |
-
} // end max_size()
|
154 |
-
|
155 |
-
/*! Compares against another \p device_malloc_allocator for equality.
|
156 |
-
* \return \c true
|
157 |
-
*/
|
158 |
-
__host__ __device__
|
159 |
-
inline bool operator==(device_new_allocator const&) { return true; }
|
160 |
-
|
161 |
-
/*! Compares against another \p device_malloc_allocator for inequality.
|
162 |
-
* \return \c false
|
163 |
-
*/
|
164 |
-
__host__ __device__
|
165 |
-
inline bool operator!=(device_new_allocator const &a) {return !operator==(a); }
|
166 |
-
}; // end device_new_allocator
|
167 |
-
|
168 |
-
/*! \}
|
169 |
-
*/
|
170 |
-
|
171 |
-
} // end thrust
|
172 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a count of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// the purpose of this header is to #include the count.h header
|
22 |
-
// of the sequential, host, and device systems. It should be #included in any
|
23 |
-
// code which uses adl to dispatch count
|
24 |
-
|
25 |
-
#include <thrust/system/detail/sequential/count.h>
|
26 |
-
|
27 |
-
// SCons can't see through the #defines below to figure out what this header
|
28 |
-
// includes, so we fake it out by specifying all possible files we might end up
|
29 |
-
// including inside an #if 0.
|
30 |
-
#if 0
|
31 |
-
#include <thrust/system/cpp/detail/count.h>
|
32 |
-
#include <thrust/system/cuda/detail/count.h>
|
33 |
-
#include <thrust/system/omp/detail/count.h>
|
34 |
-
#include <thrust/system/tbb/detail/count.h>
|
35 |
-
#endif
|
36 |
-
|
37 |
-
#define __THRUST_HOST_SYSTEM_COUNT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/count.h>
|
38 |
-
#include __THRUST_HOST_SYSTEM_COUNT_HEADER
|
39 |
-
#undef __THRUST_HOST_SYSTEM_COUNT_HEADER
|
40 |
-
|
41 |
-
#define __THRUST_DEVICE_SYSTEM_COUNT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/count.h>
|
42 |
-
#include __THRUST_DEVICE_SYSTEM_COUNT_HEADER
|
43 |
-
#undef __THRUST_DEVICE_SYSTEM_COUNT_HEADER
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/system/detail/generic/tag.h>
|
21 |
-
|
22 |
-
namespace thrust
|
23 |
-
{
|
24 |
-
namespace system
|
25 |
-
{
|
26 |
-
namespace detail
|
27 |
-
{
|
28 |
-
namespace generic
|
29 |
-
{
|
30 |
-
|
31 |
-
|
32 |
-
template<typename DerivedPolicy,
|
33 |
-
typename ForwardIterator>
|
34 |
-
__host__ __device__
|
35 |
-
ForwardIterator unique(thrust::execution_policy<DerivedPolicy> &exec,
|
36 |
-
ForwardIterator first,
|
37 |
-
ForwardIterator last);
|
38 |
-
|
39 |
-
|
40 |
-
template<typename DerivedPolicy,
|
41 |
-
typename ForwardIterator,
|
42 |
-
typename BinaryPredicate>
|
43 |
-
__host__ __device__
|
44 |
-
ForwardIterator unique(thrust::execution_policy<DerivedPolicy> &exec,
|
45 |
-
ForwardIterator first,
|
46 |
-
ForwardIterator last,
|
47 |
-
BinaryPredicate binary_pred);
|
48 |
-
|
49 |
-
|
50 |
-
template<typename DerivedPolicy,
|
51 |
-
typename InputIterator,
|
52 |
-
typename OutputIterator>
|
53 |
-
__host__ __device__
|
54 |
-
OutputIterator unique_copy(thrust::execution_policy<DerivedPolicy> &exec,
|
55 |
-
InputIterator first,
|
56 |
-
InputIterator last,
|
57 |
-
OutputIterator output);
|
58 |
-
|
59 |
-
|
60 |
-
template<typename DerivedPolicy,
|
61 |
-
typename InputIterator,
|
62 |
-
typename OutputIterator,
|
63 |
-
typename BinaryPredicate>
|
64 |
-
__host__ __device__
|
65 |
-
OutputIterator unique_copy(thrust::execution_policy<DerivedPolicy> &exec,
|
66 |
-
InputIterator first,
|
67 |
-
InputIterator last,
|
68 |
-
OutputIterator output,
|
69 |
-
BinaryPredicate binary_pred);
|
70 |
-
|
71 |
-
|
72 |
-
} // end namespace generic
|
73 |
-
} // end namespace detail
|
74 |
-
} // end namespace system
|
75 |
-
} // end namespace thrust
|
76 |
-
|
77 |
-
#include <thrust/system/detail/generic/unique.inl>
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/time/README.md
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Time
|
3 |
-
emoji: ⏰
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.0.17
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
14 |
-
|
15 |
-
This is the demo for It's About Time: Analog Clock Reading in the Wild
|
16 |
-
Charig Yang, Weidi Xie, Andrew Zisserman
|
17 |
-
CVPR 2022
|
18 |
-
|
19 |
-
Project page: https://www.robots.ox.ac.uk/~vgg/research/time/
|
20 |
-
Video: https://www.youtube.com/watch?v=6pYOi92XsGQ
|
21 |
-
|
22 |
-
Note the model takes in cropped image (i.e. we don't run object detector on here).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cecil8352/vits-models/transforms.py
DELETED
@@ -1,193 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
-
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
-
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
-
|
11 |
-
|
12 |
-
def piecewise_rational_quadratic_transform(inputs,
|
13 |
-
unnormalized_widths,
|
14 |
-
unnormalized_heights,
|
15 |
-
unnormalized_derivatives,
|
16 |
-
inverse=False,
|
17 |
-
tails=None,
|
18 |
-
tail_bound=1.,
|
19 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
20 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
21 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
22 |
-
|
23 |
-
if tails is None:
|
24 |
-
spline_fn = rational_quadratic_spline
|
25 |
-
spline_kwargs = {}
|
26 |
-
else:
|
27 |
-
spline_fn = unconstrained_rational_quadratic_spline
|
28 |
-
spline_kwargs = {
|
29 |
-
'tails': tails,
|
30 |
-
'tail_bound': tail_bound
|
31 |
-
}
|
32 |
-
|
33 |
-
outputs, logabsdet = spline_fn(
|
34 |
-
inputs=inputs,
|
35 |
-
unnormalized_widths=unnormalized_widths,
|
36 |
-
unnormalized_heights=unnormalized_heights,
|
37 |
-
unnormalized_derivatives=unnormalized_derivatives,
|
38 |
-
inverse=inverse,
|
39 |
-
min_bin_width=min_bin_width,
|
40 |
-
min_bin_height=min_bin_height,
|
41 |
-
min_derivative=min_derivative,
|
42 |
-
**spline_kwargs
|
43 |
-
)
|
44 |
-
return outputs, logabsdet
|
45 |
-
|
46 |
-
|
47 |
-
def searchsorted(bin_locations, inputs, eps=1e-6):
|
48 |
-
bin_locations[..., -1] += eps
|
49 |
-
return torch.sum(
|
50 |
-
inputs[..., None] >= bin_locations,
|
51 |
-
dim=-1
|
52 |
-
) - 1
|
53 |
-
|
54 |
-
|
55 |
-
def unconstrained_rational_quadratic_spline(inputs,
|
56 |
-
unnormalized_widths,
|
57 |
-
unnormalized_heights,
|
58 |
-
unnormalized_derivatives,
|
59 |
-
inverse=False,
|
60 |
-
tails='linear',
|
61 |
-
tail_bound=1.,
|
62 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
63 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
64 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
65 |
-
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
66 |
-
outside_interval_mask = ~inside_interval_mask
|
67 |
-
|
68 |
-
outputs = torch.zeros_like(inputs)
|
69 |
-
logabsdet = torch.zeros_like(inputs)
|
70 |
-
|
71 |
-
if tails == 'linear':
|
72 |
-
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
73 |
-
constant = np.log(np.exp(1 - min_derivative) - 1)
|
74 |
-
unnormalized_derivatives[..., 0] = constant
|
75 |
-
unnormalized_derivatives[..., -1] = constant
|
76 |
-
|
77 |
-
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
78 |
-
logabsdet[outside_interval_mask] = 0
|
79 |
-
else:
|
80 |
-
raise RuntimeError('{} tails are not implemented.'.format(tails))
|
81 |
-
|
82 |
-
outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
|
83 |
-
inputs=inputs[inside_interval_mask],
|
84 |
-
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
-
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
-
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
-
inverse=inverse,
|
88 |
-
left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
|
89 |
-
min_bin_width=min_bin_width,
|
90 |
-
min_bin_height=min_bin_height,
|
91 |
-
min_derivative=min_derivative
|
92 |
-
)
|
93 |
-
|
94 |
-
return outputs, logabsdet
|
95 |
-
|
96 |
-
def rational_quadratic_spline(inputs,
|
97 |
-
unnormalized_widths,
|
98 |
-
unnormalized_heights,
|
99 |
-
unnormalized_derivatives,
|
100 |
-
inverse=False,
|
101 |
-
left=0., right=1., bottom=0., top=1.,
|
102 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
103 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
104 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
105 |
-
if torch.min(inputs) < left or torch.max(inputs) > right:
|
106 |
-
raise ValueError('Input to a transform is not within its domain')
|
107 |
-
|
108 |
-
num_bins = unnormalized_widths.shape[-1]
|
109 |
-
|
110 |
-
if min_bin_width * num_bins > 1.0:
|
111 |
-
raise ValueError('Minimal bin width too large for the number of bins')
|
112 |
-
if min_bin_height * num_bins > 1.0:
|
113 |
-
raise ValueError('Minimal bin height too large for the number of bins')
|
114 |
-
|
115 |
-
widths = F.softmax(unnormalized_widths, dim=-1)
|
116 |
-
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
117 |
-
cumwidths = torch.cumsum(widths, dim=-1)
|
118 |
-
cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
|
119 |
-
cumwidths = (right - left) * cumwidths + left
|
120 |
-
cumwidths[..., 0] = left
|
121 |
-
cumwidths[..., -1] = right
|
122 |
-
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
123 |
-
|
124 |
-
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
125 |
-
|
126 |
-
heights = F.softmax(unnormalized_heights, dim=-1)
|
127 |
-
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
128 |
-
cumheights = torch.cumsum(heights, dim=-1)
|
129 |
-
cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
|
130 |
-
cumheights = (top - bottom) * cumheights + bottom
|
131 |
-
cumheights[..., 0] = bottom
|
132 |
-
cumheights[..., -1] = top
|
133 |
-
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
134 |
-
|
135 |
-
if inverse:
|
136 |
-
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
137 |
-
else:
|
138 |
-
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
139 |
-
|
140 |
-
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
141 |
-
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
142 |
-
|
143 |
-
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
144 |
-
delta = heights / widths
|
145 |
-
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
146 |
-
|
147 |
-
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
148 |
-
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
149 |
-
|
150 |
-
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
151 |
-
|
152 |
-
if inverse:
|
153 |
-
a = (((inputs - input_cumheights) * (input_derivatives
|
154 |
-
+ input_derivatives_plus_one
|
155 |
-
- 2 * input_delta)
|
156 |
-
+ input_heights * (input_delta - input_derivatives)))
|
157 |
-
b = (input_heights * input_derivatives
|
158 |
-
- (inputs - input_cumheights) * (input_derivatives
|
159 |
-
+ input_derivatives_plus_one
|
160 |
-
- 2 * input_delta))
|
161 |
-
c = - input_delta * (inputs - input_cumheights)
|
162 |
-
|
163 |
-
discriminant = b.pow(2) - 4 * a * c
|
164 |
-
assert (discriminant >= 0).all()
|
165 |
-
|
166 |
-
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
167 |
-
outputs = root * input_bin_widths + input_cumwidths
|
168 |
-
|
169 |
-
theta_one_minus_theta = root * (1 - root)
|
170 |
-
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
171 |
-
* theta_one_minus_theta)
|
172 |
-
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
|
173 |
-
+ 2 * input_delta * theta_one_minus_theta
|
174 |
-
+ input_derivatives * (1 - root).pow(2))
|
175 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
176 |
-
|
177 |
-
return outputs, -logabsdet
|
178 |
-
else:
|
179 |
-
theta = (inputs - input_cumwidths) / input_bin_widths
|
180 |
-
theta_one_minus_theta = theta * (1 - theta)
|
181 |
-
|
182 |
-
numerator = input_heights * (input_delta * theta.pow(2)
|
183 |
-
+ input_derivatives * theta_one_minus_theta)
|
184 |
-
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
185 |
-
* theta_one_minus_theta)
|
186 |
-
outputs = input_cumheights + numerator / denominator
|
187 |
-
|
188 |
-
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
|
189 |
-
+ 2 * input_delta * theta_one_minus_theta
|
190 |
-
+ input_derivatives * (1 - theta).pow(2))
|
191 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
192 |
-
|
193 |
-
return outputs, logabsdet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
// #include <torch/torch.h>
|
2 |
-
#include <torch/extension.h>
|
3 |
-
|
4 |
-
#include <vector>
|
5 |
-
|
6 |
-
std::vector<torch::Tensor> top_pool_forward(
|
7 |
-
torch::Tensor input
|
8 |
-
) {
|
9 |
-
// Initialize output
|
10 |
-
torch::Tensor output = torch::zeros_like(input);
|
11 |
-
|
12 |
-
// Get height
|
13 |
-
int64_t height = input.size(2);
|
14 |
-
|
15 |
-
// Copy the last column
|
16 |
-
torch::Tensor input_temp = input.select(2, height - 1);
|
17 |
-
torch::Tensor output_temp = output.select(2, height - 1);
|
18 |
-
output_temp.copy_(input_temp);
|
19 |
-
|
20 |
-
torch::Tensor max_temp;
|
21 |
-
for (int64_t ind = 1; ind < height; ++ind) {
|
22 |
-
input_temp = input.select(2, height - ind - 1);
|
23 |
-
output_temp = output.select(2, height - ind);
|
24 |
-
max_temp = output.select(2, height - ind - 1);
|
25 |
-
|
26 |
-
torch::max_out(max_temp, input_temp, output_temp);
|
27 |
-
}
|
28 |
-
|
29 |
-
return {
|
30 |
-
output
|
31 |
-
};
|
32 |
-
}
|
33 |
-
|
34 |
-
std::vector<torch::Tensor> top_pool_backward(
|
35 |
-
torch::Tensor input,
|
36 |
-
torch::Tensor grad_output
|
37 |
-
) {
|
38 |
-
auto output = torch::zeros_like(input);
|
39 |
-
|
40 |
-
int32_t batch = input.size(0);
|
41 |
-
int32_t channel = input.size(1);
|
42 |
-
int32_t height = input.size(2);
|
43 |
-
int32_t width = input.size(3);
|
44 |
-
|
45 |
-
// auto max_val = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width});
|
46 |
-
// auto max_ind = torch::zeros(torch::CUDA(torch::kLong), {batch, channel, width});
|
47 |
-
auto max_val = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
|
48 |
-
auto max_ind = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA));
|
49 |
-
|
50 |
-
auto input_temp = input.select(2, height - 1);
|
51 |
-
max_val.copy_(input_temp);
|
52 |
-
|
53 |
-
max_ind.fill_(height - 1);
|
54 |
-
|
55 |
-
auto output_temp = output.select(2, height - 1);
|
56 |
-
auto grad_output_temp = grad_output.select(2, height - 1);
|
57 |
-
output_temp.copy_(grad_output_temp);
|
58 |
-
|
59 |
-
auto un_max_ind = max_ind.unsqueeze(2);
|
60 |
-
// auto gt_mask = torch::zeros(torch::CUDA(torch::kByte), {batch, channel, width});
|
61 |
-
// auto max_temp = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width});
|
62 |
-
auto gt_mask = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA));
|
63 |
-
auto max_temp = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
|
64 |
-
|
65 |
-
for (int32_t ind = 1; ind < height; ++ind) {
|
66 |
-
input_temp = input.select(2, height - ind - 1);
|
67 |
-
torch::gt_out(gt_mask, input_temp, max_val);
|
68 |
-
|
69 |
-
torch::masked_select_out(max_temp, input_temp, gt_mask);
|
70 |
-
max_val.masked_scatter_(gt_mask, max_temp);
|
71 |
-
max_ind.masked_fill_(gt_mask, height - ind - 1);
|
72 |
-
|
73 |
-
grad_output_temp = grad_output.select(2, height - ind - 1).unsqueeze(2);
|
74 |
-
output.scatter_add_(2, un_max_ind, grad_output_temp);
|
75 |
-
}
|
76 |
-
|
77 |
-
return {
|
78 |
-
output
|
79 |
-
};
|
80 |
-
}
|
81 |
-
|
82 |
-
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
83 |
-
m.def(
|
84 |
-
"forward", &top_pool_forward, "Top Pool Forward",
|
85 |
-
py::call_guard<py::gil_scoped_release>()
|
86 |
-
);
|
87 |
-
m.def(
|
88 |
-
"backward", &top_pool_backward, "Top Pool Backward",
|
89 |
-
py::call_guard<py::gil_scoped_release>()
|
90 |
-
);
|
91 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CofAI/CalculatorUI/README.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: CalculatorUI
|
3 |
-
emoji: ➕️➖️✖️➗️
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: gray
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Это UI модель калькулятора от CofAI, можете копировать и дорабатывать её, мы не против, даже можете зарабатывать на ней, спасибо!
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CofAI/picgen/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: PicGen
|
3 |
-
emoji: 🖼☕🖼
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.12.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: creativeml-openrail-m
|
11 |
-
duplicated_from: null
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
# A simple torch style logger
|
2 |
-
# (C) Wei YANG 2017
|
3 |
-
from __future__ import absolute_import
|
4 |
-
# import matplotlib.pyplot as plt
|
5 |
-
import matplotlib
|
6 |
-
matplotlib.use('pdf')
|
7 |
-
import matplotlib.pyplot as plt
|
8 |
-
import os
|
9 |
-
import sys
|
10 |
-
import numpy as np
|
11 |
-
|
12 |
-
__all__ = ['Logger', 'LoggerMonitor', 'savefig']
|
13 |
-
|
14 |
-
def savefig(fname, dpi=None):
|
15 |
-
dpi = 150 if dpi == None else dpi
|
16 |
-
plt.savefig(fname, dpi=dpi)
|
17 |
-
|
18 |
-
def plot_overlap(logger, names=None):
|
19 |
-
names = logger.names if names == None else names
|
20 |
-
numbers = logger.numbers
|
21 |
-
for _, name in enumerate(names):
|
22 |
-
x = np.arange(len(numbers[name]))
|
23 |
-
plt.plot(x, np.asarray(numbers[name]))
|
24 |
-
return [logger.title + '(' + name + ')' for name in names]
|
25 |
-
|
26 |
-
class Logger(object):
|
27 |
-
'''Save training process to log file with simple plot function.'''
|
28 |
-
def __init__(self, fpath, title=None, resume=False):
|
29 |
-
self.file = None
|
30 |
-
self.resume = resume
|
31 |
-
self.title = '' if title == None else title
|
32 |
-
if fpath is not None:
|
33 |
-
if resume:
|
34 |
-
self.file = open(fpath, 'r')
|
35 |
-
name = self.file.readline()
|
36 |
-
self.names = name.rstrip().split('\t')
|
37 |
-
self.numbers = {}
|
38 |
-
for _, name in enumerate(self.names):
|
39 |
-
self.numbers[name] = []
|
40 |
-
|
41 |
-
for numbers in self.file:
|
42 |
-
numbers = numbers.rstrip().split('\t')
|
43 |
-
for i in range(0, len(numbers)):
|
44 |
-
self.numbers[self.names[i]].append(numbers[i])
|
45 |
-
self.file.close()
|
46 |
-
self.file = open(fpath, 'a')
|
47 |
-
else:
|
48 |
-
self.file = open(fpath, 'w')
|
49 |
-
|
50 |
-
def set_names(self, names):
|
51 |
-
if self.resume:
|
52 |
-
pass
|
53 |
-
# initialize numbers as empty list
|
54 |
-
self.numbers = {}
|
55 |
-
self.names = names
|
56 |
-
for _, name in enumerate(self.names):
|
57 |
-
self.file.write(name)
|
58 |
-
self.file.write('\t')
|
59 |
-
self.numbers[name] = []
|
60 |
-
self.file.write('\n')
|
61 |
-
self.file.flush()
|
62 |
-
|
63 |
-
|
64 |
-
def append(self, numbers):
|
65 |
-
assert len(self.names) == len(numbers), 'Numbers do not match names'
|
66 |
-
for index, num in enumerate(numbers):
|
67 |
-
self.file.write("{0:.6f}".format(num))
|
68 |
-
self.file.write('\t')
|
69 |
-
self.numbers[self.names[index]].append(num)
|
70 |
-
self.file.write('\n')
|
71 |
-
self.file.flush()
|
72 |
-
|
73 |
-
def plot(self, names=None):
|
74 |
-
print 'plot'
|
75 |
-
'''
|
76 |
-
names = self.names if names == None else names
|
77 |
-
numbers = self.numbers
|
78 |
-
for _, name in enumerate(names):
|
79 |
-
x = np.arange(len(numbers[name]))
|
80 |
-
plt.plot(x, np.asarray(numbers[name]))
|
81 |
-
plt.legend([self.title + '(' + name + ')' for name in names])
|
82 |
-
plt.grid(True)
|
83 |
-
'''
|
84 |
-
|
85 |
-
def close(self):
|
86 |
-
if self.file is not None:
|
87 |
-
self.file.close()
|
88 |
-
|
89 |
-
class LoggerMonitor(object):
|
90 |
-
'''Load and visualize multiple logs.'''
|
91 |
-
def __init__ (self, paths):
|
92 |
-
'''paths is a distionary with {name:filepath} pair'''
|
93 |
-
self.loggers = []
|
94 |
-
for title, path in paths.items():
|
95 |
-
logger = Logger(path, title=title, resume=True)
|
96 |
-
self.loggers.append(logger)
|
97 |
-
|
98 |
-
def plot(self, names=None):
|
99 |
-
plt.figure()
|
100 |
-
plt.subplot(121)
|
101 |
-
legend_text = []
|
102 |
-
for logger in self.loggers:
|
103 |
-
legend_text += plot_overlap(logger, names)
|
104 |
-
plt.legend(legend_text, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
|
105 |
-
plt.grid(True)
|
106 |
-
|
107 |
-
if __name__ == '__main__':
|
108 |
-
# # Example
|
109 |
-
# logger = Logger('test.txt')
|
110 |
-
# logger.set_names(['Train loss', 'Valid loss','Test loss'])
|
111 |
-
|
112 |
-
# length = 100
|
113 |
-
# t = np.arange(length)
|
114 |
-
# train_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
|
115 |
-
# valid_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
|
116 |
-
# test_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
|
117 |
-
|
118 |
-
# for i in range(0, length):
|
119 |
-
# logger.append([train_loss[i], valid_loss[i], test_loss[i]])
|
120 |
-
# logger.plot()
|
121 |
-
|
122 |
-
# Example: logger monitor
|
123 |
-
paths = {
|
124 |
-
'resadvnet20':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet20/log.txt',
|
125 |
-
'resadvnet32':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet32/log.txt',
|
126 |
-
'resadvnet44':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet44/log.txt',
|
127 |
-
}
|
128 |
-
|
129 |
-
field = ['Valid Acc.']
|
130 |
-
|
131 |
-
monitor = LoggerMonitor(paths)
|
132 |
-
monitor.plot(names=field)
|
133 |
-
savefig('test.eps')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|