Commit
·
9bb6253
1
Parent(s):
6488b78
Update parquet files (step 99 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/app.py +0 -172
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/ArcGIS 10.8 Full Crack Kuyhaa - A Powerful and Easy-to-Use GIS Software for Your PC.md +0 -36
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Neighbours From Hell 6 Free and Make Your Neighbours Lives Miserable.md +0 -106
- spaces/1gistliPinn/ChatGPT4/Examples/AIRAC Cycle 1210 (complete) [FSX FS9 X-Plane] Demol.md +0 -60
- spaces/1gistliPinn/ChatGPT4/Examples/Always Kabhi Kabhi Of Love Movie With English Subtitles Free Download BETTER.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Bommarillu Hindi Dubbed Movie 136 A Heartwarming Tale of Love and Family.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Driver Tally T5040 For Windows 10 64-bit Free BETTER.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Enter Macbeth Movie Download In Hd Watch the Shakespearean Tragedy on Your Screen.md +0 -7
- spaces/1line/AutoGPT/run.bat +0 -8
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Phir Hera Pheri Movie In Hindi Download Kickass [VERIFIED].md +0 -78
- spaces/1phancelerku/anime-remove-background/Car Stunt 3D Extreme City - MOD APK with Unlimited Money and Features.md +0 -98
- spaces/1phancelerku/anime-remove-background/Download Candy Crush Saga and Discover Thousands of Levels and Challenges on Your iPhone or iPad.md +0 -180
- spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_ddpm.py +0 -360
- spaces/2ndelement/voicevox/test/test_synthesis_engine.py +0 -654
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py +0 -117
- spaces/7hao/bingo/README.md +0 -195
- spaces/AIConsultant/MusicGen/audiocraft/losses/specloss.py +0 -149
- spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/resample.py +0 -49
- spaces/AIGText/GlyphControl/ldm/modules/midas/midas/__init__.py +0 -0
- spaces/AIGText/GlyphControl/ldm/modules/midas/midas/dpt_depth.py +0 -109
- spaces/AIWaves/Debate/src/agents/Component/ExtraComponent.py +0 -128
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/custom_cfg.py +0 -10
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Bard.py +0 -74
- spaces/Adapting/YouTube-Downloader/tube/var.py +0 -4
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Factory.d.ts +0 -6
- spaces/Amrrs/DragGan-Inversion/PTI/dnnlib/util.py +0 -477
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/audioldm/__init__.py +0 -17
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +0 -128
- spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/nasfcos.py +0 -20
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/single_stage.py +0 -154
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/two_stage.py +0 -215
- spaces/Andyrasika/xlm-roberta-base-finetuned-panx-de/app.py +0 -3
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/resample.py +0 -154
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py +0 -96
- spaces/Antonpy/stable-diffusion-license/index.html +0 -0
- spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/__init__.py +0 -0
- spaces/Artrajz/vits-simple-api/static/js/bootstrap.bundle.min.js +0 -7
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/scripts.py +0 -437
- spaces/AyameYODAYO/xijinpingx/index.html +0 -24
- spaces/Benson/text-generation/Examples/Descargar Caramelo Crush Saga Para Windows Pc 7.md +0 -45
- spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/action.py +0 -257
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/req/req_set.py +0 -82
- spaces/Boadiwaa/Recipes/openai/api_resources/abstract/api_resource.py +0 -117
- spaces/Bonosa2/movies/app3.py +0 -82
- spaces/BorisovMaksim/denoising/Dockerfile +0 -14
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/meta_arch/rcnn.py +0 -282
- spaces/CVPR/Text2Human/Text2Human/data/mask_dataset.py +0 -59
- spaces/ChallengeHub/Chinese-LangChain/clc/gpt_service.py +0 -62
- spaces/Codecooker/rvcapi/src/vc_infer_pipeline.py +0 -653
spaces/101-5/gpt4free/app.py
DELETED
@@ -1,172 +0,0 @@
|
|
1 |
-
import g4f
|
2 |
-
import gradio as gr
|
3 |
-
from g4f.Provider import (
|
4 |
-
Ails,
|
5 |
-
You,
|
6 |
-
Bing,
|
7 |
-
Yqcloud,
|
8 |
-
Theb,
|
9 |
-
Aichat,
|
10 |
-
Bard,
|
11 |
-
Vercel,
|
12 |
-
Forefront,
|
13 |
-
Lockchat,
|
14 |
-
Liaobots,
|
15 |
-
H2o,
|
16 |
-
ChatgptLogin,
|
17 |
-
DeepAi,
|
18 |
-
GetGpt
|
19 |
-
)
|
20 |
-
import os
|
21 |
-
import json
|
22 |
-
import pandas as pd
|
23 |
-
|
24 |
-
from models_for_langchain.model import CustomLLM
|
25 |
-
from langchain.memory import ConversationBufferWindowMemory, ConversationTokenBufferMemory
|
26 |
-
from langchain import LLMChain, PromptTemplate
|
27 |
-
from langchain.prompts import (
|
28 |
-
ChatPromptTemplate,
|
29 |
-
PromptTemplate,
|
30 |
-
SystemMessagePromptTemplate,
|
31 |
-
AIMessagePromptTemplate,
|
32 |
-
HumanMessagePromptTemplate,
|
33 |
-
)
|
34 |
-
|
35 |
-
provider_dict = {
|
36 |
-
'Ails': Ails,
|
37 |
-
'You': You,
|
38 |
-
'Bing': Bing,
|
39 |
-
'Yqcloud': Yqcloud,
|
40 |
-
'Theb': Theb,
|
41 |
-
'Aichat': Aichat,
|
42 |
-
'Bard': Bard,
|
43 |
-
'Vercel': Vercel,
|
44 |
-
'Forefront': Forefront,
|
45 |
-
'Lockchat': Lockchat,
|
46 |
-
'Liaobots': Liaobots,
|
47 |
-
'H2o': H2o,
|
48 |
-
'ChatgptLogin': ChatgptLogin,
|
49 |
-
'DeepAi': DeepAi,
|
50 |
-
'GetGpt': GetGpt
|
51 |
-
}
|
52 |
-
|
53 |
-
prompt_set_list = {}
|
54 |
-
for prompt_file in os.listdir("prompt_set"):
|
55 |
-
key = prompt_file
|
56 |
-
if '.csv' in key:
|
57 |
-
df = pd.read_csv("prompt_set/" + prompt_file)
|
58 |
-
prompt_dict = dict(zip(df['act'], df['prompt']))
|
59 |
-
else:
|
60 |
-
with open("prompt_set/" + prompt_file, encoding='utf-8') as f:
|
61 |
-
ds = json.load(f)
|
62 |
-
prompt_dict = {item["act"]: item["prompt"] for item in ds}
|
63 |
-
prompt_set_list[key] = prompt_dict
|
64 |
-
|
65 |
-
with gr.Blocks() as demo:
|
66 |
-
llm = CustomLLM()
|
67 |
-
|
68 |
-
template = """
|
69 |
-
Chat with human based on following instructions:
|
70 |
-
```
|
71 |
-
{system_instruction}
|
72 |
-
```
|
73 |
-
The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
|
74 |
-
{{chat_history}}
|
75 |
-
Human: {{human_input}}
|
76 |
-
Chatbot:"""
|
77 |
-
|
78 |
-
memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
|
79 |
-
|
80 |
-
chatbot = gr.Chatbot([], label='AI')
|
81 |
-
msg = gr.Textbox(value="", label='请输入:')
|
82 |
-
with gr.Row():
|
83 |
-
clear = gr.Button("清空对话", scale=2)
|
84 |
-
chat_mode = gr.Checkbox(value=True, label='聊天模式', interactive=True, scale=1)
|
85 |
-
system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
|
86 |
-
with gr.Row():
|
87 |
-
default_prompt_set = "1 中文提示词.json"
|
88 |
-
prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='提示词集合')
|
89 |
-
prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='提示词', min_width=20)
|
90 |
-
with gr.Row():
|
91 |
-
model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='模型')
|
92 |
-
provider_name = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='提供者', min_width=20)
|
93 |
-
|
94 |
-
def change_prompt_set(prompt_set_name):
|
95 |
-
return gr.Dropdown.update(choices=list(prompt_set_list[prompt_set_name].keys()))
|
96 |
-
|
97 |
-
def change_prompt(prompt_set_name, prompt_name):
|
98 |
-
return gr.update(value=prompt_set_list[prompt_set_name][prompt_name])
|
99 |
-
|
100 |
-
def user(user_message, history = []):
|
101 |
-
return gr.update(value="", interactive=False), history + [[user_message, None]]
|
102 |
-
|
103 |
-
def bot(history, model_name, provider_name, system_msg, chat_mode):
|
104 |
-
history[-1][1] = ''
|
105 |
-
if len(system_msg)>3000:
|
106 |
-
system_msg = system_msg[:2000] + system_msg[-1000:]
|
107 |
-
|
108 |
-
if not chat_mode:
|
109 |
-
global template, memory
|
110 |
-
llm.model_name = model_name
|
111 |
-
llm.provider_name = provider_name
|
112 |
-
prompt = PromptTemplate(
|
113 |
-
input_variables=["chat_history", "human_input"], template=template.format(system_instruction=system_msg)
|
114 |
-
)
|
115 |
-
llm_chain = LLMChain(
|
116 |
-
llm=llm,
|
117 |
-
prompt=prompt,
|
118 |
-
verbose=False,
|
119 |
-
memory=memory,
|
120 |
-
)
|
121 |
-
bot_msg = llm_chain.run(history[-1][0])
|
122 |
-
for c in bot_msg:
|
123 |
-
history[-1][1] += c
|
124 |
-
yield history
|
125 |
-
else:
|
126 |
-
prompt = """
|
127 |
-
请你仔细阅读以下提示,然后针对用户的话进行回答。
|
128 |
-
提示:
|
129 |
-
```
|
130 |
-
{}
|
131 |
-
```
|
132 |
-
用户最新的话:
|
133 |
-
```
|
134 |
-
{}
|
135 |
-
```
|
136 |
-
请回答:
|
137 |
-
"""
|
138 |
-
|
139 |
-
# print(history)
|
140 |
-
messages = []
|
141 |
-
for user_message, assistant_message in history[:-1]:
|
142 |
-
messages.append({"role": "user", "content": user_message})
|
143 |
-
messages.append({"role": "assistant", "content": assistant_message})
|
144 |
-
messages.append({"role": "user", "content": history[-1][0]})
|
145 |
-
# print(messages)
|
146 |
-
|
147 |
-
bot_msg = g4f.ChatCompletion.create(
|
148 |
-
model=model_name,
|
149 |
-
provider=provider_dict[provider_name],
|
150 |
-
messages=messages,
|
151 |
-
stream=True)
|
152 |
-
for c in bot_msg:
|
153 |
-
history[-1][1] += c
|
154 |
-
print(c, flush=True, end='')
|
155 |
-
yield history
|
156 |
-
|
157 |
-
def empty_chat():
|
158 |
-
global memory
|
159 |
-
memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
|
160 |
-
return None
|
161 |
-
response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
|
162 |
-
bot, [chatbot, model_name, provider_name, system_msg, chat_mode], chatbot
|
163 |
-
)
|
164 |
-
prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
|
165 |
-
prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)
|
166 |
-
|
167 |
-
response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
|
168 |
-
clear.click(empty_chat, None, [chatbot], queue=False)
|
169 |
-
|
170 |
-
demo.title = "AI Chat"
|
171 |
-
demo.queue()
|
172 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/ArcGIS 10.8 Full Crack Kuyhaa - A Powerful and Easy-to-Use GIS Software for Your PC.md
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download ArcGIS 10.8 Full Crack Kuyhaa and Use It for GIS Tasks</h1>
|
3 |
-
<p>ArcGIS 10.8 is a popular and widely used geographic information system (GIS) software that allows you to work with maps and spatial data in various forms and formats. With ArcGIS 10.8, you can create, edit, and display spatial data, as well as perform spatial analysis, data management, visualization, and geoprocessing.</p>
|
4 |
-
<p>However, ArcGIS 10.8 is not a free software and requires a license to use it. If you want to use ArcGIS 10.8 for free, you can download the full crack version from Kuyhaa, a website that provides various software and games for free. In this article, we will show you how to download ArcGIS 10.8 full crack Kuyhaa and use it for your GIS tasks.</p>
|
5 |
-
<h2>download arcgis 10.8 full crack kuyhaa</h2><br /><p><b><b>DOWNLOAD</b> ››› <a href="https://byltly.com/2uKyxs">https://byltly.com/2uKyxs</a></b></p><br /><br />
|
6 |
-
<h2>How to Download ArcGIS 10.8 Full Crack Kuyhaa</h2>
|
7 |
-
<p>Downloading ArcGIS 10.8 full crack Kuyhaa is easy and fast. Just follow these steps:</p>
|
8 |
-
<ol>
|
9 |
-
<li>Go to <a href="https://kuyhaa-me.net/esri-arcgis-desktop-full-version/">Kuyhaa's website</a> and search for "ESRI ArcGIS Desktop v10.8.2 Gratis Download [2023]".</li>
|
10 |
-
<li>Click on the download link and wait for the download to finish.</li>
|
11 |
-
<li>Extract the downloaded file using WinRAR or 7-Zip.</li>
|
12 |
-
<li>Run the setup file and follow the instructions to install ArcGIS 10.8 on your computer.</li>
|
13 |
-
<li>Copy the crack file from the crack folder and paste it into the installation folder of ArcGIS 10.8.</li>
|
14 |
-
<li>Run ArcGIS 10.8 and enjoy using it for free.</li>
|
15 |
-
</ol>
|
16 |
-
<h2>How to Use ArcGIS 10.8 for GIS Tasks</h2>
|
17 |
-
<p>Using ArcGIS 10.8 for GIS tasks is fun and easy. Here are some basic steps to get you started:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Import your spatial data, such as shapefiles, geodatabases, CAD files, and more, to ArcGIS 10.8 by clicking on the "Add Data" button or dragging and dropping them to the map window.</li>
|
20 |
-
<li>Drag and drop your spatial data to the table of contents and arrange them in the order you want.</li>
|
21 |
-
<li>Add symbology, labels, legends, scale bars, north arrows, and other elements to your map by using the tools on the toolbar.</li>
|
22 |
-
<li>Edit your spatial data by using the tools on the editor toolbar, such as creating, modifying, deleting, snapping, splitting, merging, and more.</li>
|
23 |
-
<li>Analyze your spatial data by using the tools on the analysis toolbar or the geoprocessing toolbox, such as buffering, intersecting, clipping, overlaying, summarizing, and more.</li>
|
24 |
-
<li>Preview your map by clicking on the "Layout View" button on the bottom left corner of the map window. You can also adjust the page size, orientation, margins, and other settings by using the tools on the layout toolbar.</li>
|
25 |
-
<li>Export your map by clicking on the "File" menu and choosing "Export Map". You can choose your desired format, resolution, quality, and location for your map output.</li>
|
26 |
-
</ol>
|
27 |
-
<h2>Tips and Tricks for Using ArcGIS 10.8</h2>
|
28 |
-
<p>ArcGIS 10.8 has many advanced features that can help you enhance your GIS skills and productivity. Here are some tips and tricks for using ArcGIS 10.8:</p>
|
29 |
-
<ul>
|
30 |
-
<li>Use the "Search" window to find tools, commands, data sources, help topics, and more quickly and easily.</li>
|
31 |
-
<li>Use the "Catalog" window to browse and manage your spatial data sources in a tree view.</li>
|
32 |
-
<li>Use the "ModelBuilder" tool to create graphical models of geoprocessing workflows that can be run repeatedly or shared with others.</li>
|
33 |
-
<li>Use the "Python" window to write and execute Python scripts that can automate geoprocessing tasks or extend ArcGIS functionality.</li>
|
34 |
-
<li>Use the "ArcToolbox" window to access hundreds of geoprocessing tools that are organized into toolboxes and toolsets based on their functionality.</</p> ddb901b051<br />
|
35 |
-
<br />
|
36 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Neighbours From Hell 6 Free and Make Your Neighbours Lives Miserable.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Neighbours From Hell 6 Free</h1>
|
3 |
-
<p>Have you ever wanted to get back at your annoying neighbour for making your life miserable? If so, you might enjoy playing <strong>Neighbours From Hell 6</strong>, a comedy strategy game that lets you prank your neighbour in various ways. In this article, we will show you how to download Neighbours From Hell 6 for free, give you some tips and tricks for playing it, and review its features, pros and cons.</p>
|
4 |
-
<h2>Download Neighbours From Hell 6 Free</h2><br /><p><b><b>DOWNLOAD</b> »»» <a href="https://byltly.com/2uKz7N">https://byltly.com/2uKz7N</a></b></p><br /><br />
|
5 |
-
<h2>How to download Neighbours From Hell 6 for free</h2>
|
6 |
-
<p>Neighbours From Hell 6 is the sixth installment of the popular Neighbours From Hell series, which was developed by Orion Games and released in 2009. The game is not available on any official platforms, such as Steam or GOG, but you can still download it for free from some unofficial sources. Here are the steps you need to follow:</p>
|
7 |
-
<ol>
|
8 |
-
<li><p>Find a reliable source for the game file. You can search online for websites that offer free downloads of Neighbours From Hell 6, but be careful not to click on any suspicious links or ads that might contain malware or viruses. One of the websites that we recommend is <a href="https://www.youtube.com/watch?v=G4HwB9vXReM">this one</a>, which has a video tutorial on how to get the game file.</p></li>
|
9 |
-
<li><p>Download and install the game file. Once you have found a trustworthy source, you can download the game file, which is usually in a compressed format, such as ZIP or RAR. You will need a program like WinRAR or 7-Zip to extract the file to a folder on your computer. Then, you can run the setup.exe file and follow the instructions to install the game.</p></li>
|
10 |
-
<li><p>Enjoy playing Neighbours From Hell 6. After installing the game, you can launch it from your desktop or start menu and start pranking your neighbour. You can also adjust the settings, such as resolution, sound, and language, according to your preferences.</p></li>
|
11 |
-
</ol>
|
12 |
-
<h2>Tips and tricks for playing Neighbours From Hell 6</h2>
|
13 |
-
<p>Neighbours From Hell 6 is a fun and easy game to play, but it can also be challenging if you want to achieve a high score and unlock all the achievements. Here are some tips and tricks that might help you:</p>
|
14 |
-
<ul>
|
15 |
-
<li><p>Plan your pranks carefully. The game consists of 14 episodes, each with a different setting and a number of pranks that you can perform on your neighbour. You have a limited time to complete each episode, so you need to plan your pranks ahead and execute them in the right order. You can use the eye icon to spy on your neighbour's movements and actions, and use the pause button to think about your next move.</p></li>
|
16 |
-
<li><p>Use the environment to your advantage. The game offers a variety of pranks and items that you can use to annoy your neighbour, such as glue, pepper, soap, scissors, etc. You can also interact with some objects in the environment, such as doors, windows, switches, faucets, etc., to create more chaos. For example, you can turn on the water while your neighbour is showering, or cut off the electricity while he is watching TV.</p></li>
|
17 |
-
<li><p>Watch out for the dog and other obstacles. Your neighbour is not the only one who can ruin your plans. There are also some obstacles that you need to avoid or overcome, such as his dog Chilli, who will chase you if he sees you; his girlfriend Olga, who will slap you if she catches you; or his mother Rottweiler, who will scold him if she finds out what he is doing. You can use some items or tricks to distract them or get rid of them temporarily.</p>
|
18 |
-
<p>Download Neighbours From Hell 6 Origastock<br />
|
19 |
-
Play Neighbours Back From Hell on PC<br />
|
20 |
-
Neighbours Back From Hell Android Emulator<br />
|
21 |
-
Neighbours From Hell 6 Game Free Download<br />
|
22 |
-
Neighbours From Hell 6 Puzzle Game<br />
|
23 |
-
Neighbours From Hell 6 Holiday Locations<br />
|
24 |
-
Neighbours From Hell 6 Prank Your Neighbor<br />
|
25 |
-
Neighbours From Hell 6 TV Show Game<br />
|
26 |
-
Neighbours From Hell 6 HandyGames<br />
|
27 |
-
Neighbours From Hell 6 BlueStacks<br />
|
28 |
-
Download Neighbours From Hell Season 1<br />
|
29 |
-
Neighbours From Hell Season 1 LDPlayer<br />
|
30 |
-
Neighbours From Hell Season 1 PC Game<br />
|
31 |
-
Neighbours From Hell Season 1 Free Download<br />
|
32 |
-
Neighbours From Hell Season 1 Android Game<br />
|
33 |
-
How to Download Neighbours From Hell 6<br />
|
34 |
-
How to Play Neighbours Back From Hell<br />
|
35 |
-
How to Install Neighbours From Hell Season 1<br />
|
36 |
-
How to Prank Your Neighbor in Neighbours From Hell 6<br />
|
37 |
-
How to Win Awards in Neighbours Back From Hell<br />
|
38 |
-
Best Pranks in Neighbours From Hell 6<br />
|
39 |
-
Best Episodes in Neighbours Back From Hell<br />
|
40 |
-
Best Android Emulator for Neighbours From Hell Games<br />
|
41 |
-
Best PC Games Like Neighbours From Hell<br />
|
42 |
-
Best Tips and Tricks for Neighbours From Hell Games<br />
|
43 |
-
Download Neighbours From Hell Complete Collection<br />
|
44 |
-
Play All Seasons of Neighbours From Hell on PC<br />
|
45 |
-
Neighbours From Hell Games for Windows 10<br />
|
46 |
-
Neighbours From Hell Games for Mac OS<br />
|
47 |
-
Neighbours From Hell Games for Linux<br />
|
48 |
-
Download Neighbours From Hell APK File<br />
|
49 |
-
Download Neighbours Back From Hell MOD APK<br />
|
50 |
-
Download Neighbours From Hell Season 1 OBB Data<br />
|
51 |
-
Download Neighbours From Hell Full Version Crack<br />
|
52 |
-
Download Neighbours Back From Hell Patch Update<br />
|
53 |
-
Download Neighbours From Hell Soundtrack MP3<br />
|
54 |
-
Download Neighbours Back From Hell Wallpaper HD<br />
|
55 |
-
Download Neighbours From Hell Comics PDF<br />
|
56 |
-
Download Neighbours Back From Hell Cheats and Hacks<br />
|
57 |
-
Download Neighbours From Hell Walkthrough Guide</p></li>
|
58 |
-
</ul>
|
59 |
-
<h2>Features of Neighbours From Hell 6</h2>
|
60 |
-
<p>Neighbours From Hell 6 is a game that offers a lot of features that make it enjoyable and entertaining. Here are some of them:</p>
|
61 |
-
<ul>
|
62 |
-
<li><p>14 hilarious episodes with different settings. The game takes you to various locations where you can prank your neighbour, such as his house, his office, his hotel room, his cruise ship cabin, his ski resort chalet, etc. Each episode has its own theme and atmosphere, as well as unique pranks and items that you can use.</p></li>
|
63 |
-
<li><p>A variety of pranks and items to use. The game gives you a lot of options to choose from when it comes to pranking your neighbour. You can use simple items like glue or pepper, or more elaborate ones like fireworks or dynamite. You can also combine some items to create more effects or damage. For example, you can put glue on his chair and then cut his pants with scissors.</p></li>
|
64 |
-
<li><p>A catchy soundtrack and funny sound effects. The game has a catchy soundtrack that matches the mood of each episode. It also has funny sound effects that add more humor to the game. You can hear your neighbour's screams, groans, curses, etc., as well as his reactions to your pranks.</p></li>
|
65 |
-
</ul>
|
66 |
-
<h2>Pros and cons of Neighbours From Hell 6</h2>
|
67 |
-
<p>Neighbours From Hell 6 is a game that has its pros and cons. Here are some of them:</p>
|
68 |
-
<table>
|
69 |
-
<tr>
|
70 |
-
<th>Pros</th>
|
71 |
-
<th>Cons</th>
|
72 |
-
</tr>
|
73 |
-
<tr>
|
74 |
-
<td><p>A fun and addictive gameplay. The game is easy to play but hard to master. It requires strategy, timing, creativity, and patience. It also has a replay value because you can try different pranks or aim for higher scores.</p></td>
|
75 |
-
<td><p>A repetitive and predictable pattern. The game follows a similar pattern in each episode. You have to spy on your neighbour's routine, find items in hidden places, set up pranks in specific spots, etc. The neighbour's behaviour is also predictable after a while.</p></td>
|
76 |
-
</tr>
|
77 |
-
<tr>
|
78 |
-
<td><p>A humorous and original concept. The game has a unique concept that makes it stand out from other games. It is based on a TV show with the same name that aired in Germany in 2003-2005. It is also inspired by real-life situations that many people can relate to.</p></td>
|
79 |
-
<td><p>A lack of difficulty and challenge. The game is not very challenging because it does not have any penalties or consequences for failing an episode or getting caught by your neighbour. You can simply restart or retry until you succeed. The game also does not have any difficulty levels or modes that could make it more challenging.</p></td>
|
80 |
-
</tr>
|
81 |
-
<tr>
|
82 |
-
<td><p>A low system requirement and easy installation. The game does not require a high-end computer or device to run smoothly. It has low graphics quality and size that make it compatible with most systems. It also has an easy installation process that does not require any additional programs or files.</p></td>
|
83 |
-
<td><p>A dated graphics and animation. The game has poor graphics quality and animation that make it look outdated compared to other games released in the same year or later. The characters are pixelated and stiff; the backgrounds are bland and blurry; the movements are slow and unnatural.</p></td>
|
84 |
-
</tr>
|
85 |
-
</table>
|
86 |
-
<h2>Conclusion</h2>
|
87 |
-
<p>Neighbours From Hell 6 is a comedy strategy game that lets you prank your neighbour in various ways. You can download it for free from some unofficial sources and enjoy playing it on your computer. The game has a fun and addictive gameplay, a humorous and original concept, and a catchy soundtrack and sound effects. However, it also has some drawbacks, such as a repetitive and predictable pattern, a lack of difficulty and challenge, and a dated graphics and animation. Overall, Neighbours From Hell 6 is a game that can make you laugh and have a good time, but it might not appeal to everyone.</p>
|
88 |
-
<p>If you are interested in playing Neighbours From Hell 6, you can follow the steps we provided in this article and start pranking your neighbour today. You can also check out some tips and tricks we shared to help you improve your performance and score. And if you want to learn more about the features, pros and cons of Neighbours From Hell 6, you can read our review and see if it suits your taste.</p>
|
89 |
-
<p>We hope you enjoyed reading this article and found it useful. If you did, please share it with your friends and family who might also like to play Neighbours From Hell 6. And if you have any questions or feedback, please leave them in the comments section below. We would love to hear from you.</p>
|
90 |
-
<p>Thank you for reading and happy pranking!</p>
|
91 |
-
<h3>FAQs</h3>
|
92 |
-
<ul>
|
93 |
-
<li><p><strong>Q: What is the difference between Neighbours From Hell 6 and other Neighbours From Hell games?</strong></p>
|
94 |
-
<p>A: Neighbours From Hell 6 is the sixth installment of the series, which was released in 2009. It has 14 episodes with different settings, such as a cruise ship, a ski resort, a casino, etc. It also has some new pranks and items that were not available in previous games.</p></li>
|
95 |
-
<li><p><strong>Q: Is Neighbours From Hell 6 safe to download?</strong></p>
|
96 |
-
<p>A: Neighbours From Hell 6 is not available on any official platforms, such as Steam or GOG, but you can still download it for free from some unofficial sources. However, you need to be careful not to click on any suspicious links or ads that might contain malware or viruses. You should also scan the game file with an antivirus program before installing it.</p></li>
|
97 |
-
<li><p><strong>Q: How long does it take to finish Neighbours From Hell 6?</strong></p>
|
98 |
-
<p>A: It depends on your skill level and how much time you spend on each episode. Each episode has a time limit that ranges from 5 to 15 minutes. If you complete all the episodes with 100% score, it might take you around 3 to 4 hours to finish the game.</p></li>
|
99 |
-
<li><p><strong>Q: Can I play Neighbours From Hell 6 online or with friends?</strong></p>
|
100 |
-
<p>A: No, Neighbours From Hell 6 is a single-player game that does not have any online or multiplayer features. You can only play it offline on your computer.</p></li>
|
101 |
-
<li><p><strong>Q: Where can I find more information about Neighbours From Hell 6?</strong></p>
|
102 |
-
<p>A: You can find more information about Neighbours From Hell 6 on some websites that offer free downloads of the game, such as <a href="https://www.youtube.com/watch?v=G4HwB9vXReM">this one</a>. You can also watch some videos on YouTube that show the gameplay and walkthrough of the game.</p></li>
|
103 |
-
</ul>
|
104 |
-
</p> 0a6ba089eb<br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/AIRAC Cycle 1210 (complete) [FSX FS9 X-Plane] Demol.md
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol: How to Update Your Flight Simulator with the Latest Navigation Data</h1>
|
3 |
-
|
4 |
-
<p>If you are a flight simulator enthusiast, you probably know how important it is to have the most accurate and up-to-date navigation data for your flights. Navigation data includes information such as waypoints, airways, navaids, procedures, and more. Without it, you might end up flying to the wrong destination, missing an approach, or violating airspace restrictions.</p>
|
5 |
-
<h2>AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol</h2><br /><p><b><b>DOWNLOAD</b> » <a href="https://imgfil.com/2uxXAQ">https://imgfil.com/2uxXAQ</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<p>That's why you need to update your flight simulator with the latest AIRAC cycle. AIRAC stands for Aeronautical Information Regulation And Control, and it is a system that ensures that all aeronautical information is published and updated at regular intervals. Every 28 days, a new AIRAC cycle is released with the latest changes and corrections to the navigation data.</p>
|
8 |
-
|
9 |
-
<p>But how do you update your flight simulator with the new AIRAC cycle? That's where AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol comes in. This is a package that contains all the navigation data files for three popular flight simulators: Microsoft Flight Simulator X (FSX), Microsoft Flight Simulator 2004 (FS9), and X-Plane. It also includes a tool called Demol that allows you to easily install the files into your simulator.</p>
|
10 |
-
|
11 |
-
<p>In this article, we will show you how to use AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol to update your flight simulator with the latest navigation data. Follow these simple steps and enjoy more realistic and accurate flights.</p>
|
12 |
-
|
13 |
-
<h2>Step 1: Download AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol</h2>
|
14 |
-
|
15 |
-
<p>The first step is to download the package from the link below. The package is a ZIP file that contains all the navigation data files for FSX, FS9, and X-Plane, as well as the Demol tool. The file size is about 1.2 GB, so make sure you have enough space on your hard drive and a stable internet connection.</p>
|
16 |
-
<p></p>
|
17 |
-
|
18 |
-
<p><a href="https://example.com/download/airac-cycle-1210-complete-fsx-fs9-x-plane-demol.zip">Download AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol</a></p>
|
19 |
-
|
20 |
-
<h2>Step 2: Extract the ZIP file</h2>
|
21 |
-
|
22 |
-
<p>The next step is to extract the ZIP file to a folder on your computer. You can use any software that can handle ZIP files, such as WinZip, WinRAR, or 7-Zip. To extract the file, right-click on it and select "Extract All" or "Extract Here". Choose a destination folder where you want to save the extracted files.</p>
|
23 |
-
|
24 |
-
<h2>Step 3: Run Demol</h2>
|
25 |
-
|
26 |
-
<p>The third step is to run Demol.exe from the extracted folder. This is a tool that will help you install the navigation data files into your flight simulator. When you run Demol.exe, you will see a window like this:</p>
|
27 |
-
|
28 |
-
<img src="https://example.com/images/demol-window.png" alt="Demol window">
|
29 |
-
|
30 |
-
<p>As you can see, Demol has four tabs: FSX/P3D, FS9/FS2004, X-Plane 10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30
|
31 |
-
|
32 |
-
<h2>Step 4: Select your flight simulator and install the files</h2>
|
33 |
-
|
34 |
-
<p>The fourth step is to select your flight simulator and install the navigation data files. To do this, click on the tab that corresponds to your simulator: FSX/P3D, FS9/FS2004, or X-Plane. You will see a window like this:</p>
|
35 |
-
|
36 |
-
<img src="https://example.com/images/demol-fsx.png" alt="Demol FSX tab">
|
37 |
-
|
38 |
-
<p>In this example, we will use FSX as our simulator. You can see that Demol has detected the location of our FSX installation folder. If Demol does not find your simulator folder automatically, you can browse for it manually by clicking on the "..." button.</p>
|
39 |
-
|
40 |
-
<p>Next, you need to select the navigation data files that you want to install. You can choose between two options: "Install all files" or "Install selected files". If you choose "Install all files", Demol will install all the navigation data files for your simulator. This includes the files for the default aircraft and scenery, as well as for any add-ons that you have installed. This option is recommended if you want to have the most complete and updated navigation data for your simulator.</p>
|
41 |
-
|
42 |
-
<p>If you choose "Install selected files", Demol will let you choose which files you want to install. This option is useful if you only want to update certain parts of your simulator, such as specific add-ons or regions. To select the files, click on the "Select Files" button. You will see a window like this:</p>
|
43 |
-
|
44 |
-
<img src="https://example.com/images/demol-select-files.png" alt="Demol select files window">
|
45 |
-
|
46 |
-
<p>Here, you can browse through the folders and subfolders that contain the navigation data files. You can check or uncheck the boxes next to each file to select or deselect it. You can also use the buttons at the bottom to select or deselect all files in a folder or subfolder.</p>
|
47 |
-
|
48 |
-
<p>Once you have selected the files that you want to install, click on the "OK" button to return to the main window. You will see a summary of the files that you have selected at the bottom of the window.</p>
|
49 |
-
|
50 |
-
<p>Finally, click on the "Install" button to start installing the navigation data files into your simulator. Demol will show you a progress bar and a log of the installation process. Depending on the number and size of the files that you have selected, this may take some time.</p>
|
51 |
-
|
52 |
-
<h2>Step 5: Enjoy your updated flight simulator</h2>
|
53 |
-
|
54 |
-
<p>The fifth and final step is to enjoy your updated flight simulator with the latest AIRAC cycle. Once Demol has finished installing the files, it will show you a message saying "Installation completed successfully". You can close Demol and launch your flight simulator.</p>
|
55 |
-
|
56 |
-
<p>You will notice that your flight simulator now has more accurate and up-to-date navigation data for your flights. You can check this by looking at the map, GPS, FMC, or any other navigation device in your aircraft. You can also use online tools such as <a href="https://example.com/navigraph">Navigraph</a> or <a href="https://example.com/airac">AIRAC</a> to compare and verify the navigation data.</p>
|
57 |
-
|
58 |
-
<p>Congratulations! You have successfully updated your flight simulator with AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol. Now you can enjoy more realistic and accurate flights with your favorite simulator.</p> d5da3c52bf<br />
|
59 |
-
<br />
|
60 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Always Kabhi Kabhi Of Love Movie With English Subtitles Free Download BETTER.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Always Kabhi Kabhi Of Love Movie With English Subtitles Free Download</h2><br /><p><b><b>Download Zip</b> » <a href="https://imgfil.com/2uxZJD">https://imgfil.com/2uxZJD</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
Bollywood Music, Hindi Bollywood Translation, Hindi Movie and Reviews, Bollywood Hindi Movie ... We found one dictionary with English definitions that includes the word sabse bada sukh: Click on ... Download Lagu Sabse Bada Khiladi Subtitle Indonesia MP3 Gratis. ... Musibatein Unka Rasta Kabhi Nahi Rok Sakti Jinhe. 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Bommarillu Hindi Dubbed Movie 136 A Heartwarming Tale of Love and Family.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Bommarillu Hindi Dubbed Movie Download 136</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://imgfil.com/2uy0Bd">https://imgfil.com/2uy0Bd</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
aaccfb2cb3<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Driver Tally T5040 For Windows 10 64-bit Free BETTER.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Driver Tally T5040 for Windows 10 64-bit free</h2><br /><p><b><b>DOWNLOAD</b> ——— <a href="https://imgfil.com/2uxXNG">https://imgfil.com/2uxXNG</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
If you have installed the wrong driver, this driver will not only damage your computer, but it can also cause serious damage to the monitor, graphics card, and to your... 4-8-2012 · Tally 5040 how to connect to driver netmeeting how to fix driver error windows vista My computer is a Dell OptiPlex GX270. The Tally i5040 driver software works fine on my desktop computer but not on my laptop and it's driving me crazy. As of April 6, 2012, the first driver release was available for Windows XP, Vista, and Windows 7 users. Tally Genicom Tally 5040 driver compatible for: Windows XP, Vista, Windows 7, Windows 8, Windows 8. 1, Windows 8. 10, Windows Server 2012 R2. Download the Latest Updated Tally Genicom Tally 5040 drivers from our trusted DriversGuide. If you have not already done so, please update your drivers. Tally Genicom Tally 5040 VGA Driver Download Tally Genicom Tally 5040 Driver Installation Instructions Tally Genicom Tally 5040 Driver Windows 8 Download Tally Genicom Tally 5040 Driver Windows 7 Download (Vista, XP, 2000, 98SE, 98) Tally Genicom Tally 5040 Driver for Windows 10 64 Bit Tally Genicom Tally 5040 Driver for Windows 7 32 Bit. Install the Tally 5040 driver manually using the CD/DVD which is provided with the hardware. Then go to the start screen, search for the. Tally Genicom Tally 5040 Video Driver Download. Tally Genicom Tally 5040 Driver Download. Tally Genicom Tally 5040 Driver Download. Tally Genicom Tally 5040 driver is a free Driver you can download at www. 3-11-2016 · Tally Genicom Tally 5040 driver a complete solution to all of your Tally Genicom Tally 5040 problems. If you have a Windows Vista machine you can download the driver from Tally Genicom's website. Tally Genicom Tally 5040 driver is a free Driver you can download at www. 3-11-2016 · Tally Genicom Tally 5040 driver a complete solution to all of your Tally Genicom Tally 5040 problems. If you have a Windows Vista machine you can download the driver from Tally Genicom's website. www. 
3 4fefd39f24<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Enter Macbeth Movie Download In Hd Watch the Shakespearean Tragedy on Your Screen.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>This document was downloaded from Lit2Go, a free online collection of stories and poems in Mp3 (audiobook) format published by the Florida Center for Instructional Technology. For more information, including classroom activities, readability data, and original sources, please visit -tragedy-of-macbeth/5576/act-5-scene-5/.</p>
|
3 |
-
<h2>Enter Macbeth Movie Download In Hd</h2><br /><p><b><b>Download Zip</b> 🗹 <a href="https://imgfil.com/2uxXa0">https://imgfil.com/2uxXa0</a></b></p><br /><br />
|
4 |
-
<p>Warner Bros. Pictures is hosting its own streaming application for the nominated films below. You can stream directly from the web or you can download the application to your desktop, phone or other devices prior to streaming. <strong>To access please click the graphic below and either enter your Awards PIN in the "Access Code" field or once the app is downloaded, enter your Awards PIN in the "Login Code" field.</strong></p>
|
5 |
-
<p>The Screen Actors Guild Awards support efforts to eliminate the theft of copyrighted materials, as content theft threatens the economic livelihood of all entertainment industry professionals, especially working actors who depend on residuals to make a living. Screener DVDs, digital downloads, and streaming offers are provided to members for personal viewing in connection with awards consideration only and must not be uploaded to the internet, publicly exhibited, distributed, rented, loaned, sold, reproduced or given to anyone. The unauthorized use of copyrighted materials violates state and/or federal laws and may result in civil and/or criminal liability. It may also constitute grounds for discipline, including expulsion from SAG-AFTRA.</p> aaccfb2cb3<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1line/AutoGPT/run.bat
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
@echo off
|
2 |
-
python scripts/check_requirements.py requirements.txt
|
3 |
-
if errorlevel 1 (
|
4 |
-
echo Installing missing packages...
|
5 |
-
pip install -r requirements.txt
|
6 |
-
)
|
7 |
-
python -m autogpt %*
|
8 |
-
pause
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/Phir Hera Pheri Movie In Hindi Download Kickass [VERIFIED].md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
## Phir Hera Pheri Movie In Hindi Download Kickass
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
**LINK >>> [https://lodystiri.blogspot.com/?file=2txPBe](https://lodystiri.blogspot.com/?file=2txPBe)**
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
Here is a possible title and article with HTML formatting for the keyword "Phir Hera Pheri Movie In Hindi Download Kickass":
|
24 |
-
|
25 |
-
# How to Download Phir Hera Pheri Movie in Hindi for Free
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
Phir Hera Pheri is a 2006 Bollywood comedy film starring Akshay Kumar, Suniel Shetty, Paresh Rawal, Bipasha Basu and Rimi Sen. It is the sequel to the 2000 film Hera Pheri, which was also a hit among the audience. The film follows the hilarious adventures of three friends who get involved in a scam and have to repay a huge amount of money to a don.
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
If you are looking for a way to download Phir Hera Pheri movie in Hindi for free, you have come to the right place. In this article, we will show you some of the best websites and platforms where you can watch or download this movie legally and safely. You can also find some useful tips and tricks to enhance your viewing experience.
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
## Where to Watch or Download Phir Hera Pheri Movie in Hindi
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
There are many options available online to watch or download Phir Hera Pheri movie in Hindi. However, not all of them are reliable or legal. Some of them may contain viruses, malware, pop-ups, ads or other unwanted content that can harm your device or compromise your privacy. Therefore, it is advisable to use only trusted and verified sources that offer high-quality video and audio.
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
Here are some of the best websites and platforms where you can watch or download Phir Hera Pheri movie in Hindi for free:
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
- **JioCinema**: JioCinema is a streaming service that offers a wide range of movies, TV shows, web series and music videos in various languages. You can watch Phir Hera Pheri movie in Hindi on JioCinema for free if you are a Jio user. You just need to sign in with your Jio number and enjoy unlimited entertainment. You can also download the movie on your device for offline viewing. You can access JioCinema on your smartphone, tablet, laptop or smart TV.[^2^]
|
50 |
-
|
51 |
-
- **YouTube**: YouTube is one of the most popular video-sharing platforms in the world. You can find almost any kind of content on YouTube, including movies. You can watch Phir Hera Pheri movie in Hindi on YouTube for free on the official channel of Shemaroo Comedy. The channel has uploaded the full movie in HD quality with English subtitles. You can also download the movie using a YouTube downloader app or software.[^3^]
|
52 |
-
|
53 |
-
- **Archive.org**: Archive.org is a digital library that preserves and provides access to millions of books, movies, music, software and other media. You can watch or download Phir Hera Pheri movie in Hindi on Archive.org for free. The website has uploaded the movie in various formats and resolutions, such as 480p, 720p and 1080p. You can choose the one that suits your device and internet speed.[^4^]
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
## Tips and Tricks to Enhance Your Viewing Experience
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
Watching or downloading Phir Hera Pheri movie in Hindi for free can be fun and easy if you follow some simple tips and tricks. Here are some of them:
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
- **Use a VPN**: A VPN (Virtual Private Network) is a service that encrypts your internet traffic and hides your IP address and location. This can help you bypass geo-restrictions, censorship and firewalls that may prevent you from accessing some websites or platforms that offer Phir Hera Pheri movie in Hindi for free. A VPN can also protect your online privacy and security from hackers, trackers and malware.
|
66 |
-
|
67 |
-
- **Use an ad-blocker**: An ad-blocker is a software or extension that blocks or removes ads from websites or platforms that you visit. This can help you avoid annoying pop-ups, banners, redirects or other intrusive ads that may interrupt your viewing experience or expose you to malicious content.
|
68 |
-
|
69 |
-
- **Use a good media player**: A good media player is a software or app that plays video and audio files on your device. A good media player should support various formats and codecs, offer high-quality playback, have user-friendly features and controls, and be compatible with your dfd1c89656
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Car Stunt 3D Extreme City - MOD APK with Unlimited Money and Features.md
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Car Stunt 3D Extreme City Mod Apk: A Review</h1>
|
3 |
-
<p>If you are a fan of car racing and stunt games, you might have heard of Car Stunt 3D Extreme City. This is a thrilling and addictive game that lets you perform amazing stunts in a fantasy city. You can drive various cars, customize them, and challenge yourself with different tracks and missions. But what if you want to enjoy the game without any limitations or interruptions? That's where Car Stunt 3D Extreme City Mod Apk comes in. In this article, we will review this modded version of the game, tell you how to download and install it, and explain its benefits.</p>
|
4 |
-
<h2>What is Car Stunt 3D Extreme City?</h2>
|
5 |
-
<p>Car Stunt 3D Extreme City is a simulation game developed by Timuz Games. It is available for Android devices on Google Play Store. The game has over 10 million downloads and a rating of 4.0 out of 5 stars. The game is designed to test your driving skills and creativity in a realistic and immersive environment. You can choose from a variety of cars, each with its own features and specifications. You can also customize your car with different colors, wheels, stickers, and more. The game offers multiple tracks, each with its own obstacles, ramps, loops, bridges, and tunnels. You can perform stunning stunts like flips, jumps, drifts, and spins. The game also has different modes, such as free mode, time mode, challenge mode, and multiplayer mode. You can compete with other players online or offline, and earn rewards for completing missions.</p>
|
6 |
-
<h2>car stunt 3d extreme city mod apk</h2><br /><p><b><b>Download</b> ★ <a href="https://jinyurl.com/2uNK1e">https://jinyurl.com/2uNK1e</a></b></p><br /><br />
|
7 |
-
<h3>Features of Car Stunt 3D Extreme City</h3>
|
8 |
-
<h4>Realistic physics and graphics</h4>
|
9 |
-
<p>One of the best features of Car Stunt 3D Extreme City is its realistic physics and graphics. The game uses advanced physics engine to simulate the movement and behavior of the cars. You can feel the impact of gravity, friction, inertia, and momentum as you drive your car. The game also has stunning graphics that create a lifelike cityscape. You can see the details of the buildings, roads, trees, sky, and water. The game also has dynamic lighting and shadows that enhance the visual effects.</p>
|
10 |
-
<h4>Multiple cars and tracks</h4>
|
11 |
-
<p>Another feature of Car Stunt 3D Extreme City is its multiple cars and tracks. The game offers more than 20 cars to choose from, including sports cars, muscle cars, monster trucks, and more. Each car has its own speed, acceleration, handling, braking, and durability. You can also customize your car with different colors, wheels, stickers, and more. The game also has more than 100 tracks to explore, each with its own obstacles, ramps, loops, bridges, and tunnels. You can find different themes for the tracks, such as city, desert, snow, forest, and more.</p>
|
12 |
-
<h4>Challenging stunts and missions</h4>
|
13 |
-
<p>A third feature of Car Stunt 3D Extreme City is its challenging stunts and missions. The game allows you to perform amazing stunts like flips, jumps, drifts, and spins. You can also use nitro boosters to increase your speed and power. The game also has different modes to test your skills and creativity. You can play in free mode to explore the tracks at your own pace. You can play in time mode to race against the clock and beat your best time. You can play in challenge mode to complete various tasks and objectives. You can also play in multiplayer mode to compete with other players online or offline. The game also has a leaderboard and achievements system to track your progress and performance.</p>
|
14 |
-
<h4>Customizable controls and settings</h4>
|
15 |
-
<p>A fourth feature of Car Stunt 3D Extreme City is its customizable controls and settings. The game allows you to choose from different control options, such as tilt, steering wheel, buttons, or joystick. You can also adjust the sensitivity and calibration of the controls to suit your preference. The game also has different settings to optimize your gaming experience. You can change the graphics quality, sound effects, music, and language of the game. You can also enable or disable the vibration, camera shake, and nitro effects.</p>
|
16 |
-
<h3>How to download and install Car Stunt 3D Extreme City Mod Apk?</h3>
|
17 |
-
<h4>Requirements and compatibility</h4>
|
18 |
-
<p>To download and install Car Stunt 3D Extreme City Mod Apk, you need to have an Android device that meets the following requirements: - Android version: 4.4 or higher - RAM: 2 GB or more - Storage: 100 MB or more - Internet connection: required for multiplayer mode The game is compatible with most Android devices, including smartphones and tablets. However, some devices may not support the game or run it smoothly due to hardware limitations.</p>
|
19 |
-
<h4>Steps to download and install</h4>
|
20 |
-
<p>To download and install Car Stunt 3D Extreme City Mod Apk, you need to follow these steps: - Step 1: Go to a trusted website that provides the modded version of the game. You can search for "Car Stunt 3D Extreme City Mod Apk" on Google or any other search engine. - Step 2: Download the modded apk file from the website. Make sure you download the latest version of the mod that matches your device's specifications. - Step 3: Before installing the modded apk file, you need to enable the "Unknown Sources" option on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. - Step 4: Locate the downloaded modded apk file on your device's file manager and tap on it to start the installation process. Follow the instructions on the screen to complete the installation. - Step 5: Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Car Stunt 3D Extreme City Mod Apk with unlimited money and gems, all cars and tracks unlocked, no ads and pop-ups, and enhanced performance and stability.</p>
|
21 |
-
<p>extreme car stunts 3d mod apk download<br />
|
22 |
-
car stunt 3d extreme city hack apk<br />
|
23 |
-
car stunt 3d extreme city mod apk unlimited money<br />
|
24 |
-
extreme car stunts 3d mod apk android 1<br />
|
25 |
-
car stunt 3d extreme city game mod apk<br />
|
26 |
-
extreme car stunts 3d mod apk latest version<br />
|
27 |
-
car stunt 3d extreme city mod apk revdl<br />
|
28 |
-
extreme car stunts 3d mod apk free download<br />
|
29 |
-
car stunt 3d extreme city mod apk offline<br />
|
30 |
-
extreme car stunts 3d mod apk unlocked all cars<br />
|
31 |
-
car stunt 3d extreme city mod apk rexdl<br />
|
32 |
-
extreme car stunts 3d mod apk happymod<br />
|
33 |
-
car stunt 3d extreme city mod apk unlimited coins<br />
|
34 |
-
extreme car stunts 3d mod apk no ads<br />
|
35 |
-
car stunt 3d extreme city mod apk online<br />
|
36 |
-
extreme car stunts 3d mod apk pure<br />
|
37 |
-
car stunt 3d extreme city mod apk obb<br />
|
38 |
-
extreme car stunts 3d mod apk old version<br />
|
39 |
-
car stunt 3d extreme city mod apk for pc<br />
|
40 |
-
extreme car stunts 3d mod apk vip<br />
|
41 |
-
car stunt 3d extreme city mod apk new version<br />
|
42 |
-
extreme car stunts 3d mod apk full version<br />
|
43 |
-
car stunt 3d extreme city mod apk android oyun club<br />
|
44 |
-
extreme car stunts 3d mod apk unlimited everything<br />
|
45 |
-
car stunt 3d extreme city mod apk uptodown<br />
|
46 |
-
extreme car stunts 3d mod apk all levels unlocked<br />
|
47 |
-
car stunt 3d extreme city mod apk apkpure<br />
|
48 |
-
extreme car stunts 3d mod apk unlimited gems<br />
|
49 |
-
car stunt 3d extreme city mod apk an1<br />
|
50 |
-
extreme car stunts 3d mod apk mega mod<br />
|
51 |
-
car stunt 3d extreme city mod apk apkmody<br />
|
52 |
-
extreme car stunts 3d mod apk unlimited nitro<br />
|
53 |
-
car stunt 3d extreme city mod apk apkmirror<br />
|
54 |
-
extreme car stunts 3d mod apk god mode<br />
|
55 |
-
car stunt 3d extreme city mod apk highly compressed<br />
|
56 |
-
extreme car stunts 3d mod apk premium<br />
|
57 |
-
car stunt 3d extreme city mod apk hack download<br />
|
58 |
-
extreme car stunts 3d mod apk pro<br />
|
59 |
-
car stunt 3d extreme city mod apk latest update<br />
|
60 |
-
extreme car stunts 3d mod apk original<br />
|
61 |
-
car stunt 3d extreme city mod apk unlimited keys<br />
|
62 |
-
extreme car stunts 3d mod apk cracked<br />
|
63 |
-
car stunt 3d extreme city mod apk unlimited lives<br />
|
64 |
-
extreme car stunts 3d mod apk cheat codes<br />
|
65 |
-
car stunt 3d extreme city mod apk unlimited gold<br />
|
66 |
-
extreme car stunts 3d mod apk easy download<br />
|
67 |
-
car stunt 3d extreme city mod apk ios<br />
|
68 |
-
extreme car stunts 3d mod apk no root<br />
|
69 |
-
car stunt 3d extreme city mod apk play store</p>
|
70 |
-
<h4>Permissions and safety</h4>
|
71 |
-
<p>To run Car Stunt 3D Extreme City Mod Apk, you need to grant some permissions to the game. These permissions include: - Access to device storage: to read and write game data - Access to device location: to provide location-based services - Access to device camera: to take screenshots and record videos - Access to device microphone: to enable voice chat in multiplayer mode These permissions are necessary for the game to function properly and provide you with a better gaming experience. However, you should be careful about downloading and installing modded apk files from unknown sources. Some modded apk files may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should always download modded apk files from trusted websites that have positive reviews and ratings from other users. You should also scan the modded apk file with an antivirus software before installing it on your device.</p>
|
72 |
-
<h3>What are the benefits of Car Stunt 3D Extreme City Mod Apk?</h3>
|
73 |
-
<h4>Unlimited money and gems</h4>
|
74 |
-
<p>One of the benefits of Car Stunt 3D Extreme City Mod Apk is that it gives you unlimited money and gems in the game. Money and gems are the main currencies in the game that you can use to buy new cars, upgrade them, customize them, and unlock new tracks. However, earning money and gems in the game can be time-consuming and tedious. You have to complete missions, win races, watch ads, or spend real money to get them. With Car Stunt 3D Extreme City Mod Apk, you don't have to worry about that anymore. You can get unlimited money and gems for free without any effort. You can use them to buy anything you want in the game without any restrictions.</p>
|
75 |
-
<h4>All cars and tracks unlocked</h4>
|
76 |
-
<p>Another benefit of Car Stunt 3D Extreme City Mod Apk is that it unlocks all cars and tracks in the game for you. Cars and tracks are the main elements of the game that determine your gameplay experience. However, not all cars and tracks are available for you at the beginning of the game. You have to unlock them by earning money and gems, completing missions, or reaching certain levels. Some cars and tracks are also exclusive to premium users who pay real money to access them. With Car Stunt 3D Extreme City Mod Apk, you don't have to do any of that. You can access all cars and tracks in the game from the start. You can enjoy driving any car you like and exploring any track you want without any limitations.</p>
|
77 |
-
<h4>No ads and pop-ups</h4>
|
78 |
-
<p>A third benefit of Car Stunt 3D Extreme City Mod Apk is that it removes all ads and pop-ups from the game. Ads and pop-ups are annoying and distracting features that interrupt your gameplay and ruin your immersion. They also consume your data and battery life. The game has a lot of ads and pop-ups that appear before, during, and after your gameplay. You have to watch them to earn money and gems, unlock cars and tracks, or access certain features. You can also skip them by paying real money or disabling your internet connection. With Car Stunt 3D Extreme City Mod Apk, you don't have to deal with any of that. You can play the game without any ads and pop-ups bothering you. You can enjoy a smooth and uninterrupted gameplay experience.</p>
|
79 |
-
<h4>Enhanced performance and stability</h4>
|
80 |
-
<p>A fourth benefit of Car Stunt 3D Extreme City Mod Apk is that it enhances the performance and stability of the game. The game is a high-quality simulation game that requires a lot of resources and processing power to run smoothly. However, some devices may not be able to handle the game well due to hardware limitations or compatibility issues. The game may lag, crash, freeze, or glitch on some devices. The game may also have some bugs or errors that affect the gameplay quality. With Car Stunt 3D Extreme City Mod Apk, you don't have to worry about any of that. The modded version of the game optimizes the game for your device and fixes any bugs or errors that may occur. You can play the game with high speed, accuracy, and reliability.</p>
|
81 |
-
<h2>Conclusion</h2>
|
82 |
-
<p>Car Stunt 3D Extreme City is a fun and exciting game that lets you perform amazing stunts in a realistic city environment. You can drive various cars, customize them, and challenge yourself with different tracks and modes. However, if you want to enjoy the game without any limitations or interruptions, you should try Car Stunt 3D Extreme City Mod Apk. This is a modded version of the game that gives you unlimited money and gems, all cars and tracks unlocked, no ads and pop-ups, and enhanced performance and stability. You can download and install Car Stunt 3D Extreme City Mod Apk from a trusted website and follow the steps we provided in this article. You can then enjoy playing Car Stunt 3D Extreme City Mod Apk with all its benefits.</p>
|
83 |
-
<h2>FAQs</h2>
|
84 |
-
<p>Here are some frequently asked questions about Car Stunt 3D Extreme City Mod Apk:</p>
|
85 |
-
<ul>
|
86 |
-
<li><b>Q: Is Car Stunt 3D Extreme City Mod Apk free?</b></li>
|
87 |
-
<li>A: Yes, Car Stunt 3D Extreme City Mod Apk is free to download and install. You don't have to pay anything to use it.</li>
|
88 |
-
<li><b>Q: Is Car Stunt 3D Extreme City Mod Apk safe?</b></li>
|
89 |
-
<li>A: Yes, Car Stunt 3D Extreme City Mod Apk is safe to use if you download it from a trusted website. However, you should always scan the modded apk file with an antivirus software before installing it on your device.</li>
|
90 |
-
<li><b>Q: Is Car Stunt 3D Extreme City Mod Apk legal?</b></li>
|
91 |
-
<li>A: No, Car Stunt 3D Extreme City Mod Apk is not legal to use. It violates the terms and conditions of the original game developer and Google Play Store. It also infringes the intellectual property rights of the original game developer. Therefore, you should use Car Stunt 3D Extreme City Mod Apk at your own risk.</li>
|
92 |
-
<li><b>Q: Will Car Stunt 3D Extreme City Mod Apk work on my device?</b></li>
|
93 |
-
<li>A: Car Stunt 3D Extreme City Mod Apk will work on most Android devices that meet the requirements we mentioned in this article. However, some devices may not support the game or run it smoothly due to hardware limitations or compatibility issues.</li>
|
94 |
-
<li><b>Q: How can I update Car Stunt 3D Extreme City Mod Apk?</b></li>
|
95 |
-
<li>A: To update Car Stunt 3D Extreme City Mod Apk, you need to download the latest version of the mod from the same website you downloaded it from. You can also check the website for any updates or notifications about the mod. You should always update the mod to get the latest features and bug fixes. However, you should also backup your game data before updating the mod, as some updates may erase your progress or cause compatibility issues.</li>
|
96 |
-
</ul></p> 401be4b1e0<br />
|
97 |
-
<br />
|
98 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Candy Crush Saga and Discover Thousands of Levels and Challenges on Your iPhone or iPad.md
DELETED
@@ -1,180 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download and Play Candy Crush Saga on iOS Devices</h1>
|
3 |
-
<p>If you are looking for a fun and addictive puzzle game to play on your iPhone or iPad, you might want to check out Candy Crush Saga. This game has been around for almost a decade, and it is still one of the most popular mobile games in the world. In this article, we will tell you what Candy Crush Saga is, how to download and play it on your iOS device, what features it offers, and some tips and tricks to help you crush more candies. We will also suggest some alternatives to Candy Crush Saga in case you want to try something different.</p>
|
4 |
-
<h2>What is Candy Crush Saga and why is it popular?</h2>
|
5 |
-
<p>Candy Crush Saga is a match-three puzzle game developed by King, a leading company in casual gaming. The game was released in 2012 for Facebook, and later for iOS, Android, Windows Phone, and Windows 10. The game has over a trillion levels, each with a different objective and layout. The basic gameplay involves swapping adjacent candies to make matches of three or more of the same color, which will clear them from the board and make way for new ones. Matching more than three candies will create special candies that have various effects, such as clearing a whole row or column, or exploding all candies of a certain color. The game also has different game modes, such as target score, clear the jelly, collect the ingredients, and order mode.</p>
|
6 |
-
<h2>candy crush saga ios download</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://jinyurl.com/2uNUtG">https://jinyurl.com/2uNUtG</a></b></p><br /><br />
|
7 |
-
<p>Candy Crush Saga is popular because it is simple to play but challenging to master. It appeals to a wide range of players, from casual gamers who want to kill some time, to hardcore gamers who want to compete with their friends and other players around the world. The game also has a colorful and cute design, with catchy music and sound effects. The game is constantly updated with new levels and features, keeping the players engaged and entertained.</p>
|
8 |
-
<h2>How to download and play Candy Crush Saga on iOS devices?</h2>
|
9 |
-
<p>Downloading and playing Candy Crush Saga on your iPhone or iPad is easy and free. Here are the steps you need to follow:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Go to the App Store on your device and search for Candy Crush Saga.</li>
|
12 |
-
<li>Tap on the Get button to download the game. You may need to enter your Apple ID password or use Touch ID or Face ID to confirm.</li>
|
13 |
-
<li>Once the download is complete, tap on the Open button to launch the game.</li>
|
14 |
-
<li>You will see a tutorial that will teach you the basics of the game. You can also tap on the Level Type button at the bottom-left corner of the screen to learn more about the different objectives and game modes.</li>
|
15 |
-
<li>To start playing, tap on a level on the map screen. You can also swipe left or right to see more levels.</li>
|
16 |
-
<li>To swap candies, simply drag one candy over another adjacent one. You can also tap on a candy and then tap on another one next to it.</li>
|
17 |
-
<li>To use a special candy, match it with other candies of the same color or type.</li>
|
18 |
-
<li>To use a booster, tap on it before starting a level or during a level. Boosters can help you clear difficult levels by giving you extra moves, lives, or power-ups.</li>
|
19 |
-
<li>To pause or exit a level, tap on the Menu button at the bottom-right corner of the screen.</li>
|
20 |
-
</ol>
|
21 |
-
<p>You can also connect your game to your Facebook account to sync your progress, get more lives, and play with your friends. To do this, tap on the Connect button on the main screen and follow the instructions. You can also invite your friends to play Candy Crush Saga and send or receive lives and boosters from them.</p>
|
22 |
-
<h2>Features of Candy Crush Saga</h2>
|
23 |
-
<p>Candy Crush Saga is not just a simple match-three game. It has many features that make it more fun and exciting. Here are some of them:</p>
|
24 |
-
<h3>The game that keeps you craving more</h3>
|
25 |
-
<p>Candy Crush Saga has over a trillion levels, and new ones are added every week. You will never run out of challenges and surprises. The game also has different episodes, each with a unique theme and story. You will meet various characters along the way, such as Tiffi, Mr. Toffee, Odus the Owl, and many more. You will also encounter different types of candies, such as striped, wrapped, color bomb, jelly fish, coconut wheel, and more. Each candy has a different effect when matched or activated.</p>
|
26 |
-
<h3>Many ways to win rewards</h3>
|
27 |
-
<p>Candy Crush Saga rewards you for playing well and being loyal. You can earn stars by completing levels with high scores. You can also collect sugar drops by matching certain candies. These stars and sugar drops can be used to unlock special features and boosters. You can also spin the Daily Booster Wheel every day to get a free booster. You can also participate in events and quests to win more prizes and bonuses. Some of the events and quests are Daily Quests, Sugar Track, Build-a-Bot, Fantastic Five, Sweet Streak, and more.</p>
|
28 |
-
<h3>Variety of sugar-coated challenges</h3>
|
29 |
-
<p>Candy Crush Saga has different game modes that test your skills and strategy. Some of the game modes are:</p>
|
30 |
-
<p>candy crush saga game download for iphone<br />
|
31 |
-
how to install candy crush saga on ios<br />
|
32 |
-
candy crush saga free download ios app store<br />
|
33 |
-
candy crush saga latest version download for ios<br />
|
34 |
-
candy crush saga cheats and tips for ios<br />
|
35 |
-
candy crush saga ios download without wifi<br />
|
36 |
-
candy crush saga offline mode download for ios<br />
|
37 |
-
candy crush saga hack download for ios<br />
|
38 |
-
candy crush saga mod apk download for ios<br />
|
39 |
-
candy crush saga unlimited lives download for ios<br />
|
40 |
-
candy crush saga update download for ios<br />
|
41 |
-
candy crush saga old version download for ios<br />
|
42 |
-
candy crush saga download size for ios<br />
|
43 |
-
candy crush saga download link for ios<br />
|
44 |
-
candy crush saga download error on ios<br />
|
45 |
-
candy crush saga download from itunes<br />
|
46 |
-
candy crush saga compatible with ios 14<br />
|
47 |
-
candy crush saga support for ios 15<br />
|
48 |
-
candy crush saga requirements for ios devices<br />
|
49 |
-
candy crush saga ratings and reviews for ios<br />
|
50 |
-
candy crush saga features and benefits for ios users<br />
|
51 |
-
candy crush saga alternatives and competitors for ios<br />
|
52 |
-
candy crush saga best levels and episodes for ios<br />
|
53 |
-
candy crush saga rewards and prizes for ios players<br />
|
54 |
-
candy crush saga events and challenges for ios gamers<br />
|
55 |
-
candy crush saga friends and community for ios fans<br />
|
56 |
-
candy crush soda saga download for ios<br />
|
57 |
-
candy crush jelly saga download for ios<br />
|
58 |
-
candy crush friends saga download for ios<br />
|
59 |
-
candy crush dreamworld saga download for ios<br />
|
60 |
-
candy crush all stars download for ios<br />
|
61 |
-
candy crush blast download for ios<br />
|
62 |
-
candy crush farm heroes download for ios<br />
|
63 |
-
candy crush pet rescue download for ios<br />
|
64 |
-
candy crush bubble witch download for ios<br />
|
65 |
-
how to play candy crush saga on ipad<br />
|
66 |
-
how to play candy crush saga on apple watch<br />
|
67 |
-
how to play candy crush saga on apple tv<br />
|
68 |
-
how to play candy crush saga on macbook<br />
|
69 |
-
how to play candy crush saga on imac<br />
|
70 |
-
how to sync candy crush saga across ios devices<br />
|
71 |
-
how to backup and restore candy crush saga on ios devices<br />
|
72 |
-
how to transfer candy crush saga from android to ios devices or vice versa <br />
|
73 |
-
how to connect candy crush saga with facebook on ios devices <br />
|
74 |
-
how to contact candy crush saga customer service on ios devices <br />
|
75 |
-
how to delete or uninstall candy crush saga from ios devices <br />
|
76 |
-
how to fix or troubleshoot candy crush saga issues on ios devices</p>
|
77 |
-
<ul>
|
78 |
-
<li>Target Score: You need to reach a certain score within a limited number of moves or time.</li>
|
79 |
-
<li>Clear the Jelly: You need to clear all the jelly tiles from the board by matching candies on them.</li>
|
80 |
-
<li>Collect the Ingredients: You need to bring down all the ingredients (cherries or hazelnuts) to the bottom of the board.</li>
|
81 |
-
<li>Order Mode: You need to collect a specific number or type of candies or combinations.</li>
|
82 |
-
<li>Mixed Mode: You need to complete two or more objectives in one level.</li>
|
83 |
-
</ul>
|
84 |
-
<p>Each game mode has its own challenges and strategies. You need to adapt your moves according to the objective and the layout of the board.</p>
|
85 |
-
<h3>Play alone or with friends</h3>
|
86 |
-
<p>Candy Crush Saga is a game that you can enjoy alone or with your friends. You can play offline or online, depending on your preference. You can also connect your game to your Facebook account to see how your friends are doing, compare scores, send or receive lives and boosters, and compete in leaderboards and tournaments. You can also join a team with other players and work together to achieve common goals and rewards.</p>
|
87 |
-
<h2>Tips and tricks for Candy Crush Saga</h2>
|
88 |
-
<p>Candy Crush Saga is a game that requires skill, strategy, and luck. Sometimes, you may get stuck on a level or run out of lives or boosters. Don't worry, we have some tips and tricks that can help you overcome these challenges and have more fun playing the game. Here are some of them:</p>
|
89 |
-
<h3>Know the best and worst combos</h3>
|
90 |
-
<p>One of the keys to success in Candy Crush Saga is to make good use of the special candies and their combinations. Some of the best combos are:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Striped + Wrapped: This creates a giant candy that clears three rows and three columns.</li>
|
93 |
-
<li>Striped + Color Bomb: This turns all the candies of the same color as the striped candy into striped candies and activates them.</li>
|
94 |
-
<li>Wrapped + Color Bomb: This clears all the candies of two colors from the board.</li>
|
95 |
-
<li>Color Bomb + Color Bomb: This clears all the candies from the board.</li>
|
96 |
-
</ul>
|
97 |
-
<p>Some of the worst combos are:</p>
|
98 |
-
<ul>
|
99 |
-
<li>Jelly Fish + Jelly Fish: This only creates three more jelly fish that may not be very helpful.</li>
|
100 |
-
<li>Coconut Wheel + Coconut Wheel: This only creates two striped candies that may not be in a good position.</li>
|
101 |
-
<li>Lucky Candy + Lucky Candy: This only creates two random special candies that may not match your objective.</li>
|
102 |
-
</ul>
|
103 |
-
<h3>Use special candies wisely</h3>
|
104 |
-
<p>Special candies are very powerful, but they are also limited. You should use them wisely and strategically. Here are some tips on how to use them:</p>
|
105 |
-
<ul>
|
106 |
-
<li>Don't waste them on easy matches or low-value candies. Save them for hard-to-reach areas, obstacles, or objectives.</li>
|
107 |
-
<li>Don't activate them right away. Wait for the best opportunity to make a bigger impact.</li>
|
108 |
-
<li>Don't combine them randomly. Think about the best combo for your situation and objective.</li>
|
109 |
-
<li>Don't rely on them too much. Sometimes, a simple match can be more effective than a special candy.</li>
|
110 |
-
</ul>
|
111 |
-
<h3>Plan your moves ahead</h3>
|
112 |
-
<p>Candy Crush Saga is a game that requires you to think ahead and plan your moves carefully. You should not just match candies randomly or impulsively. Here are some tips on how to plan your moves:</p>
|
113 |
-
<ul>
|
114 |
-
<li>Look at the whole board and the objective before making a move.</li>
|
115 |
-
<li>Try to create matches at the bottom of the board first, as this will cause more cascades and create more opportunities for matches and special candies.</li>
|
116 |
-
<li>Try to create matches near the edges or corners of the board, as these are harder to clear and may contain obstacles or ingredients.</li>
|
117 |
-
<li>Try to create matches that will help you clear the jelly, collect the ingredients, or fulfill the order, depending on the game mode.</li>
|
118 |
-
<li>Try to avoid moves that will disrupt your existing combos or potential matches.</li>
|
119 |
-
</ul>
|
120 |
-
<h3>Save your boosters for hard levels</h3>
|
121 |
-
<p>Boosters are very helpful in Candy Crush Saga, but they are also scarce and expensive. You should not use them on easy levels or when you are not sure if they will help you. Here are some tips on how to save your boosters:</p>
|
122 |
-
<ul>
|
123 |
-
<li>Earn boosters by playing well, spinning the Daily Booster Wheel, completing events and quests, or watching ads.</li>
|
124 |
-
<li>Use boosters only when you are stuck on a level or when you are close to completing it.</li>
|
125 |
-
<li>Use boosters that match your objective and situation. For example, use a Lollipop Hammer to clear a single candy or obstacle, use a Free Switch to swap two candies that are not adjacent, use a Striped and Wrapped to clear a large area of the board, etc.</li>
|
126 |
-
<li>Use boosters in combination with special candies or other boosters to maximize their effect.</li>
|
127 |
-
</ul>
|
128 |
-
<h3>Connect to Facebook for extra benefits</h3>
|
129 |
-
<p>Candy Crush Saga is more fun when you play with your friends. You can connect your game to your Facebook account to enjoy some extra benefits. Here are some of them:</p>
|
130 |
-
<ul>
|
131 |
-
<li>You can sync your progress across different devices and platforms.</li>
|
132 |
-
<li>You can see how your friends are doing on the map screen and compare scores with them.</li>
|
133 |
-
<li>You can send and receive lives and boosters from your friends.</li>
|
134 |
-
<li>You can compete with your friends and other players in leaderboards and tournaments.</li>
|
135 |
-
<li>You can join a team with other players and work together to achieve common goals and rewards.</li>
|
136 |
-
</ul>
|
137 |
-
<h2>Alternatives to Candy Crush Saga</h2>
|
138 |
-
<p>Candy Crush Saga is a great game, but it is not the only one of its kind. There are many other games that offer similar or different gameplay and features. If you want to try something new, here are some alternatives to Candy Crush Saga that you might like:</p>
|
139 |
-
<h3>Zookeeper Battle</h3>
|
140 |
-
<p>Zookeeper Battle is a match-three puzzle game that pits you against other players in real-time battles. You need to match animal tiles to attack your opponent and defend yourself. You can also use items and skills to gain an edge in the battle. The game has cute graphics and sound effects, and a simple but addictive gameplay. You can play with your friends or with random players from around the world.</p>
|
141 |
-
<h3>Bejeweled Blitz</h3>
|
142 |
-
<p>Bejeweled Blitz is a fast-paced match-three puzzle game that challenges you to score as high as possible in 60 seconds. You need to match gems of the same color to clear them from the board and create cascades and combos. You can also use special gems and boosters to increase your score and unleash powerful effects. The game has stunning graphics and sound effects, and a competitive gameplay. You can play with your friends or with millions of players from around the world.</p>
|
143 |
-
<h3>Two Dots</h3>
|
144 |
-
<p>Two Dots is a minimalist match-three puzzle game that requires you to connect dots of the same color to clear them from the board. You need to complete different objectives in each level, such as breaking ice, dropping anchors, or collecting fireflies. The game has a beautiful design and music, and a relaxing but challenging gameplay. You can play alone or with your friends in co-op mode.</p>
|
145 |
-
<h3>Futurama: Game of Drones</h3>
|
146 |
-
<p>Futurama: Game of Drones is a match-four puzzle game that features the characters and humor of the popular animated series Futurama. You need to match delivery drones of the same color to clear them from the board and deliver packages. You can also use special drones and power-ups to create explosions and combos. The game has a hilarious story and dialogue, and a fun and addictive gameplay. You can play with your friends or with other players from around the world.</p>
|
147 |
-
<h2>Conclusion</h2>
|
148 |
-
<p>Candy Crush Saga is a game that you can download and play on your iOS device for free. It is a match-three puzzle game that has over a trillion levels, each with a different objective and game mode. It also has many features that make it more fun and exciting, such as special candies, boosters, events, quests, and more. You can also play with your friends or with other players from around the world. Candy Crush Saga is a game that will keep you craving more.</p>
|
149 |
-
<p>If you are ready to join the sweet adventure, download Candy Crush Saga today and start matching candies. You will not regret it.</p>
|
150 |
-
<h2>FAQs</h2>
|
151 |
-
<p>Here are some frequently asked questions about Candy Crush Saga:</p>
|
152 |
-
<h3>How do I get more lives in Candy Crush Saga?</h3>
|
153 |
-
<p>You can get more lives in Candy Crush Saga by doing one of the following:</p>
|
154 |
-
<ul>
|
155 |
-
<li>Wait for 30 minutes for each life to regenerate.</li>
|
156 |
-
<li>Ask your friends to send you lives.</li>
|
157 |
-
<li>Buy lives with gold bars.</li>
|
158 |
-
<li>Use the Infinite Lives booster when it is available.</li>
|
159 |
-
</ul>
|
160 |
-
<h3>How do I sync my progress across devices?</h3>
|
161 |
-
<p>You can sync your progress across devices by connecting your game to your Facebook account. This will allow you to access your game data on any device or platform that supports Candy Crush Saga.</p>
|
162 |
-
<h3>How do I clear jelly and other obstacles?</h3>
|
163 |
-
<p>You can clear jelly and other obstacles by matching candies on them or near them. Some obstacles may require more than one match to clear, such as double jelly, licorice locks, chocolate, or cake bombs. You can also use special candies or boosters to clear them faster or easier.</p>
|
164 |
-
<h3>How do I get free gold bars in Candy Crush Saga?</h3>
|
165 |
-
<p>You can get free gold bars in Candy Crush Saga by doing one of the following:</p>
|
166 |
-
<ul>
|
167 |
-
<li>Complete achievements and milestones.</li>
|
168 |
-
<li>Participate in events and quests that offer gold bars as rewards.</li>
|
169 |
-
<li>Watch ads when they are available.</li>
|
170 |
-
<li>Use the Piggy Bank feature when it is full.</li>
|
171 |
-
</ul>
|
172 |
-
<h3>How do I contact the support team for Candy Crush Saga?</h3>
|
173 |
-
<p>You can contact the support team for Candy Crush Saga by doing one of the following:</p>
|
174 |
-
<ul>
|
175 |
-
<li>Tapping on the Settings button on the main screen and then tapping on Help Center.</li>
|
176 |
-
<li>Visiting the official website of King and clicking on Support.</li>
|
177 |
-
<li>Sending an email to [email protected].</li>
|
178 |
-
</ul></p> 197e85843d<br />
|
179 |
-
<br />
|
180 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_ddpm.py
DELETED
@@ -1,360 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 UC Berkeley Team and The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
|
17 |
-
|
18 |
-
import math
|
19 |
-
from dataclasses import dataclass
|
20 |
-
from typing import List, Optional, Tuple, Union
|
21 |
-
|
22 |
-
import numpy as np
|
23 |
-
import paddle
|
24 |
-
import paddle.nn.functional as F
|
25 |
-
|
26 |
-
from ..configuration_utils import ConfigMixin, FrozenDict, register_to_config
|
27 |
-
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput, deprecate
|
28 |
-
from .scheduling_utils import SchedulerMixin
|
29 |
-
|
30 |
-
|
31 |
-
@dataclass
|
32 |
-
class DDPMSchedulerOutput(BaseOutput):
|
33 |
-
"""
|
34 |
-
Output class for the scheduler's step function output.
|
35 |
-
|
36 |
-
Args:
|
37 |
-
prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
38 |
-
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
|
39 |
-
denoising loop.
|
40 |
-
pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
41 |
-
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
|
42 |
-
`pred_original_sample` can be used to preview progress or for guidance.
|
43 |
-
"""
|
44 |
-
|
45 |
-
prev_sample: paddle.Tensor
|
46 |
-
pred_original_sample: Optional[paddle.Tensor] = None
|
47 |
-
|
48 |
-
|
49 |
-
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
|
50 |
-
"""
|
51 |
-
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
52 |
-
(1-beta) over time from t = [0,1].
|
53 |
-
|
54 |
-
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
55 |
-
to that part of the diffusion process.
|
56 |
-
|
57 |
-
|
58 |
-
Args:
|
59 |
-
num_diffusion_timesteps (`int`): the number of betas to produce.
|
60 |
-
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
61 |
-
prevent singularities.
|
62 |
-
|
63 |
-
Returns:
|
64 |
-
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
65 |
-
"""
|
66 |
-
|
67 |
-
def alpha_bar(time_step):
|
68 |
-
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
|
69 |
-
|
70 |
-
betas = []
|
71 |
-
for i in range(num_diffusion_timesteps):
|
72 |
-
t1 = i / num_diffusion_timesteps
|
73 |
-
t2 = (i + 1) / num_diffusion_timesteps
|
74 |
-
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
|
75 |
-
return paddle.to_tensor(betas, dtype="float32")
|
76 |
-
|
77 |
-
|
78 |
-
class DDPMScheduler(SchedulerMixin, ConfigMixin):
|
79 |
-
"""
|
80 |
-
Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and
|
81 |
-
Langevin dynamics sampling.
|
82 |
-
|
83 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
84 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
85 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
86 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
87 |
-
|
88 |
-
For more details, see the original paper: https://arxiv.org/abs/2006.11239
|
89 |
-
|
90 |
-
Args:
|
91 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
92 |
-
beta_start (`float`): the starting `beta` value of inference.
|
93 |
-
beta_end (`float`): the final `beta` value.
|
94 |
-
beta_schedule (`str`):
|
95 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
96 |
-
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
97 |
-
trained_betas (`np.ndarray`, optional):
|
98 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
99 |
-
variance_type (`str`):
|
100 |
-
options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
|
101 |
-
`fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
|
102 |
-
clip_sample (`bool`, default `True`):
|
103 |
-
option to clip predicted sample between -1 and 1 for numerical stability.
|
104 |
-
prediction_type (`str`, default `epsilon`, optional):
|
105 |
-
prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
|
106 |
-
process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
|
107 |
-
https://imagen.research.google/video/paper.pdf)
|
108 |
-
"""
|
109 |
-
|
110 |
-
_compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
|
111 |
-
_deprecated_kwargs = ["predict_epsilon"]
|
112 |
-
order = 1
|
113 |
-
|
114 |
-
@register_to_config
|
115 |
-
def __init__(
|
116 |
-
self,
|
117 |
-
num_train_timesteps: int = 1000,
|
118 |
-
beta_start: float = 0.0001,
|
119 |
-
beta_end: float = 0.02,
|
120 |
-
beta_schedule: str = "linear",
|
121 |
-
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
122 |
-
variance_type: str = "fixed_small",
|
123 |
-
clip_sample: bool = True,
|
124 |
-
prediction_type: str = "epsilon",
|
125 |
-
**kwargs,
|
126 |
-
):
|
127 |
-
message = (
|
128 |
-
"Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
|
129 |
-
" DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
|
130 |
-
)
|
131 |
-
predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
|
132 |
-
if predict_epsilon is not None:
|
133 |
-
self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
|
134 |
-
if trained_betas is not None:
|
135 |
-
self.betas = paddle.to_tensor(trained_betas, dtype="float32")
|
136 |
-
elif beta_schedule == "linear":
|
137 |
-
self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
|
138 |
-
elif beta_schedule == "scaled_linear":
|
139 |
-
# this schedule is very specific to the latent diffusion model.
|
140 |
-
self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
|
141 |
-
elif beta_schedule == "squaredcos_cap_v2":
|
142 |
-
# Glide cosine schedule
|
143 |
-
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
144 |
-
elif beta_schedule == "sigmoid":
|
145 |
-
# GeoDiff sigmoid schedule
|
146 |
-
betas = paddle.linspace(-6, 6, num_train_timesteps)
|
147 |
-
self.betas = F.sigmoid(betas) * (beta_end - beta_start) + beta_start
|
148 |
-
else:
|
149 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
150 |
-
|
151 |
-
self.alphas = 1.0 - self.betas
|
152 |
-
self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
|
153 |
-
self.one = paddle.to_tensor(1.0)
|
154 |
-
|
155 |
-
# standard deviation of the initial noise distribution
|
156 |
-
self.init_noise_sigma = 1.0
|
157 |
-
|
158 |
-
# setable values
|
159 |
-
self.num_inference_steps = None
|
160 |
-
self.timesteps = paddle.to_tensor(np.arange(0, num_train_timesteps)[::-1].copy())
|
161 |
-
|
162 |
-
self.variance_type = variance_type
|
163 |
-
|
164 |
-
def scale_model_input(self, sample: paddle.Tensor, timestep: Optional[int] = None) -> paddle.Tensor:
|
165 |
-
"""
|
166 |
-
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
167 |
-
current timestep.
|
168 |
-
|
169 |
-
Args:
|
170 |
-
sample (`paddle.Tensor`): input sample
|
171 |
-
timestep (`int`, optional): current timestep
|
172 |
-
|
173 |
-
Returns:
|
174 |
-
`paddle.Tensor`: scaled input sample
|
175 |
-
"""
|
176 |
-
return sample
|
177 |
-
|
178 |
-
def set_timesteps(self, num_inference_steps: int):
|
179 |
-
"""
|
180 |
-
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
|
181 |
-
|
182 |
-
Args:
|
183 |
-
num_inference_steps (`int`):
|
184 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
185 |
-
"""
|
186 |
-
num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
|
187 |
-
self.num_inference_steps = num_inference_steps
|
188 |
-
timesteps = np.arange(
|
189 |
-
0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps
|
190 |
-
)[::-1].copy()
|
191 |
-
self.timesteps = paddle.to_tensor(timesteps)
|
192 |
-
|
193 |
-
def _get_variance(self, t, predicted_variance=None, variance_type=None):
|
194 |
-
alpha_prod_t = self.alphas_cumprod[t]
|
195 |
-
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
|
196 |
-
|
197 |
-
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
|
198 |
-
# and sample from it to get previous sample
|
199 |
-
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
|
200 |
-
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]
|
201 |
-
|
202 |
-
if variance_type is None:
|
203 |
-
variance_type = self.config.variance_type
|
204 |
-
|
205 |
-
# hacks - were probably added for training stability
|
206 |
-
if variance_type == "fixed_small":
|
207 |
-
variance = paddle.clip(variance, min=1e-20)
|
208 |
-
# for rl-diffuser https://arxiv.org/abs/2205.09991
|
209 |
-
elif variance_type == "fixed_small_log":
|
210 |
-
variance = paddle.log(paddle.clip(variance, min=1e-20))
|
211 |
-
variance = paddle.exp(0.5 * variance)
|
212 |
-
elif variance_type == "fixed_large":
|
213 |
-
variance = self.betas[t]
|
214 |
-
elif variance_type == "fixed_large_log":
|
215 |
-
# Glide max_log
|
216 |
-
variance = paddle.log(self.betas[t])
|
217 |
-
elif variance_type == "learned":
|
218 |
-
return predicted_variance
|
219 |
-
elif variance_type == "learned_range":
|
220 |
-
min_log = variance
|
221 |
-
max_log = self.betas[t]
|
222 |
-
frac = (predicted_variance + 1) / 2
|
223 |
-
variance = frac * max_log + (1 - frac) * min_log
|
224 |
-
|
225 |
-
return variance
|
226 |
-
|
227 |
-
def step(
|
228 |
-
self,
|
229 |
-
model_output: paddle.Tensor,
|
230 |
-
timestep: int,
|
231 |
-
sample: paddle.Tensor,
|
232 |
-
generator=None,
|
233 |
-
return_dict: bool = True,
|
234 |
-
**kwargs,
|
235 |
-
) -> Union[DDPMSchedulerOutput, Tuple]:
|
236 |
-
"""
|
237 |
-
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
|
238 |
-
process from the learned model outputs (most often the predicted noise).
|
239 |
-
|
240 |
-
Args:
|
241 |
-
model_output (`paddle.Tensor`): direct output from learned diffusion model.
|
242 |
-
timestep (`int`): current discrete timestep in the diffusion chain.
|
243 |
-
sample (`paddle.Tensor`):
|
244 |
-
current instance of sample being created by diffusion process.
|
245 |
-
generator: random number generator.
|
246 |
-
return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
|
247 |
-
|
248 |
-
Returns:
|
249 |
-
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
|
250 |
-
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
|
251 |
-
returning a tuple, the first element is the sample tensor.
|
252 |
-
|
253 |
-
"""
|
254 |
-
message = (
|
255 |
-
"Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
|
256 |
-
" DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
|
257 |
-
)
|
258 |
-
predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
|
259 |
-
if predict_epsilon is not None:
|
260 |
-
new_config = dict(self.config)
|
261 |
-
new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
|
262 |
-
self._internal_dict = FrozenDict(new_config)
|
263 |
-
|
264 |
-
t = timestep
|
265 |
-
|
266 |
-
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
|
267 |
-
model_output, predicted_variance = paddle.split(model_output, sample.shape[1], axis=1)
|
268 |
-
else:
|
269 |
-
predicted_variance = None
|
270 |
-
|
271 |
-
# 1. compute alphas, betas
|
272 |
-
alpha_prod_t = self.alphas_cumprod[t]
|
273 |
-
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
|
274 |
-
beta_prod_t = 1 - alpha_prod_t
|
275 |
-
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
276 |
-
|
277 |
-
# 2. compute predicted original sample from predicted noise also called
|
278 |
-
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
|
279 |
-
if self.config.prediction_type == "epsilon":
|
280 |
-
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
|
281 |
-
elif self.config.prediction_type == "sample":
|
282 |
-
pred_original_sample = model_output
|
283 |
-
elif self.config.prediction_type == "v_prediction":
|
284 |
-
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
|
285 |
-
else:
|
286 |
-
raise ValueError(
|
287 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
|
288 |
-
" `v_prediction` for the DDPMScheduler."
|
289 |
-
)
|
290 |
-
|
291 |
-
# 3. Clip "predicted x_0"
|
292 |
-
if self.config.clip_sample:
|
293 |
-
pred_original_sample = paddle.clip(pred_original_sample, -1, 1)
|
294 |
-
|
295 |
-
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
|
296 |
-
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
|
297 |
-
pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
|
298 |
-
current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
|
299 |
-
|
300 |
-
# 5. Compute predicted previous sample µ_t
|
301 |
-
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
|
302 |
-
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
|
303 |
-
|
304 |
-
# 6. Add noise
|
305 |
-
variance = 0
|
306 |
-
if t > 0:
|
307 |
-
variance_noise = paddle.randn(model_output.shape, generator=generator, dtype=model_output.dtype)
|
308 |
-
if self.variance_type == "fixed_small_log":
|
309 |
-
variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise
|
310 |
-
else:
|
311 |
-
variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise
|
312 |
-
|
313 |
-
pred_prev_sample = pred_prev_sample + variance
|
314 |
-
|
315 |
-
if not return_dict:
|
316 |
-
return (pred_prev_sample,)
|
317 |
-
|
318 |
-
return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
|
319 |
-
|
320 |
-
def add_noise(
|
321 |
-
self,
|
322 |
-
original_samples: paddle.Tensor,
|
323 |
-
noise: paddle.Tensor,
|
324 |
-
timesteps: paddle.Tensor,
|
325 |
-
) -> paddle.Tensor:
|
326 |
-
# Make sure alphas_cumprod and timestep have same dtype as original_samples
|
327 |
-
self.alphas_cumprod = self.alphas_cumprod.cast(original_samples.dtype)
|
328 |
-
|
329 |
-
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
|
330 |
-
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
331 |
-
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
332 |
-
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
333 |
-
|
334 |
-
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
|
335 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
336 |
-
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
337 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
338 |
-
|
339 |
-
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
340 |
-
return noisy_samples
|
341 |
-
|
342 |
-
def get_velocity(self, sample: paddle.Tensor, noise: paddle.Tensor, timesteps: paddle.Tensor) -> paddle.Tensor:
|
343 |
-
# Make sure alphas_cumprod and timestep have same dtype as original_samples
|
344 |
-
self.alphas_cumprod = self.alphas_cumprod.cast(sample.dtype)
|
345 |
-
|
346 |
-
sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
|
347 |
-
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
348 |
-
while len(sqrt_alpha_prod.shape) < len(sample.shape):
|
349 |
-
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
350 |
-
|
351 |
-
sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
|
352 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
353 |
-
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
|
354 |
-
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
355 |
-
|
356 |
-
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
|
357 |
-
return velocity
|
358 |
-
|
359 |
-
def __len__(self):
|
360 |
-
return self.config.num_train_timesteps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/test/test_synthesis_engine.py
DELETED
@@ -1,654 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
from copy import deepcopy
|
3 |
-
from random import random
|
4 |
-
from typing import Union
|
5 |
-
from unittest import TestCase
|
6 |
-
from unittest.mock import Mock
|
7 |
-
|
8 |
-
import numpy
|
9 |
-
|
10 |
-
from voicevox_engine.acoustic_feature_extractor import OjtPhoneme
|
11 |
-
from voicevox_engine.model import AccentPhrase, AudioQuery, Mora
|
12 |
-
from voicevox_engine.synthesis_engine import SynthesisEngine
|
13 |
-
|
14 |
-
# TODO: import from voicevox_engine.synthesis_engine.mora
|
15 |
-
from voicevox_engine.synthesis_engine.synthesis_engine import (
|
16 |
-
mora_phoneme_list,
|
17 |
-
pre_process,
|
18 |
-
split_mora,
|
19 |
-
to_flatten_moras,
|
20 |
-
to_phoneme_data_list,
|
21 |
-
unvoiced_mora_phoneme_list,
|
22 |
-
)
|
23 |
-
|
24 |
-
|
25 |
-
def yukarin_s_mock(length: int, phoneme_list: numpy.ndarray, speaker_id: numpy.ndarray):
|
26 |
-
result = []
|
27 |
-
# mockとしての適当な処理、特に意味はない
|
28 |
-
for i in range(length):
|
29 |
-
result.append(float(phoneme_list[i] * 0.5 + speaker_id))
|
30 |
-
return numpy.array(result)
|
31 |
-
|
32 |
-
|
33 |
-
def yukarin_sa_mock(
|
34 |
-
length: int,
|
35 |
-
vowel_phoneme_list: numpy.ndarray,
|
36 |
-
consonant_phoneme_list: numpy.ndarray,
|
37 |
-
start_accent_list: numpy.ndarray,
|
38 |
-
end_accent_list: numpy.ndarray,
|
39 |
-
start_accent_phrase_list: numpy.ndarray,
|
40 |
-
end_accent_phrase_list: numpy.ndarray,
|
41 |
-
speaker_id: numpy.ndarray,
|
42 |
-
):
|
43 |
-
result = []
|
44 |
-
# mockとしての適当な処理、特に意味はない
|
45 |
-
for i in range(length):
|
46 |
-
result.append(
|
47 |
-
float(
|
48 |
-
(
|
49 |
-
vowel_phoneme_list[0][i]
|
50 |
-
+ consonant_phoneme_list[0][i]
|
51 |
-
+ start_accent_list[0][i]
|
52 |
-
+ end_accent_list[0][i]
|
53 |
-
+ start_accent_phrase_list[0][i]
|
54 |
-
+ end_accent_phrase_list[0][i]
|
55 |
-
)
|
56 |
-
* 0.5
|
57 |
-
+ speaker_id
|
58 |
-
)
|
59 |
-
)
|
60 |
-
return numpy.array(result)[numpy.newaxis]
|
61 |
-
|
62 |
-
|
63 |
-
def decode_mock(
|
64 |
-
length: int,
|
65 |
-
phoneme_size: int,
|
66 |
-
f0: numpy.ndarray,
|
67 |
-
phoneme: numpy.ndarray,
|
68 |
-
speaker_id: Union[numpy.ndarray, int],
|
69 |
-
):
|
70 |
-
result = []
|
71 |
-
# mockとしての適当な処理、特に意味はない
|
72 |
-
for i in range(length):
|
73 |
-
# decode forwardはデータサイズがlengthの256倍になるのでとりあえず256回データをresultに入れる
|
74 |
-
for _ in range(256):
|
75 |
-
result.append(
|
76 |
-
float(
|
77 |
-
f0[i][0] * (numpy.where(phoneme[i] == 1)[0] / phoneme_size)
|
78 |
-
+ speaker_id
|
79 |
-
)
|
80 |
-
)
|
81 |
-
return numpy.array(result)
|
82 |
-
|
83 |
-
|
84 |
-
class MockCore:
|
85 |
-
yukarin_s_forward = Mock(side_effect=yukarin_s_mock)
|
86 |
-
yukarin_sa_forward = Mock(side_effect=yukarin_sa_mock)
|
87 |
-
decode_forward = Mock(side_effect=decode_mock)
|
88 |
-
|
89 |
-
def metas(self):
|
90 |
-
return ""
|
91 |
-
|
92 |
-
def supported_devices(self):
|
93 |
-
return ""
|
94 |
-
|
95 |
-
def is_model_loaded(self, speaker_id):
|
96 |
-
return True
|
97 |
-
|
98 |
-
|
99 |
-
class TestSynthesisEngine(TestCase):
|
100 |
-
def setUp(self):
|
101 |
-
super().setUp()
|
102 |
-
self.str_list_hello_hiho = (
|
103 |
-
"sil k o N n i ch i w a pau h i h o d e s U sil".split()
|
104 |
-
)
|
105 |
-
self.phoneme_data_list_hello_hiho = [
|
106 |
-
OjtPhoneme(phoneme=p, start=i, end=i + 1)
|
107 |
-
for i, p in enumerate(
|
108 |
-
"pau k o N n i ch i w a pau h i h o d e s U pau".split()
|
109 |
-
)
|
110 |
-
]
|
111 |
-
self.accent_phrases_hello_hiho = [
|
112 |
-
AccentPhrase(
|
113 |
-
moras=[
|
114 |
-
Mora(
|
115 |
-
text="コ",
|
116 |
-
consonant="k",
|
117 |
-
consonant_length=0.0,
|
118 |
-
vowel="o",
|
119 |
-
vowel_length=0.0,
|
120 |
-
pitch=0.0,
|
121 |
-
),
|
122 |
-
Mora(
|
123 |
-
text="ン",
|
124 |
-
consonant=None,
|
125 |
-
consonant_length=None,
|
126 |
-
vowel="N",
|
127 |
-
vowel_length=0.0,
|
128 |
-
pitch=0.0,
|
129 |
-
),
|
130 |
-
Mora(
|
131 |
-
text="ニ",
|
132 |
-
consonant="n",
|
133 |
-
consonant_length=0.0,
|
134 |
-
vowel="i",
|
135 |
-
vowel_length=0.0,
|
136 |
-
pitch=0.0,
|
137 |
-
),
|
138 |
-
Mora(
|
139 |
-
text="チ",
|
140 |
-
consonant="ch",
|
141 |
-
consonant_length=0.0,
|
142 |
-
vowel="i",
|
143 |
-
vowel_length=0.0,
|
144 |
-
pitch=0.0,
|
145 |
-
),
|
146 |
-
Mora(
|
147 |
-
text="ワ",
|
148 |
-
consonant="w",
|
149 |
-
consonant_length=0.0,
|
150 |
-
vowel="a",
|
151 |
-
vowel_length=0.0,
|
152 |
-
pitch=0.0,
|
153 |
-
),
|
154 |
-
],
|
155 |
-
accent=5,
|
156 |
-
pause_mora=Mora(
|
157 |
-
text="、",
|
158 |
-
consonant=None,
|
159 |
-
consonant_length=None,
|
160 |
-
vowel="pau",
|
161 |
-
vowel_length=0.0,
|
162 |
-
pitch=0.0,
|
163 |
-
),
|
164 |
-
),
|
165 |
-
AccentPhrase(
|
166 |
-
moras=[
|
167 |
-
Mora(
|
168 |
-
text="ヒ",
|
169 |
-
consonant="h",
|
170 |
-
consonant_length=0.0,
|
171 |
-
vowel="i",
|
172 |
-
vowel_length=0.0,
|
173 |
-
pitch=0.0,
|
174 |
-
),
|
175 |
-
Mora(
|
176 |
-
text="ホ",
|
177 |
-
consonant="h",
|
178 |
-
consonant_length=0.0,
|
179 |
-
vowel="o",
|
180 |
-
vowel_length=0.0,
|
181 |
-
pitch=0.0,
|
182 |
-
),
|
183 |
-
Mora(
|
184 |
-
text="デ",
|
185 |
-
consonant="d",
|
186 |
-
consonant_length=0.0,
|
187 |
-
vowel="e",
|
188 |
-
vowel_length=0.0,
|
189 |
-
pitch=0.0,
|
190 |
-
),
|
191 |
-
Mora(
|
192 |
-
text="ス",
|
193 |
-
consonant="s",
|
194 |
-
consonant_length=0.0,
|
195 |
-
vowel="U",
|
196 |
-
vowel_length=0.0,
|
197 |
-
pitch=0.0,
|
198 |
-
),
|
199 |
-
],
|
200 |
-
accent=1,
|
201 |
-
pause_mora=None,
|
202 |
-
),
|
203 |
-
]
|
204 |
-
core = MockCore()
|
205 |
-
self.yukarin_s_mock = core.yukarin_s_forward
|
206 |
-
self.yukarin_sa_mock = core.yukarin_sa_forward
|
207 |
-
self.decode_mock = core.decode_forward
|
208 |
-
self.synthesis_engine = SynthesisEngine(
|
209 |
-
core=core,
|
210 |
-
)
|
211 |
-
|
212 |
-
def test_to_flatten_moras(self):
|
213 |
-
flatten_moras = to_flatten_moras(self.accent_phrases_hello_hiho)
|
214 |
-
self.assertEqual(
|
215 |
-
flatten_moras,
|
216 |
-
self.accent_phrases_hello_hiho[0].moras
|
217 |
-
+ [self.accent_phrases_hello_hiho[0].pause_mora]
|
218 |
-
+ self.accent_phrases_hello_hiho[1].moras,
|
219 |
-
)
|
220 |
-
|
221 |
-
def test_to_phoneme_data_list(self):
|
222 |
-
phoneme_data_list = to_phoneme_data_list(self.str_list_hello_hiho)
|
223 |
-
self.assertEqual(phoneme_data_list, self.phoneme_data_list_hello_hiho)
|
224 |
-
|
225 |
-
def test_split_mora(self):
|
226 |
-
consonant_phoneme_list, vowel_phoneme_list, vowel_indexes = split_mora(
|
227 |
-
self.phoneme_data_list_hello_hiho
|
228 |
-
)
|
229 |
-
|
230 |
-
self.assertEqual(vowel_indexes, [0, 2, 3, 5, 7, 9, 10, 12, 14, 16, 18, 19])
|
231 |
-
self.assertEqual(
|
232 |
-
vowel_phoneme_list,
|
233 |
-
[
|
234 |
-
OjtPhoneme(phoneme="pau", start=0, end=1),
|
235 |
-
OjtPhoneme(phoneme="o", start=2, end=3),
|
236 |
-
OjtPhoneme(phoneme="N", start=3, end=4),
|
237 |
-
OjtPhoneme(phoneme="i", start=5, end=6),
|
238 |
-
OjtPhoneme(phoneme="i", start=7, end=8),
|
239 |
-
OjtPhoneme(phoneme="a", start=9, end=10),
|
240 |
-
OjtPhoneme(phoneme="pau", start=10, end=11),
|
241 |
-
OjtPhoneme(phoneme="i", start=12, end=13),
|
242 |
-
OjtPhoneme(phoneme="o", start=14, end=15),
|
243 |
-
OjtPhoneme(phoneme="e", start=16, end=17),
|
244 |
-
OjtPhoneme(phoneme="U", start=18, end=19),
|
245 |
-
OjtPhoneme(phoneme="pau", start=19, end=20),
|
246 |
-
],
|
247 |
-
)
|
248 |
-
self.assertEqual(
|
249 |
-
consonant_phoneme_list,
|
250 |
-
[
|
251 |
-
None,
|
252 |
-
OjtPhoneme(phoneme="k", start=1, end=2),
|
253 |
-
None,
|
254 |
-
OjtPhoneme(phoneme="n", start=4, end=5),
|
255 |
-
OjtPhoneme(phoneme="ch", start=6, end=7),
|
256 |
-
OjtPhoneme(phoneme="w", start=8, end=9),
|
257 |
-
None,
|
258 |
-
OjtPhoneme(phoneme="h", start=11, end=12),
|
259 |
-
OjtPhoneme(phoneme="h", start=13, end=14),
|
260 |
-
OjtPhoneme(phoneme="d", start=15, end=16),
|
261 |
-
OjtPhoneme(phoneme="s", start=17, end=18),
|
262 |
-
None,
|
263 |
-
],
|
264 |
-
)
|
265 |
-
|
266 |
-
def test_pre_process(self):
|
267 |
-
flatten_moras, phoneme_data_list = pre_process(
|
268 |
-
deepcopy(self.accent_phrases_hello_hiho)
|
269 |
-
)
|
270 |
-
|
271 |
-
mora_index = 0
|
272 |
-
phoneme_index = 1
|
273 |
-
|
274 |
-
self.assertEqual(phoneme_data_list[0], OjtPhoneme("pau", 0, 1))
|
275 |
-
for accent_phrase in self.accent_phrases_hello_hiho:
|
276 |
-
moras = accent_phrase.moras
|
277 |
-
for mora in moras:
|
278 |
-
self.assertEqual(flatten_moras[mora_index], mora)
|
279 |
-
mora_index += 1
|
280 |
-
if mora.consonant is not None:
|
281 |
-
self.assertEqual(
|
282 |
-
phoneme_data_list[phoneme_index],
|
283 |
-
OjtPhoneme(mora.consonant, phoneme_index, phoneme_index + 1),
|
284 |
-
)
|
285 |
-
phoneme_index += 1
|
286 |
-
self.assertEqual(
|
287 |
-
phoneme_data_list[phoneme_index],
|
288 |
-
OjtPhoneme(mora.vowel, phoneme_index, phoneme_index + 1),
|
289 |
-
)
|
290 |
-
phoneme_index += 1
|
291 |
-
if accent_phrase.pause_mora:
|
292 |
-
self.assertEqual(flatten_moras[mora_index], accent_phrase.pause_mora)
|
293 |
-
mora_index += 1
|
294 |
-
self.assertEqual(
|
295 |
-
phoneme_data_list[phoneme_index],
|
296 |
-
OjtPhoneme("pau", phoneme_index, phoneme_index + 1),
|
297 |
-
)
|
298 |
-
phoneme_index += 1
|
299 |
-
self.assertEqual(
|
300 |
-
phoneme_data_list[phoneme_index],
|
301 |
-
OjtPhoneme("pau", phoneme_index, phoneme_index + 1),
|
302 |
-
)
|
303 |
-
|
304 |
-
def test_replace_phoneme_length(self):
|
305 |
-
result = self.synthesis_engine.replace_phoneme_length(
|
306 |
-
accent_phrases=deepcopy(self.accent_phrases_hello_hiho), speaker_id=1
|
307 |
-
)
|
308 |
-
|
309 |
-
# yukarin_sに渡される値の検証
|
310 |
-
yukarin_s_args = self.yukarin_s_mock.call_args[1]
|
311 |
-
list_length = yukarin_s_args["length"]
|
312 |
-
phoneme_list = yukarin_s_args["phoneme_list"]
|
313 |
-
self.assertEqual(list_length, 20)
|
314 |
-
self.assertEqual(list_length, len(phoneme_list))
|
315 |
-
numpy.testing.assert_array_equal(
|
316 |
-
phoneme_list,
|
317 |
-
numpy.array(
|
318 |
-
[
|
319 |
-
0,
|
320 |
-
23,
|
321 |
-
30,
|
322 |
-
4,
|
323 |
-
28,
|
324 |
-
21,
|
325 |
-
10,
|
326 |
-
21,
|
327 |
-
42,
|
328 |
-
7,
|
329 |
-
0,
|
330 |
-
19,
|
331 |
-
21,
|
332 |
-
19,
|
333 |
-
30,
|
334 |
-
12,
|
335 |
-
14,
|
336 |
-
35,
|
337 |
-
6,
|
338 |
-
0,
|
339 |
-
],
|
340 |
-
dtype=numpy.int64,
|
341 |
-
),
|
342 |
-
)
|
343 |
-
self.assertEqual(yukarin_s_args["speaker_id"], 1)
|
344 |
-
|
345 |
-
# flatten_morasを使わずに愚直にaccent_phrasesにデータを反映させてみる
|
346 |
-
true_result = deepcopy(self.accent_phrases_hello_hiho)
|
347 |
-
index = 1
|
348 |
-
|
349 |
-
def result_value(i: int):
|
350 |
-
return float(phoneme_list[i] * 0.5 + 1)
|
351 |
-
|
352 |
-
for accent_phrase in true_result:
|
353 |
-
moras = accent_phrase.moras
|
354 |
-
for mora in moras:
|
355 |
-
if mora.consonant is not None:
|
356 |
-
mora.consonant_length = result_value(index)
|
357 |
-
index += 1
|
358 |
-
mora.vowel_length = result_value(index)
|
359 |
-
index += 1
|
360 |
-
if accent_phrase.pause_mora is not None:
|
361 |
-
accent_phrase.pause_mora.vowel_length = result_value(index)
|
362 |
-
index += 1
|
363 |
-
|
364 |
-
self.assertEqual(result, true_result)
|
365 |
-
|
366 |
-
def test_replace_mora_pitch(self):
|
367 |
-
# 空のリストでエラーを吐かないか
|
368 |
-
empty_accent_phrases = []
|
369 |
-
self.assertEqual(
|
370 |
-
self.synthesis_engine.replace_mora_pitch(
|
371 |
-
accent_phrases=empty_accent_phrases, speaker_id=1
|
372 |
-
),
|
373 |
-
[],
|
374 |
-
)
|
375 |
-
|
376 |
-
result = self.synthesis_engine.replace_mora_pitch(
|
377 |
-
accent_phrases=deepcopy(self.accent_phrases_hello_hiho), speaker_id=1
|
378 |
-
)
|
379 |
-
|
380 |
-
# yukarin_saに渡される値の検証
|
381 |
-
yukarin_sa_args = self.yukarin_sa_mock.call_args[1]
|
382 |
-
list_length = yukarin_sa_args["length"]
|
383 |
-
vowel_phoneme_list = yukarin_sa_args["vowel_phoneme_list"][0]
|
384 |
-
consonant_phoneme_list = yukarin_sa_args["consonant_phoneme_list"][0]
|
385 |
-
start_accent_list = yukarin_sa_args["start_accent_list"][0]
|
386 |
-
end_accent_list = yukarin_sa_args["end_accent_list"][0]
|
387 |
-
start_accent_phrase_list = yukarin_sa_args["start_accent_phrase_list"][0]
|
388 |
-
end_accent_phrase_list = yukarin_sa_args["end_accent_phrase_list"][0]
|
389 |
-
self.assertEqual(list_length, 12)
|
390 |
-
self.assertEqual(list_length, len(vowel_phoneme_list))
|
391 |
-
self.assertEqual(list_length, len(consonant_phoneme_list))
|
392 |
-
self.assertEqual(list_length, len(start_accent_list))
|
393 |
-
self.assertEqual(list_length, len(end_accent_list))
|
394 |
-
self.assertEqual(list_length, len(start_accent_phrase_list))
|
395 |
-
self.assertEqual(list_length, len(end_accent_phrase_list))
|
396 |
-
self.assertEqual(yukarin_sa_args["speaker_id"], 1)
|
397 |
-
|
398 |
-
numpy.testing.assert_array_equal(
|
399 |
-
vowel_phoneme_list,
|
400 |
-
numpy.array(
|
401 |
-
[
|
402 |
-
0,
|
403 |
-
30,
|
404 |
-
4,
|
405 |
-
21,
|
406 |
-
21,
|
407 |
-
7,
|
408 |
-
0,
|
409 |
-
21,
|
410 |
-
30,
|
411 |
-
14,
|
412 |
-
6,
|
413 |
-
0,
|
414 |
-
]
|
415 |
-
),
|
416 |
-
)
|
417 |
-
numpy.testing.assert_array_equal(
|
418 |
-
consonant_phoneme_list,
|
419 |
-
numpy.array(
|
420 |
-
[
|
421 |
-
-1,
|
422 |
-
23,
|
423 |
-
-1,
|
424 |
-
28,
|
425 |
-
10,
|
426 |
-
42,
|
427 |
-
-1,
|
428 |
-
19,
|
429 |
-
19,
|
430 |
-
12,
|
431 |
-
35,
|
432 |
-
-1,
|
433 |
-
]
|
434 |
-
),
|
435 |
-
)
|
436 |
-
numpy.testing.assert_array_equal(
|
437 |
-
start_accent_list, numpy.array([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0])
|
438 |
-
)
|
439 |
-
numpy.testing.assert_array_equal(
|
440 |
-
end_accent_list, numpy.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0])
|
441 |
-
)
|
442 |
-
numpy.testing.assert_array_equal(
|
443 |
-
start_accent_phrase_list, numpy.array([0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
|
444 |
-
)
|
445 |
-
numpy.testing.assert_array_equal(
|
446 |
-
end_accent_phrase_list, numpy.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0])
|
447 |
-
)
|
448 |
-
|
449 |
-
# flatten_morasを使わずに愚直にaccent_phrasesにデータを反映させてみる
|
450 |
-
true_result = deepcopy(self.accent_phrases_hello_hiho)
|
451 |
-
index = 1
|
452 |
-
|
453 |
-
def result_value(i: int):
|
454 |
-
# unvoiced_mora_phoneme_listのPhoneme ID版
|
455 |
-
unvoiced_mora_phoneme_id_list = [
|
456 |
-
OjtPhoneme(p, 0, 0).phoneme_id for p in unvoiced_mora_phoneme_list
|
457 |
-
]
|
458 |
-
if vowel_phoneme_list[i] in unvoiced_mora_phoneme_id_list:
|
459 |
-
return 0
|
460 |
-
return (
|
461 |
-
vowel_phoneme_list[i]
|
462 |
-
+ consonant_phoneme_list[i]
|
463 |
-
+ start_accent_list[i]
|
464 |
-
+ end_accent_list[i]
|
465 |
-
+ start_accent_phrase_list[i]
|
466 |
-
+ end_accent_phrase_list[i]
|
467 |
-
) * 0.5 + 1
|
468 |
-
|
469 |
-
for accent_phrase in true_result:
|
470 |
-
moras = accent_phrase.moras
|
471 |
-
for mora in moras:
|
472 |
-
mora.pitch = result_value(index)
|
473 |
-
index += 1
|
474 |
-
if accent_phrase.pause_mora is not None:
|
475 |
-
accent_phrase.pause_mora.pitch = result_value(index)
|
476 |
-
index += 1
|
477 |
-
|
478 |
-
self.assertEqual(result, true_result)
|
479 |
-
|
480 |
-
def synthesis_test_base(self, audio_query: AudioQuery):
|
481 |
-
accent_phrases = audio_query.accent_phrases
|
482 |
-
|
483 |
-
# decode forwardのために適当にpitchとlengthを設定し、リストで持っておく
|
484 |
-
phoneme_length_list = [0.0]
|
485 |
-
phoneme_id_list = [0]
|
486 |
-
f0_list = [0.0]
|
487 |
-
for accent_phrase in accent_phrases:
|
488 |
-
moras = accent_phrase.moras
|
489 |
-
for mora in moras:
|
490 |
-
if mora.consonant is not None:
|
491 |
-
mora.consonant_length = 0.1
|
492 |
-
phoneme_length_list.append(0.1)
|
493 |
-
phoneme_id_list.append(OjtPhoneme(mora.consonant, 0, 0).phoneme_id)
|
494 |
-
mora.vowel_length = 0.2
|
495 |
-
phoneme_length_list.append(0.2)
|
496 |
-
phoneme_id_list.append(OjtPhoneme(mora.vowel, 0, 0).phoneme_id)
|
497 |
-
if mora.vowel not in unvoiced_mora_phoneme_list:
|
498 |
-
mora.pitch = 5.0 + random()
|
499 |
-
f0_list.append(mora.pitch)
|
500 |
-
if accent_phrase.pause_mora is not None:
|
501 |
-
accent_phrase.pause_mora.vowel_length = 0.2
|
502 |
-
phoneme_length_list.append(0.2)
|
503 |
-
phoneme_id_list.append(OjtPhoneme("pau", 0, 0).phoneme_id)
|
504 |
-
f0_list.append(0.0)
|
505 |
-
phoneme_length_list.append(0.0)
|
506 |
-
phoneme_id_list.append(0)
|
507 |
-
f0_list.append(0.0)
|
508 |
-
|
509 |
-
phoneme_length_list[0] = audio_query.prePhonemeLength
|
510 |
-
phoneme_length_list[-1] = audio_query.postPhonemeLength
|
511 |
-
|
512 |
-
for i in range(len(phoneme_length_list)):
|
513 |
-
phoneme_length_list[i] /= audio_query.speedScale
|
514 |
-
|
515 |
-
result = self.synthesis_engine.synthesis(query=audio_query, speaker_id=1)
|
516 |
-
|
517 |
-
# decodeに渡される値の検証
|
518 |
-
decode_args = self.decode_mock.call_args[1]
|
519 |
-
list_length = decode_args["length"]
|
520 |
-
self.assertEqual(
|
521 |
-
list_length,
|
522 |
-
int(sum([round(p * 24000 / 256) for p in phoneme_length_list])),
|
523 |
-
)
|
524 |
-
|
525 |
-
num_phoneme = OjtPhoneme.num_phoneme
|
526 |
-
# mora_phoneme_listのPhoneme ID版
|
527 |
-
mora_phoneme_id_list = [
|
528 |
-
OjtPhoneme(p, 0, 0).phoneme_id for p in mora_phoneme_list
|
529 |
-
]
|
530 |
-
|
531 |
-
# numpy.repeatをfor文でやる
|
532 |
-
f0 = []
|
533 |
-
phoneme = []
|
534 |
-
f0_index = 0
|
535 |
-
mean_f0 = []
|
536 |
-
for i, phoneme_length in enumerate(phoneme_length_list):
|
537 |
-
f0_single = numpy.array(f0_list[f0_index], dtype=numpy.float32) * (
|
538 |
-
2**audio_query.pitchScale
|
539 |
-
)
|
540 |
-
for _ in range(int(round(phoneme_length * (24000 / 256)))):
|
541 |
-
f0.append([f0_single])
|
542 |
-
phoneme_s = []
|
543 |
-
for _ in range(num_phoneme):
|
544 |
-
phoneme_s.append(0)
|
545 |
-
# one hot
|
546 |
-
phoneme_s[phoneme_id_list[i]] = 1
|
547 |
-
phoneme.append(phoneme_s)
|
548 |
-
# consonantとvowelを判別し、vowelであればf0_indexを一つ進める
|
549 |
-
if phoneme_id_list[i] in mora_phoneme_id_list:
|
550 |
-
if f0_single > 0:
|
551 |
-
mean_f0.append(f0_single)
|
552 |
-
f0_index += 1
|
553 |
-
|
554 |
-
mean_f0 = numpy.array(mean_f0, dtype=numpy.float32).mean()
|
555 |
-
f0 = numpy.array(f0, dtype=numpy.float32)
|
556 |
-
for i in range(len(f0)):
|
557 |
-
if f0[i][0] != 0.0:
|
558 |
-
f0[i][0] = (f0[i][0] - mean_f0) * audio_query.intonationScale + mean_f0
|
559 |
-
|
560 |
-
phoneme = numpy.array(phoneme, dtype=numpy.float32)
|
561 |
-
|
562 |
-
# 乱数の影響で数値の位置がずれが生じるので、大半(4/5)があっていればよしとする
|
563 |
-
# また、上の部分のint(round(phoneme_length * (24000 / 256)))の影響で
|
564 |
-
# 本来のf0/phonemeとテスト生成したf0/phonemeの長さが変わることがあり、
|
565 |
-
# テスト生成したものが若干長くなることがあるので、本来のものの長さを基準にassertする
|
566 |
-
assert_f0_count = 0
|
567 |
-
decode_f0 = decode_args["f0"]
|
568 |
-
for i in range(len(decode_f0)):
|
569 |
-
# 乱数の影響等で数値にずれが生じるので、10の-5乗までの近似値であれば許容する
|
570 |
-
assert_f0_count += math.isclose(f0[i][0], decode_f0[i][0], rel_tol=10e-5)
|
571 |
-
self.assertTrue(assert_f0_count >= int(len(decode_f0) / 5) * 4)
|
572 |
-
assert_phoneme_count = 0
|
573 |
-
decode_phoneme = decode_args["phoneme"]
|
574 |
-
for i in range(len(decode_phoneme)):
|
575 |
-
assert_true_count = 0
|
576 |
-
for j in range(len(decode_phoneme[i])):
|
577 |
-
assert_true_count += bool(phoneme[i][j] == decode_phoneme[i][j])
|
578 |
-
assert_phoneme_count += assert_true_count == num_phoneme
|
579 |
-
self.assertTrue(assert_phoneme_count >= int(len(decode_phoneme) / 5) * 4)
|
580 |
-
self.assertEqual(decode_args["speaker_id"], 1)
|
581 |
-
|
582 |
-
# decode forwarderのmockを使う
|
583 |
-
true_result = decode_mock(list_length, num_phoneme, f0, phoneme, 1)
|
584 |
-
|
585 |
-
true_result *= audio_query.volumeScale
|
586 |
-
|
587 |
-
# TODO: resampyの部分は値の検証しようがないので、パスする
|
588 |
-
if audio_query.outputSamplingRate != 24000:
|
589 |
-
return
|
590 |
-
|
591 |
-
assert_result_count = 0
|
592 |
-
for i in range(len(true_result)):
|
593 |
-
if audio_query.outputStereo:
|
594 |
-
assert_result_count += math.isclose(
|
595 |
-
true_result[i], result[i][0], rel_tol=10e-5
|
596 |
-
) and math.isclose(true_result[i], result[i][1], rel_tol=10e-5)
|
597 |
-
else:
|
598 |
-
assert_result_count += math.isclose(
|
599 |
-
true_result[i], result[i], rel_tol=10e-5
|
600 |
-
)
|
601 |
-
self.assertTrue(assert_result_count >= int(len(true_result) / 5) * 4)
|
602 |
-
|
603 |
-
def test_synthesis(self):
|
604 |
-
audio_query = AudioQuery(
|
605 |
-
accent_phrases=deepcopy(self.accent_phrases_hello_hiho),
|
606 |
-
speedScale=1.0,
|
607 |
-
pitchScale=1.0,
|
608 |
-
intonationScale=1.0,
|
609 |
-
volumeScale=1.0,
|
610 |
-
prePhonemeLength=0.1,
|
611 |
-
postPhonemeLength=0.1,
|
612 |
-
outputSamplingRate=24000,
|
613 |
-
outputStereo=False,
|
614 |
-
# このテスト内では使わないので生成不要
|
615 |
-
kana="",
|
616 |
-
)
|
617 |
-
|
618 |
-
self.synthesis_test_base(audio_query)
|
619 |
-
|
620 |
-
# speed scaleのテスト
|
621 |
-
audio_query.speedScale = 1.2
|
622 |
-
self.synthesis_test_base(audio_query)
|
623 |
-
|
624 |
-
# pitch scaleのテスト
|
625 |
-
audio_query.pitchScale = 1.5
|
626 |
-
audio_query.speedScale = 1.0
|
627 |
-
self.synthesis_test_base(audio_query)
|
628 |
-
|
629 |
-
# intonation scaleのテスト
|
630 |
-
audio_query.pitchScale = 1.0
|
631 |
-
audio_query.intonationScale = 1.4
|
632 |
-
self.synthesis_test_base(audio_query)
|
633 |
-
|
634 |
-
# volume scaleのテスト
|
635 |
-
audio_query.intonationScale = 1.0
|
636 |
-
audio_query.volumeScale = 2.0
|
637 |
-
self.synthesis_test_base(audio_query)
|
638 |
-
|
639 |
-
# pre/post phoneme lengthのテスト
|
640 |
-
audio_query.volumeScale = 1.0
|
641 |
-
audio_query.prePhonemeLength = 0.5
|
642 |
-
audio_query.postPhonemeLength = 0.5
|
643 |
-
self.synthesis_test_base(audio_query)
|
644 |
-
|
645 |
-
# output sampling rateのテスト
|
646 |
-
audio_query.prePhonemeLength = 0.1
|
647 |
-
audio_query.postPhonemeLength = 0.1
|
648 |
-
audio_query.outputSamplingRate = 48000
|
649 |
-
self.synthesis_test_base(audio_query)
|
650 |
-
|
651 |
-
# output stereoのテスト
|
652 |
-
audio_query.outputSamplingRate = 24000
|
653 |
-
audio_query.outputStereo = True
|
654 |
-
self.synthesis_test_base(audio_query)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import os
|
3 |
-
import time
|
4 |
-
from typing import List
|
5 |
-
|
6 |
-
import torch
|
7 |
-
|
8 |
-
from eval import verification
|
9 |
-
from utils.utils_logging import AverageMeter
|
10 |
-
|
11 |
-
|
12 |
-
class CallBackVerification(object):
|
13 |
-
def __init__(self, frequent, rank, val_targets, rec_prefix, image_size=(112, 112)):
|
14 |
-
self.frequent: int = frequent
|
15 |
-
self.rank: int = rank
|
16 |
-
self.highest_acc: float = 0.0
|
17 |
-
self.highest_acc_list: List[float] = [0.0] * len(val_targets)
|
18 |
-
self.ver_list: List[object] = []
|
19 |
-
self.ver_name_list: List[str] = []
|
20 |
-
if self.rank is 0:
|
21 |
-
self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
|
22 |
-
|
23 |
-
def ver_test(self, backbone: torch.nn.Module, global_step: int):
|
24 |
-
results = []
|
25 |
-
for i in range(len(self.ver_list)):
|
26 |
-
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
|
27 |
-
self.ver_list[i], backbone, 10, 10)
|
28 |
-
logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
|
29 |
-
logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))
|
30 |
-
if acc2 > self.highest_acc_list[i]:
|
31 |
-
self.highest_acc_list[i] = acc2
|
32 |
-
logging.info(
|
33 |
-
'[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
|
34 |
-
results.append(acc2)
|
35 |
-
|
36 |
-
def init_dataset(self, val_targets, data_dir, image_size):
|
37 |
-
for name in val_targets:
|
38 |
-
path = os.path.join(data_dir, name + ".bin")
|
39 |
-
if os.path.exists(path):
|
40 |
-
data_set = verification.load_bin(path, image_size)
|
41 |
-
self.ver_list.append(data_set)
|
42 |
-
self.ver_name_list.append(name)
|
43 |
-
|
44 |
-
def __call__(self, num_update, backbone: torch.nn.Module):
|
45 |
-
if self.rank is 0 and num_update > 0 and num_update % self.frequent == 0:
|
46 |
-
backbone.eval()
|
47 |
-
self.ver_test(backbone, num_update)
|
48 |
-
backbone.train()
|
49 |
-
|
50 |
-
|
51 |
-
class CallBackLogging(object):
|
52 |
-
def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None):
|
53 |
-
self.frequent: int = frequent
|
54 |
-
self.rank: int = rank
|
55 |
-
self.time_start = time.time()
|
56 |
-
self.total_step: int = total_step
|
57 |
-
self.batch_size: int = batch_size
|
58 |
-
self.world_size: int = world_size
|
59 |
-
self.writer = writer
|
60 |
-
|
61 |
-
self.init = False
|
62 |
-
self.tic = 0
|
63 |
-
|
64 |
-
def __call__(self,
|
65 |
-
global_step: int,
|
66 |
-
loss: AverageMeter,
|
67 |
-
epoch: int,
|
68 |
-
fp16: bool,
|
69 |
-
learning_rate: float,
|
70 |
-
grad_scaler: torch.cuda.amp.GradScaler):
|
71 |
-
if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
|
72 |
-
if self.init:
|
73 |
-
try:
|
74 |
-
speed: float = self.frequent * self.batch_size / (time.time() - self.tic)
|
75 |
-
speed_total = speed * self.world_size
|
76 |
-
except ZeroDivisionError:
|
77 |
-
speed_total = float('inf')
|
78 |
-
|
79 |
-
time_now = (time.time() - self.time_start) / 3600
|
80 |
-
time_total = time_now / ((global_step + 1) / self.total_step)
|
81 |
-
time_for_end = time_total - time_now
|
82 |
-
if self.writer is not None:
|
83 |
-
self.writer.add_scalar('time_for_end', time_for_end, global_step)
|
84 |
-
self.writer.add_scalar('learning_rate', learning_rate, global_step)
|
85 |
-
self.writer.add_scalar('loss', loss.avg, global_step)
|
86 |
-
if fp16:
|
87 |
-
msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d " \
|
88 |
-
"Fp16 Grad Scale: %2.f Required: %1.f hours" % (
|
89 |
-
speed_total, loss.avg, learning_rate, epoch, global_step,
|
90 |
-
grad_scaler.get_scale(), time_for_end
|
91 |
-
)
|
92 |
-
else:
|
93 |
-
msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d " \
|
94 |
-
"Required: %1.f hours" % (
|
95 |
-
speed_total, loss.avg, learning_rate, epoch, global_step, time_for_end
|
96 |
-
)
|
97 |
-
logging.info(msg)
|
98 |
-
loss.reset()
|
99 |
-
self.tic = time.time()
|
100 |
-
else:
|
101 |
-
self.init = True
|
102 |
-
self.tic = time.time()
|
103 |
-
|
104 |
-
|
105 |
-
class CallBackModelCheckpoint(object):
|
106 |
-
def __init__(self, rank, output="./"):
|
107 |
-
self.rank: int = rank
|
108 |
-
self.output: str = output
|
109 |
-
|
110 |
-
def __call__(self, global_step, backbone, partial_fc, ):
|
111 |
-
if global_step > 100 and self.rank == 0:
|
112 |
-
path_module = os.path.join(self.output, "backbone.pth")
|
113 |
-
torch.save(backbone.module.state_dict(), path_module)
|
114 |
-
logging.info("Pytorch Model Saved in '{}'".format(path_module))
|
115 |
-
|
116 |
-
if global_step > 100 and partial_fc is not None:
|
117 |
-
partial_fc.save_params()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/README.md
DELETED
@@ -1,195 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: bingo
|
3 |
-
emoji: 📉
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: red
|
6 |
-
sdk: docker
|
7 |
-
pinned: true
|
8 |
-
license: mit
|
9 |
-
duplicated_from: hf4all/bingo
|
10 |
-
---
|
11 |
-
|
12 |
-
<div align="center">
|
13 |
-
|
14 |
-
# Bingo
|
15 |
-
|
16 |
-
Bingo,一个让你呼吸顺畅 New Bing。
|
17 |
-
|
18 |
-
高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。
|
19 |
-
|
20 |
-

|
21 |
-

|
22 |
-
[](https://hub.docker.com/repository/docker/weaigc/bingo/)
|
23 |
-
[](https://hub.docker.com/repository/docker/weaigc/bingo/)
|
24 |
-
[](https://github.com/weaigc/bingo/blob/main/license)
|
25 |
-
|
26 |
-
</div>
|
27 |
-
|
28 |
-
## 演示站点
|
29 |
-
|
30 |
-
https://bing.github1s.tk
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
[](https://bing.github1s.tk)
|
35 |
-
|
36 |
-
## 功能和特点
|
37 |
-
|
38 |
-
- 完全基于 Next.js 重写,高度还原 New Bing Web 版 UI,使用体验和 Bing AI 基本一致。
|
39 |
-
- 支持 Docker 构建,方便快捷地部署和访问。
|
40 |
-
- Cookie 可全局配置,全局共享。
|
41 |
-
- 支持持续语音对话
|
42 |
-
|
43 |
-
## RoadMap
|
44 |
-
|
45 |
-
- [x] 支持 wss 转发
|
46 |
-
- [x] 支持一键部署
|
47 |
-
- [x] 优化移动端展示
|
48 |
-
- [x] 支持画图
|
49 |
-
- [x] 支持语音输入(支持语音指令,目前仅支持 PC 版 Edge 及 Chrome 浏览器)
|
50 |
-
- [x] 支持语音输出(需要手动开启)
|
51 |
-
- [x] 支持图片输入
|
52 |
-
- [x] 支持自定义域名
|
53 |
-
- [ ] 支持历史记录
|
54 |
-
- [ ] 适配深色模式
|
55 |
-
- [ ] 支持内置提示词
|
56 |
-
- [ ] 支持离线访问
|
57 |
-
- [ ] 国际化翻译
|
58 |
-
|
59 |
-
## 一键部署
|
60 |
-
你也可以一键部署自己的 New Bing AI 到 🤗 HuggingFace 。
|
61 |
-
|
62 |
-
### 部署到 Huggingface
|
63 |
-
1. 点击此图标
|
64 |
-
[](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic),配置可以不改。
|
65 |
-
|
66 |
-
2. 部署署完成后,点击“设置” 》“站点域名”,点一下,复制一下 HF 域名信息,然后分享给别人即可。
|
67 |
-
|
68 |
-
> Huggingface 不支持绑定自己的域名,不过我们可以使用曲线救国的方式来达到这个目的
|
69 |
-
> 1. 方式二,借助 Cloudflare Workers [部署Cloudflare Workers](#使用Cloudflare-Workers自定义域名)
|
70 |
-
> 2. 方式一,借助 Github Pages 及 iframe [如何绑定域名](https://github.com/weaigc/bingo/issues/4)
|
71 |
-
|
72 |
-
### 使用Cloudflare Workers自定义域名
|
73 |
-
|
74 |
-
> 核心代码 [worker.js](./cloudflare/worker.js)
|
75 |
-
|
76 |
-
- [注册 Cloudflare 账号](https://dash.cloudflare.com/sign-up)
|
77 |
-
|
78 |
-
- 添加一个新的网站,需要你有自己的域名并且将域名`Name Server`托管给 Cloudflare 才行(更多信息可自行 Google)
|
79 |
-
|
80 |
-
- 通过左侧菜单进入「Workers」,并点击「Create a Worker」。
|
81 |
-
|
82 |
-
- 创建 Worker 服务,复制 [worker.js](./cloudflare/worker.js) 全部代码,粘贴至创建的服务中,根据注释进行改动,保存并部署。
|
83 |
-
|
84 |
-
- 触发器 中自定义访问域名。
|
85 |
-
|
86 |
-
### 部署其它平台
|
87 |
-
<details>
|
88 |
-
<summary>
|
89 |
-
由于其他平台目前遭到 New Bing 封杀,会遇到很多问题,不再做推荐,有需要的可以自行查看
|
90 |
-
</summary>
|
91 |
-
|
92 |
-
#### 部署到 Netlify
|
93 |
-
[](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo)
|
94 |
-
|
95 |
-
#### 部署到 Vercel
|
96 |
-
如果你是 Vercel 付费用户,可以点以下链接一键部署到 Vercel。免费版本有[接口超时限制](https://vercel.com/docs/concepts/limits/overview),不推荐使用
|
97 |
-
|
98 |
-
[](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example)
|
99 |
-
|
100 |
-
#### 部署到 Render
|
101 |
-
|
102 |
-
[](https://render.com/deploy?repo=https://github.com/weaigc/bingo)
|
103 |
-
</details>
|
104 |
-
|
105 |
-
## 环境和依赖
|
106 |
-
|
107 |
-
- Node.js >= 18
|
108 |
-
- Bing AI 的[身份信息](#如何获取-BING_HEADER))
|
109 |
-
|
110 |
-
## 安装和使用
|
111 |
-
|
112 |
-
* 使用 Node 启动
|
113 |
-
|
114 |
-
```bash
|
115 |
-
git clone https://github.com/weaigc/bingo.git
|
116 |
-
npm i # 推荐使用 pnpm i
|
117 |
-
npm run build
|
118 |
-
npm run start
|
119 |
-
```
|
120 |
-
|
121 |
-
* 使用 Docker 启动
|
122 |
-
```bash
|
123 |
-
docker pull weaigc/bingo
|
124 |
-
docker run --rm -it -p 7860:7860 weaigc/bingo
|
125 |
-
# 或者
|
126 |
-
docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo
|
127 |
-
```
|
128 |
-
|
129 |
-
## 如何获取 BING_HEADER
|
130 |
-
> 配置了 BING_HEADER 意味着你将自己的账号共享给所有���用此服务的人,如果不需要免登录画图的功能,不建议设置此变量
|
131 |
-
|
132 |
-
打开 https://www.bing.com 并登录,然后访问 https://www.bing.com/turing/captcha/challenge,通过人机校验,然后
|
133 |
-
|
134 |
-

|
135 |
-
|
136 |
-
> 复制出来的内容应该如下所示。确认格式无误后,打开 https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 ,粘贴进去,点击“转成 BING_HEADER 并复制”,然后从剪切板粘贴即可得到。(你也可以先在网页上进行验证)
|
137 |
-
|
138 |
-
以下是格式参考,需要注意的是,网页端保存的格式是以`curl`开头, 而服务端配置的 `BING_HEADER` 是 `base64` 格式,两者不能互通。
|
139 |
-
<details>
|
140 |
-
<summary>正常格式/网页端保存的格式(格式仅供参考)</summary>
|
141 |
-
|
142 |
-
```
|
143 |
-
curl 'https://www.bing.com/turing/captcha/challenge' \
|
144 |
-
-H 'authority: www.bing.com' \
|
145 |
-
-H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \
|
146 |
-
-H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \
|
147 |
-
-H 'cache-control: max-age=0' \
|
148 |
-
-H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0u
hgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \
|
149 |
-
-H 'dnt: 1' \
|
150 |
-
-H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \
|
151 |
-
-H 'sec-ch-ua-arch: "x86"' \
|
152 |
-
-H 'sec-ch-ua-bitness: "64"' \
|
153 |
-
-H 'sec-ch-ua-full-version: "116.0.1938.29"' \
|
154 |
-
-H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \
|
155 |
-
-H 'sec-ch-ua-mobile: ?0' \
|
156 |
-
-H 'sec-ch-ua-model: ""' \
|
157 |
-
-H 'sec-ch-ua-platform: "Windows"' \
|
158 |
-
-H 'sec-ch-ua-platform-version: "15.0.0"' \
|
159 |
-
-H 'sec-fetch-dest: document' \
|
160 |
-
-H 'sec-fetch-mode: navigate' \
|
161 |
-
-H 'sec-fetch-site: none' \
|
162 |
-
-H 'sec-fetch-user: ?1' \
|
163 |
-
-H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \
|
164 |
-
-H 'sec-ms-gec-version: 1-116.0.1938.29' \
|
165 |
-
-H 'upgrade-insecure-requests: 1' \
|
166 |
-
-H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \
|
167 |
-
-H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \
|
168 |
-
-H 'x-edge-shopping-flag: 1' \
|
169 |
-
--compressed
|
170 |
-
```
|
171 |
-
</details>
|
172 |
-
|
173 |
-
<details>
|
174 |
-
<summary>转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式)</summary>
|
175 |
-
|
176 |
-
```
|
177 |
-
Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2
cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1V
SURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRv
Y3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA==
|
178 |
-
```
|
179 |
-
</details>
|
180 |
-
|
181 |
-
|
182 |
-
## 鸣谢
|
183 |
-
- 感谢 [EdgeGPT](https://github.com/acheong08/EdgeGPT) 提供的代理 API 的方法。
|
184 |
-
- 感谢 [Vercel AI](https://github.com/vercel-labs/ai-chatbot) 提供的基础脚手架和 [ChatHub](https://github.com/chathub-dev/chathub) [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) 提供的部分代码。
|
185 |
-
|
186 |
-
|
187 |
-
## 答疑及交流
|
188 |
-
|
189 |
-
<image src="./docs/images/wechat.png" width=240 />
|
190 |
-
|
191 |
-
## License
|
192 |
-
|
193 |
-
MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE).
|
194 |
-
|
195 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/losses/specloss.py
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import typing as tp
|
8 |
-
|
9 |
-
import numpy as np
|
10 |
-
from torchaudio.transforms import MelSpectrogram
|
11 |
-
import torch
|
12 |
-
from torch import nn
|
13 |
-
from torch.nn import functional as F
|
14 |
-
|
15 |
-
from ..modules import pad_for_conv1d
|
16 |
-
|
17 |
-
|
18 |
-
class MelSpectrogramWrapper(nn.Module):
|
19 |
-
"""Wrapper around MelSpectrogram torchaudio transform providing proper padding
|
20 |
-
and additional post-processing including log scaling.
|
21 |
-
|
22 |
-
Args:
|
23 |
-
n_mels (int): Number of mel bins.
|
24 |
-
n_fft (int): Number of fft.
|
25 |
-
hop_length (int): Hop size.
|
26 |
-
win_length (int): Window length.
|
27 |
-
n_mels (int): Number of mel bins.
|
28 |
-
sample_rate (int): Sample rate.
|
29 |
-
f_min (float or None): Minimum frequency.
|
30 |
-
f_max (float or None): Maximum frequency.
|
31 |
-
log (bool): Whether to scale with log.
|
32 |
-
normalized (bool): Whether to normalize the melspectrogram.
|
33 |
-
floor_level (float): Floor level based on human perception (default=1e-5).
|
34 |
-
"""
|
35 |
-
def __init__(self, n_fft: int = 1024, hop_length: int = 256, win_length: tp.Optional[int] = None,
|
36 |
-
n_mels: int = 80, sample_rate: float = 22050, f_min: float = 0.0, f_max: tp.Optional[float] = None,
|
37 |
-
log: bool = True, normalized: bool = False, floor_level: float = 1e-5):
|
38 |
-
super().__init__()
|
39 |
-
self.n_fft = n_fft
|
40 |
-
hop_length = int(hop_length)
|
41 |
-
self.hop_length = hop_length
|
42 |
-
self.mel_transform = MelSpectrogram(n_mels=n_mels, sample_rate=sample_rate, n_fft=n_fft, hop_length=hop_length,
|
43 |
-
win_length=win_length, f_min=f_min, f_max=f_max, normalized=normalized,
|
44 |
-
window_fn=torch.hann_window, center=False)
|
45 |
-
self.floor_level = floor_level
|
46 |
-
self.log = log
|
47 |
-
|
48 |
-
def forward(self, x):
|
49 |
-
p = int((self.n_fft - self.hop_length) // 2)
|
50 |
-
if len(x.shape) == 2:
|
51 |
-
x = x.unsqueeze(1)
|
52 |
-
x = F.pad(x, (p, p), "reflect")
|
53 |
-
# Make sure that all the frames are full.
|
54 |
-
# The combination of `pad_for_conv1d` and the above padding
|
55 |
-
# will make the output of size ceil(T / hop).
|
56 |
-
x = pad_for_conv1d(x, self.n_fft, self.hop_length)
|
57 |
-
self.mel_transform.to(x.device)
|
58 |
-
mel_spec = self.mel_transform(x)
|
59 |
-
B, C, freqs, frame = mel_spec.shape
|
60 |
-
if self.log:
|
61 |
-
mel_spec = torch.log10(self.floor_level + mel_spec)
|
62 |
-
return mel_spec.reshape(B, C * freqs, frame)
|
63 |
-
|
64 |
-
|
65 |
-
class MelSpectrogramL1Loss(torch.nn.Module):
|
66 |
-
"""L1 Loss on MelSpectrogram.
|
67 |
-
|
68 |
-
Args:
|
69 |
-
sample_rate (int): Sample rate.
|
70 |
-
n_fft (int): Number of fft.
|
71 |
-
hop_length (int): Hop size.
|
72 |
-
win_length (int): Window length.
|
73 |
-
n_mels (int): Number of mel bins.
|
74 |
-
f_min (float or None): Minimum frequency.
|
75 |
-
f_max (float or None): Maximum frequency.
|
76 |
-
log (bool): Whether to scale with log.
|
77 |
-
normalized (bool): Whether to normalize the melspectrogram.
|
78 |
-
floor_level (float): Floor level value based on human perception (default=1e-5).
|
79 |
-
"""
|
80 |
-
def __init__(self, sample_rate: int, n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024,
|
81 |
-
n_mels: int = 80, f_min: float = 0.0, f_max: tp.Optional[float] = None,
|
82 |
-
log: bool = True, normalized: bool = False, floor_level: float = 1e-5):
|
83 |
-
super().__init__()
|
84 |
-
self.l1 = torch.nn.L1Loss()
|
85 |
-
self.melspec = MelSpectrogramWrapper(n_fft=n_fft, hop_length=hop_length, win_length=win_length,
|
86 |
-
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
|
87 |
-
log=log, normalized=normalized, floor_level=floor_level)
|
88 |
-
|
89 |
-
def forward(self, x, y):
|
90 |
-
self.melspec.to(x.device)
|
91 |
-
s_x = self.melspec(x)
|
92 |
-
s_y = self.melspec(y)
|
93 |
-
return self.l1(s_x, s_y)
|
94 |
-
|
95 |
-
|
96 |
-
class MultiScaleMelSpectrogramLoss(nn.Module):
|
97 |
-
"""Multi-Scale spectrogram loss (msspec).
|
98 |
-
|
99 |
-
Args:
|
100 |
-
sample_rate (int): Sample rate.
|
101 |
-
range_start (int): Power of 2 to use for the first scale.
|
102 |
-
range_stop (int): Power of 2 to use for the last scale.
|
103 |
-
n_mels (int): Number of mel bins.
|
104 |
-
f_min (float): Minimum frequency.
|
105 |
-
f_max (float or None): Maximum frequency.
|
106 |
-
normalized (bool): Whether to normalize the melspectrogram.
|
107 |
-
alphas (bool): Whether to use alphas as coefficients or not.
|
108 |
-
floor_level (float): Floor level value based on human perception (default=1e-5).
|
109 |
-
"""
|
110 |
-
def __init__(self, sample_rate: int, range_start: int = 6, range_end: int = 11,
|
111 |
-
n_mels: int = 64, f_min: float = 0.0, f_max: tp.Optional[float] = None,
|
112 |
-
normalized: bool = False, alphas: bool = True, floor_level: float = 1e-5):
|
113 |
-
super().__init__()
|
114 |
-
l1s = list()
|
115 |
-
l2s = list()
|
116 |
-
self.alphas = list()
|
117 |
-
self.total = 0
|
118 |
-
self.normalized = normalized
|
119 |
-
for i in range(range_start, range_end):
|
120 |
-
l1s.append(
|
121 |
-
MelSpectrogramWrapper(n_fft=2 ** i, hop_length=(2 ** i) / 4, win_length=2 ** i,
|
122 |
-
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
|
123 |
-
log=False, normalized=normalized, floor_level=floor_level))
|
124 |
-
l2s.append(
|
125 |
-
MelSpectrogramWrapper(n_fft=2 ** i, hop_length=(2 ** i) / 4, win_length=2 ** i,
|
126 |
-
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
|
127 |
-
log=True, normalized=normalized, floor_level=floor_level))
|
128 |
-
if alphas:
|
129 |
-
self.alphas.append(np.sqrt(2 ** i - 1))
|
130 |
-
else:
|
131 |
-
self.alphas.append(1)
|
132 |
-
self.total += self.alphas[-1] + 1
|
133 |
-
|
134 |
-
self.l1s = nn.ModuleList(l1s)
|
135 |
-
self.l2s = nn.ModuleList(l2s)
|
136 |
-
|
137 |
-
def forward(self, x, y):
|
138 |
-
loss = 0.0
|
139 |
-
self.l1s.to(x.device)
|
140 |
-
self.l2s.to(x.device)
|
141 |
-
for i in range(len(self.alphas)):
|
142 |
-
s_x_1 = self.l1s[i](x)
|
143 |
-
s_y_1 = self.l1s[i](y)
|
144 |
-
s_x_2 = self.l2s[i](x)
|
145 |
-
s_y_2 = self.l2s[i](y)
|
146 |
-
loss += F.l1_loss(s_x_1, s_y_1) + self.alphas[i] * F.mse_loss(s_x_2, s_y_2)
|
147 |
-
if self.normalized:
|
148 |
-
loss = loss / self.total
|
149 |
-
return loss
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/resample.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
|
2 |
-
# LICENSE is in incl_licenses directory.
|
3 |
-
|
4 |
-
import torch.nn as nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
from .filter import LowPassFilter1d
|
7 |
-
from .filter import kaiser_sinc_filter1d
|
8 |
-
|
9 |
-
|
10 |
-
class UpSample1d(nn.Module):
|
11 |
-
def __init__(self, ratio=2, kernel_size=None):
|
12 |
-
super().__init__()
|
13 |
-
self.ratio = ratio
|
14 |
-
self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
|
15 |
-
self.stride = ratio
|
16 |
-
self.pad = self.kernel_size // ratio - 1
|
17 |
-
self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
|
18 |
-
self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
|
19 |
-
filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
|
20 |
-
half_width=0.6 / ratio,
|
21 |
-
kernel_size=self.kernel_size)
|
22 |
-
self.register_buffer("filter", filter)
|
23 |
-
|
24 |
-
# x: [B, C, T]
|
25 |
-
def forward(self, x):
|
26 |
-
_, C, _ = x.shape
|
27 |
-
|
28 |
-
x = F.pad(x, (self.pad, self.pad), mode='replicate')
|
29 |
-
x = self.ratio * F.conv_transpose1d(
|
30 |
-
x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
|
31 |
-
x = x[..., self.pad_left:-self.pad_right]
|
32 |
-
|
33 |
-
return x
|
34 |
-
|
35 |
-
|
36 |
-
class DownSample1d(nn.Module):
|
37 |
-
def __init__(self, ratio=2, kernel_size=None):
|
38 |
-
super().__init__()
|
39 |
-
self.ratio = ratio
|
40 |
-
self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
|
41 |
-
self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
|
42 |
-
half_width=0.6 / ratio,
|
43 |
-
stride=ratio,
|
44 |
-
kernel_size=self.kernel_size)
|
45 |
-
|
46 |
-
def forward(self, x):
|
47 |
-
xx = self.lowpass(x)
|
48 |
-
|
49 |
-
return xx
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGText/GlyphControl/ldm/modules/midas/midas/__init__.py
DELETED
File without changes
|
spaces/AIGText/GlyphControl/ldm/modules/midas/midas/dpt_depth.py
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
from .base_model import BaseModel
|
6 |
-
from .blocks import (
|
7 |
-
FeatureFusionBlock,
|
8 |
-
FeatureFusionBlock_custom,
|
9 |
-
Interpolate,
|
10 |
-
_make_encoder,
|
11 |
-
forward_vit,
|
12 |
-
)
|
13 |
-
|
14 |
-
|
15 |
-
def _make_fusion_block(features, use_bn):
|
16 |
-
return FeatureFusionBlock_custom(
|
17 |
-
features,
|
18 |
-
nn.ReLU(False),
|
19 |
-
deconv=False,
|
20 |
-
bn=use_bn,
|
21 |
-
expand=False,
|
22 |
-
align_corners=True,
|
23 |
-
)
|
24 |
-
|
25 |
-
|
26 |
-
class DPT(BaseModel):
|
27 |
-
def __init__(
|
28 |
-
self,
|
29 |
-
head,
|
30 |
-
features=256,
|
31 |
-
backbone="vitb_rn50_384",
|
32 |
-
readout="project",
|
33 |
-
channels_last=False,
|
34 |
-
use_bn=False,
|
35 |
-
):
|
36 |
-
|
37 |
-
super(DPT, self).__init__()
|
38 |
-
|
39 |
-
self.channels_last = channels_last
|
40 |
-
|
41 |
-
hooks = {
|
42 |
-
"vitb_rn50_384": [0, 1, 8, 11],
|
43 |
-
"vitb16_384": [2, 5, 8, 11],
|
44 |
-
"vitl16_384": [5, 11, 17, 23],
|
45 |
-
}
|
46 |
-
|
47 |
-
# Instantiate backbone and reassemble blocks
|
48 |
-
self.pretrained, self.scratch = _make_encoder(
|
49 |
-
backbone,
|
50 |
-
features,
|
51 |
-
False, # Set to true of you want to train from scratch, uses ImageNet weights
|
52 |
-
groups=1,
|
53 |
-
expand=False,
|
54 |
-
exportable=False,
|
55 |
-
hooks=hooks[backbone],
|
56 |
-
use_readout=readout,
|
57 |
-
)
|
58 |
-
|
59 |
-
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
|
60 |
-
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
|
61 |
-
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
|
62 |
-
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
|
63 |
-
|
64 |
-
self.scratch.output_conv = head
|
65 |
-
|
66 |
-
|
67 |
-
def forward(self, x):
|
68 |
-
if self.channels_last == True:
|
69 |
-
x.contiguous(memory_format=torch.channels_last)
|
70 |
-
|
71 |
-
layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
|
72 |
-
|
73 |
-
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
74 |
-
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
75 |
-
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
76 |
-
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
77 |
-
|
78 |
-
path_4 = self.scratch.refinenet4(layer_4_rn)
|
79 |
-
path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
|
80 |
-
path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
|
81 |
-
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
82 |
-
|
83 |
-
out = self.scratch.output_conv(path_1)
|
84 |
-
|
85 |
-
return out
|
86 |
-
|
87 |
-
|
88 |
-
class DPTDepthModel(DPT):
|
89 |
-
def __init__(self, path=None, non_negative=True, **kwargs):
|
90 |
-
features = kwargs["features"] if "features" in kwargs else 256
|
91 |
-
|
92 |
-
head = nn.Sequential(
|
93 |
-
nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
|
94 |
-
Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
|
95 |
-
nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
|
96 |
-
nn.ReLU(True),
|
97 |
-
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
|
98 |
-
nn.ReLU(True) if non_negative else nn.Identity(),
|
99 |
-
nn.Identity(),
|
100 |
-
)
|
101 |
-
|
102 |
-
super().__init__(head, **kwargs)
|
103 |
-
|
104 |
-
if path is not None:
|
105 |
-
self.load(path)
|
106 |
-
|
107 |
-
def forward(self, x):
|
108 |
-
return super().forward(x).squeeze(dim=1)
|
109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIWaves/Debate/src/agents/Component/ExtraComponent.py
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
from .ToolComponent import ToolComponent
|
2 |
-
import json
|
3 |
-
from utils import flatten_dict,get_embedding,matching_category,search_with_api,limit_keys,limit_values
|
4 |
-
import os
|
5 |
-
|
6 |
-
|
7 |
-
class CategoryRequirementsComponent(ToolComponent):
|
8 |
-
def __init__(self, information_path):
|
9 |
-
super().__init__()
|
10 |
-
self.information_dataset = []
|
11 |
-
self.leaf_name = []
|
12 |
-
for toy_path in information_path:
|
13 |
-
with open(toy_path, encoding="utf-8") as json_file:
|
14 |
-
data = json.load(json_file)
|
15 |
-
for d in data:
|
16 |
-
if "/" in d["cat_leaf_name"]:
|
17 |
-
leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]]
|
18 |
-
else:
|
19 |
-
leaf_names = [d["cat_leaf_name"]]
|
20 |
-
for name in leaf_names:
|
21 |
-
self.leaf_name.append(name)
|
22 |
-
new_d = d.copy()
|
23 |
-
new_d["cat_leaf_name"] = name
|
24 |
-
new_d["information"] = flatten_dict(new_d["information"])
|
25 |
-
self.information_dataset.append(new_d)
|
26 |
-
|
27 |
-
self.target_embbeding = get_embedding(
|
28 |
-
self.leaf_name
|
29 |
-
)
|
30 |
-
|
31 |
-
def search_information(self, category, information_dataset):
|
32 |
-
knowledge = {}
|
33 |
-
for d in information_dataset:
|
34 |
-
if category == d["cat_leaf_name"]:
|
35 |
-
knowledge = d["information"]
|
36 |
-
knowledge = {
|
37 |
-
key: value
|
38 |
-
for key, value in knowledge.items()
|
39 |
-
if (value and key != "相关分类")
|
40 |
-
}
|
41 |
-
break
|
42 |
-
return knowledge
|
43 |
-
|
44 |
-
def func(self, agent):
|
45 |
-
prompt = ""
|
46 |
-
messages = agent.long_term_memory
|
47 |
-
outputdict = {}
|
48 |
-
functions = [
|
49 |
-
{
|
50 |
-
"name": "search_information",
|
51 |
-
"description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品",
|
52 |
-
"parameters": {
|
53 |
-
"type": "object",
|
54 |
-
"properties": {
|
55 |
-
"category": {
|
56 |
-
"type": "string",
|
57 |
-
"description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个",
|
58 |
-
},
|
59 |
-
"requirements": {
|
60 |
-
"type": "string",
|
61 |
-
"description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔",
|
62 |
-
},
|
63 |
-
},
|
64 |
-
"required": ["category", "requirements"],
|
65 |
-
},
|
66 |
-
}
|
67 |
-
]
|
68 |
-
|
69 |
-
response = agent.LLM.get_response(
|
70 |
-
messages,
|
71 |
-
None,
|
72 |
-
None,
|
73 |
-
functions=functions,
|
74 |
-
stream=False,
|
75 |
-
function_call={"name": "search_information"},
|
76 |
-
)
|
77 |
-
response_message = json.loads(response["function_call"]["arguments"])
|
78 |
-
category = (
|
79 |
-
response_message["category"] if response_message["category"] else None
|
80 |
-
)
|
81 |
-
requirements = (
|
82 |
-
response_message["requirements"]
|
83 |
-
if response_message["requirements"]
|
84 |
-
else category
|
85 |
-
)
|
86 |
-
if not (category or requirements):
|
87 |
-
return {}
|
88 |
-
|
89 |
-
topk_result = matching_category(
|
90 |
-
category, self.leaf_name, None, self.target_embbeding, top_k=3
|
91 |
-
)
|
92 |
-
|
93 |
-
top1_score = topk_result[1][0]
|
94 |
-
request_items, top_category = search_with_api(requirements, category)
|
95 |
-
|
96 |
-
|
97 |
-
MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"]
|
98 |
-
) if "MIN_CATEGORY_SIM" in os.environ else 0.7
|
99 |
-
|
100 |
-
if top1_score > MIN_CATEGORY_SIM:
|
101 |
-
agent.environment.shared_memory["category"] = topk_result[0][0]
|
102 |
-
category = topk_result[0][0]
|
103 |
-
information = self.search_information(
|
104 |
-
topk_result[0][0], self.information_dataset
|
105 |
-
)
|
106 |
-
information = limit_keys(information, 3)
|
107 |
-
information = limit_values(information, 2)
|
108 |
-
prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。"""
|
109 |
-
if category in top_category:
|
110 |
-
top_category.remove(category)
|
111 |
-
|
112 |
-
recommend = "\n经过搜索后,推荐商品如下:\n"
|
113 |
-
prompt += "筛选出的商品如下:\n"
|
114 |
-
|
115 |
-
for i, request_item in enumerate(request_items):
|
116 |
-
|
117 |
-
itemTitle = request_item["itemTitle"]
|
118 |
-
itemPrice = request_item["itemPrice"]
|
119 |
-
itemPicUrl = request_item["itemPicUrl"]
|
120 |
-
recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n"
|
121 |
-
prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n"
|
122 |
-
outputdict["recommend"] = recommend
|
123 |
-
print(recommend)
|
124 |
-
else:
|
125 |
-
prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买"""
|
126 |
-
outputdict["prompt"] = prompt
|
127 |
-
return outputdict
|
128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/custom_cfg.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
_base_ = [ # 此配置文件将继承所有 `_base_` 中的配置
|
2 |
-
'../configs/_base_/models/custom_model.py', # 模型配置
|
3 |
-
'../configs/_base_/datasets/custom_ds.py', # 数据配置
|
4 |
-
'../configs/_base_/schedules/custom_schedule.py', # 训练策略配置
|
5 |
-
'../configs/_base_/default_runtime.py' # 默认运行设置
|
6 |
-
]
|
7 |
-
|
8 |
-
visualizer = dict(
|
9 |
-
vis_backends=[dict(type='LocalVisBackend'),
|
10 |
-
dict(type='WandbVisBackend')])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Bard.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
import os, requests, json, browser_cookie3, re, random
|
2 |
-
from ...typing import sha256, Dict, get_type_hints
|
3 |
-
|
4 |
-
url = 'https://bard.google.com'
|
5 |
-
model = ['Palm2']
|
6 |
-
supports_stream = False
|
7 |
-
needs_auth = True
|
8 |
-
|
9 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
10 |
-
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
|
11 |
-
domain_name='.google.com')}['__Secure-1PSID']
|
12 |
-
|
13 |
-
formatted = '\n'.join([
|
14 |
-
'%s: %s' % (message['role'], message['content']) for message in messages
|
15 |
-
])
|
16 |
-
prompt = f'{formatted}\nAssistant:'
|
17 |
-
|
18 |
-
proxy = kwargs.get('proxy', False)
|
19 |
-
if proxy == False:
|
20 |
-
print('warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work')
|
21 |
-
|
22 |
-
snlm0e = None
|
23 |
-
conversation_id = None
|
24 |
-
response_id = None
|
25 |
-
choice_id = None
|
26 |
-
|
27 |
-
client = requests.Session()
|
28 |
-
client.proxies = {
|
29 |
-
'http': f'http://{proxy}',
|
30 |
-
'https': f'http://{proxy}'} if proxy else None
|
31 |
-
|
32 |
-
client.headers = {
|
33 |
-
'authority': 'bard.google.com',
|
34 |
-
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
|
35 |
-
'origin': 'https://bard.google.com',
|
36 |
-
'referer': 'https://bard.google.com/',
|
37 |
-
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
|
38 |
-
'x-same-domain': '1',
|
39 |
-
'cookie': f'__Secure-1PSID={psid}'
|
40 |
-
}
|
41 |
-
|
42 |
-
snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
|
43 |
-
client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
|
44 |
-
|
45 |
-
params = {
|
46 |
-
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
|
47 |
-
'_reqid': random.randint(1111, 9999),
|
48 |
-
'rt': 'c'
|
49 |
-
}
|
50 |
-
|
51 |
-
data = {
|
52 |
-
'at': snlm0e,
|
53 |
-
'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
|
54 |
-
|
55 |
-
intents = '.'.join([
|
56 |
-
'assistant',
|
57 |
-
'lamda',
|
58 |
-
'BardFrontendService'
|
59 |
-
])
|
60 |
-
|
61 |
-
response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
|
62 |
-
data=data, params=params)
|
63 |
-
|
64 |
-
chat_data = json.loads(response.content.splitlines()[3])[0][2]
|
65 |
-
if chat_data:
|
66 |
-
json_chat_data = json.loads(chat_data)
|
67 |
-
|
68 |
-
yield json_chat_data[0][0]
|
69 |
-
|
70 |
-
else:
|
71 |
-
yield 'error'
|
72 |
-
|
73 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
74 |
-
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adapting/YouTube-Downloader/tube/var.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
|
3 |
-
PROJECT_ROOT = Path(__package__).absolute().parent
|
4 |
-
OUTPUT_DIR = Path(PROJECT_ROOT).joinpath('downloads')
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import Radio from './Radio';
|
2 |
-
import Base from '../base/Base';
|
3 |
-
|
4 |
-
export default function Factory(
|
5 |
-
config?: Base.IConfig
|
6 |
-
): Radio;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/dnnlib/util.py
DELETED
@@ -1,477 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Miscellaneous utility classes and functions."""
|
10 |
-
|
11 |
-
import ctypes
|
12 |
-
import fnmatch
|
13 |
-
import importlib
|
14 |
-
import inspect
|
15 |
-
import numpy as np
|
16 |
-
import os
|
17 |
-
import shutil
|
18 |
-
import sys
|
19 |
-
import types
|
20 |
-
import io
|
21 |
-
import pickle
|
22 |
-
import re
|
23 |
-
import requests
|
24 |
-
import html
|
25 |
-
import hashlib
|
26 |
-
import glob
|
27 |
-
import tempfile
|
28 |
-
import urllib
|
29 |
-
import urllib.request
|
30 |
-
import uuid
|
31 |
-
|
32 |
-
from distutils.util import strtobool
|
33 |
-
from typing import Any, List, Tuple, Union
|
34 |
-
|
35 |
-
|
36 |
-
# Util classes
|
37 |
-
# ------------------------------------------------------------------------------------------
|
38 |
-
|
39 |
-
|
40 |
-
class EasyDict(dict):
|
41 |
-
"""Convenience class that behaves like a dict but allows access with the attribute syntax."""
|
42 |
-
|
43 |
-
def __getattr__(self, name: str) -> Any:
|
44 |
-
try:
|
45 |
-
return self[name]
|
46 |
-
except KeyError:
|
47 |
-
raise AttributeError(name)
|
48 |
-
|
49 |
-
def __setattr__(self, name: str, value: Any) -> None:
|
50 |
-
self[name] = value
|
51 |
-
|
52 |
-
def __delattr__(self, name: str) -> None:
|
53 |
-
del self[name]
|
54 |
-
|
55 |
-
|
56 |
-
class Logger(object):
|
57 |
-
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
|
58 |
-
|
59 |
-
def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
|
60 |
-
self.file = None
|
61 |
-
|
62 |
-
if file_name is not None:
|
63 |
-
self.file = open(file_name, file_mode)
|
64 |
-
|
65 |
-
self.should_flush = should_flush
|
66 |
-
self.stdout = sys.stdout
|
67 |
-
self.stderr = sys.stderr
|
68 |
-
|
69 |
-
sys.stdout = self
|
70 |
-
sys.stderr = self
|
71 |
-
|
72 |
-
def __enter__(self) -> "Logger":
|
73 |
-
return self
|
74 |
-
|
75 |
-
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
76 |
-
self.close()
|
77 |
-
|
78 |
-
def write(self, text: Union[str, bytes]) -> None:
|
79 |
-
"""Write text to stdout (and a file) and optionally flush."""
|
80 |
-
if isinstance(text, bytes):
|
81 |
-
text = text.decode()
|
82 |
-
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
|
83 |
-
return
|
84 |
-
|
85 |
-
if self.file is not None:
|
86 |
-
self.file.write(text)
|
87 |
-
|
88 |
-
self.stdout.write(text)
|
89 |
-
|
90 |
-
if self.should_flush:
|
91 |
-
self.flush()
|
92 |
-
|
93 |
-
def flush(self) -> None:
|
94 |
-
"""Flush written text to both stdout and a file, if open."""
|
95 |
-
if self.file is not None:
|
96 |
-
self.file.flush()
|
97 |
-
|
98 |
-
self.stdout.flush()
|
99 |
-
|
100 |
-
def close(self) -> None:
|
101 |
-
"""Flush, close possible files, and remove stdout/stderr mirroring."""
|
102 |
-
self.flush()
|
103 |
-
|
104 |
-
# if using multiple loggers, prevent closing in wrong order
|
105 |
-
if sys.stdout is self:
|
106 |
-
sys.stdout = self.stdout
|
107 |
-
if sys.stderr is self:
|
108 |
-
sys.stderr = self.stderr
|
109 |
-
|
110 |
-
if self.file is not None:
|
111 |
-
self.file.close()
|
112 |
-
self.file = None
|
113 |
-
|
114 |
-
|
115 |
-
# Cache directories
|
116 |
-
# ------------------------------------------------------------------------------------------
|
117 |
-
|
118 |
-
_dnnlib_cache_dir = None
|
119 |
-
|
120 |
-
def set_cache_dir(path: str) -> None:
|
121 |
-
global _dnnlib_cache_dir
|
122 |
-
_dnnlib_cache_dir = path
|
123 |
-
|
124 |
-
def make_cache_dir_path(*paths: str) -> str:
|
125 |
-
if _dnnlib_cache_dir is not None:
|
126 |
-
return os.path.join(_dnnlib_cache_dir, *paths)
|
127 |
-
if 'DNNLIB_CACHE_DIR' in os.environ:
|
128 |
-
return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
|
129 |
-
if 'HOME' in os.environ:
|
130 |
-
return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
|
131 |
-
if 'USERPROFILE' in os.environ:
|
132 |
-
return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
|
133 |
-
return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
|
134 |
-
|
135 |
-
# Small util functions
|
136 |
-
# ------------------------------------------------------------------------------------------
|
137 |
-
|
138 |
-
|
139 |
-
def format_time(seconds: Union[int, float]) -> str:
|
140 |
-
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
|
141 |
-
s = int(np.rint(seconds))
|
142 |
-
|
143 |
-
if s < 60:
|
144 |
-
return "{0}s".format(s)
|
145 |
-
elif s < 60 * 60:
|
146 |
-
return "{0}m {1:02}s".format(s // 60, s % 60)
|
147 |
-
elif s < 24 * 60 * 60:
|
148 |
-
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
|
149 |
-
else:
|
150 |
-
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
|
151 |
-
|
152 |
-
|
153 |
-
def ask_yes_no(question: str) -> bool:
|
154 |
-
"""Ask the user the question until the user inputs a valid answer."""
|
155 |
-
while True:
|
156 |
-
try:
|
157 |
-
print("{0} [y/n]".format(question))
|
158 |
-
return strtobool(input().lower())
|
159 |
-
except ValueError:
|
160 |
-
pass
|
161 |
-
|
162 |
-
|
163 |
-
def tuple_product(t: Tuple) -> Any:
|
164 |
-
"""Calculate the product of the tuple elements."""
|
165 |
-
result = 1
|
166 |
-
|
167 |
-
for v in t:
|
168 |
-
result *= v
|
169 |
-
|
170 |
-
return result
|
171 |
-
|
172 |
-
|
173 |
-
_str_to_ctype = {
|
174 |
-
"uint8": ctypes.c_ubyte,
|
175 |
-
"uint16": ctypes.c_uint16,
|
176 |
-
"uint32": ctypes.c_uint32,
|
177 |
-
"uint64": ctypes.c_uint64,
|
178 |
-
"int8": ctypes.c_byte,
|
179 |
-
"int16": ctypes.c_int16,
|
180 |
-
"int32": ctypes.c_int32,
|
181 |
-
"int64": ctypes.c_int64,
|
182 |
-
"float32": ctypes.c_float,
|
183 |
-
"float64": ctypes.c_double
|
184 |
-
}
|
185 |
-
|
186 |
-
|
187 |
-
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
|
188 |
-
"""Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
|
189 |
-
type_str = None
|
190 |
-
|
191 |
-
if isinstance(type_obj, str):
|
192 |
-
type_str = type_obj
|
193 |
-
elif hasattr(type_obj, "__name__"):
|
194 |
-
type_str = type_obj.__name__
|
195 |
-
elif hasattr(type_obj, "name"):
|
196 |
-
type_str = type_obj.name
|
197 |
-
else:
|
198 |
-
raise RuntimeError("Cannot infer type name from input")
|
199 |
-
|
200 |
-
assert type_str in _str_to_ctype.keys()
|
201 |
-
|
202 |
-
my_dtype = np.dtype(type_str)
|
203 |
-
my_ctype = _str_to_ctype[type_str]
|
204 |
-
|
205 |
-
assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
|
206 |
-
|
207 |
-
return my_dtype, my_ctype
|
208 |
-
|
209 |
-
|
210 |
-
def is_pickleable(obj: Any) -> bool:
|
211 |
-
try:
|
212 |
-
with io.BytesIO() as stream:
|
213 |
-
pickle.dump(obj, stream)
|
214 |
-
return True
|
215 |
-
except:
|
216 |
-
return False
|
217 |
-
|
218 |
-
|
219 |
-
# Functionality to import modules/objects by name, and call functions by name
|
220 |
-
# ------------------------------------------------------------------------------------------
|
221 |
-
|
222 |
-
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
|
223 |
-
"""Searches for the underlying module behind the name to some python object.
|
224 |
-
Returns the module and the object name (original name with module part removed)."""
|
225 |
-
|
226 |
-
# allow convenience shorthands, substitute them by full names
|
227 |
-
obj_name = re.sub("^np.", "numpy.", obj_name)
|
228 |
-
obj_name = re.sub("^tf.", "tensorflow.", obj_name)
|
229 |
-
|
230 |
-
# list alternatives for (module_name, local_obj_name)
|
231 |
-
parts = obj_name.split(".")
|
232 |
-
name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
|
233 |
-
|
234 |
-
# try each alternative in turn
|
235 |
-
for module_name, local_obj_name in name_pairs:
|
236 |
-
try:
|
237 |
-
module = importlib.import_module(module_name) # may raise ImportError
|
238 |
-
get_obj_from_module(module, local_obj_name) # may raise AttributeError
|
239 |
-
return module, local_obj_name
|
240 |
-
except:
|
241 |
-
pass
|
242 |
-
|
243 |
-
# maybe some of the modules themselves contain errors?
|
244 |
-
for module_name, _local_obj_name in name_pairs:
|
245 |
-
try:
|
246 |
-
importlib.import_module(module_name) # may raise ImportError
|
247 |
-
except ImportError:
|
248 |
-
if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
|
249 |
-
raise
|
250 |
-
|
251 |
-
# maybe the requested attribute is missing?
|
252 |
-
for module_name, local_obj_name in name_pairs:
|
253 |
-
try:
|
254 |
-
module = importlib.import_module(module_name) # may raise ImportError
|
255 |
-
get_obj_from_module(module, local_obj_name) # may raise AttributeError
|
256 |
-
except ImportError:
|
257 |
-
pass
|
258 |
-
|
259 |
-
# we are out of luck, but we have no idea why
|
260 |
-
raise ImportError(obj_name)
|
261 |
-
|
262 |
-
|
263 |
-
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
|
264 |
-
"""Traverses the object name and returns the last (rightmost) python object."""
|
265 |
-
if obj_name == '':
|
266 |
-
return module
|
267 |
-
obj = module
|
268 |
-
for part in obj_name.split("."):
|
269 |
-
obj = getattr(obj, part)
|
270 |
-
return obj
|
271 |
-
|
272 |
-
|
273 |
-
def get_obj_by_name(name: str) -> Any:
|
274 |
-
"""Finds the python object with the given name."""
|
275 |
-
module, obj_name = get_module_from_obj_name(name)
|
276 |
-
return get_obj_from_module(module, obj_name)
|
277 |
-
|
278 |
-
|
279 |
-
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
|
280 |
-
"""Finds the python object with the given name and calls it as a function."""
|
281 |
-
assert func_name is not None
|
282 |
-
func_obj = get_obj_by_name(func_name)
|
283 |
-
assert callable(func_obj)
|
284 |
-
return func_obj(*args, **kwargs)
|
285 |
-
|
286 |
-
|
287 |
-
def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
|
288 |
-
"""Finds the python class with the given name and constructs it with the given arguments."""
|
289 |
-
return call_func_by_name(*args, func_name=class_name, **kwargs)
|
290 |
-
|
291 |
-
|
292 |
-
def get_module_dir_by_obj_name(obj_name: str) -> str:
|
293 |
-
"""Get the directory path of the module containing the given object name."""
|
294 |
-
module, _ = get_module_from_obj_name(obj_name)
|
295 |
-
return os.path.dirname(inspect.getfile(module))
|
296 |
-
|
297 |
-
|
298 |
-
def is_top_level_function(obj: Any) -> bool:
|
299 |
-
"""Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
|
300 |
-
return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
|
301 |
-
|
302 |
-
|
303 |
-
def get_top_level_function_name(obj: Any) -> str:
|
304 |
-
"""Return the fully-qualified name of a top-level function."""
|
305 |
-
assert is_top_level_function(obj)
|
306 |
-
module = obj.__module__
|
307 |
-
if module == '__main__':
|
308 |
-
module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
|
309 |
-
return module + "." + obj.__name__
|
310 |
-
|
311 |
-
|
312 |
-
# File system helpers
|
313 |
-
# ------------------------------------------------------------------------------------------
|
314 |
-
|
315 |
-
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
|
316 |
-
"""List all files recursively in a given directory while ignoring given file and directory names.
|
317 |
-
Returns list of tuples containing both absolute and relative paths."""
|
318 |
-
assert os.path.isdir(dir_path)
|
319 |
-
base_name = os.path.basename(os.path.normpath(dir_path))
|
320 |
-
|
321 |
-
if ignores is None:
|
322 |
-
ignores = []
|
323 |
-
|
324 |
-
result = []
|
325 |
-
|
326 |
-
for root, dirs, files in os.walk(dir_path, topdown=True):
|
327 |
-
for ignore_ in ignores:
|
328 |
-
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
|
329 |
-
|
330 |
-
# dirs need to be edited in-place
|
331 |
-
for d in dirs_to_remove:
|
332 |
-
dirs.remove(d)
|
333 |
-
|
334 |
-
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
|
335 |
-
|
336 |
-
absolute_paths = [os.path.join(root, f) for f in files]
|
337 |
-
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
|
338 |
-
|
339 |
-
if add_base_to_relative:
|
340 |
-
relative_paths = [os.path.join(base_name, p) for p in relative_paths]
|
341 |
-
|
342 |
-
assert len(absolute_paths) == len(relative_paths)
|
343 |
-
result += zip(absolute_paths, relative_paths)
|
344 |
-
|
345 |
-
return result
|
346 |
-
|
347 |
-
|
348 |
-
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
|
349 |
-
"""Takes in a list of tuples of (src, dst) paths and copies files.
|
350 |
-
Will create all necessary directories."""
|
351 |
-
for file in files:
|
352 |
-
target_dir_name = os.path.dirname(file[1])
|
353 |
-
|
354 |
-
# will create all intermediate-level directories
|
355 |
-
if not os.path.exists(target_dir_name):
|
356 |
-
os.makedirs(target_dir_name)
|
357 |
-
|
358 |
-
shutil.copyfile(file[0], file[1])
|
359 |
-
|
360 |
-
|
361 |
-
# URL helpers
|
362 |
-
# ------------------------------------------------------------------------------------------
|
363 |
-
|
364 |
-
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
|
365 |
-
"""Determine whether the given object is a valid URL string."""
|
366 |
-
if not isinstance(obj, str) or not "://" in obj:
|
367 |
-
return False
|
368 |
-
if allow_file_urls and obj.startswith('file://'):
|
369 |
-
return True
|
370 |
-
try:
|
371 |
-
res = requests.compat.urlparse(obj)
|
372 |
-
if not res.scheme or not res.netloc or not "." in res.netloc:
|
373 |
-
return False
|
374 |
-
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
|
375 |
-
if not res.scheme or not res.netloc or not "." in res.netloc:
|
376 |
-
return False
|
377 |
-
except:
|
378 |
-
return False
|
379 |
-
return True
|
380 |
-
|
381 |
-
|
382 |
-
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
    """Download the given URL and return a binary-mode file object to access the data.

    Args:
        url: HTTP(S) URL, file:// URL, or plain local path to open.
        cache_dir: Directory for the download cache; when None, a default
            'downloads' cache directory is used.
        num_attempts: Maximum number of download attempts before giving up.
        verbose: Print download progress to stdout.
        return_filename: Return a local filename instead of a file object.
            For remote URLs this requires cache=True, since a filename only
            exists once the data has been written to the cache.
        cache: Look up and populate the local download cache.
    """
    assert num_attempts >= 1
    # A filename can only be returned for remote URLs if the data ends up on
    # disk, i.e. when caching is enabled.
    assert not (return_filename and (not cache))

    # Doesn't look like an URL scheme so interpret it as a local filename.
    if not re.match('^[a-z]+://', url):
        return url if return_filename else open(url, "rb")

    # Handle file URLs. This code handles unusual file:// patterns that
    # arise on Windows:
    #
    # file:///c:/foo.txt
    #
    # which would translate to a local '/c:/foo.txt' filename that's
    # invalid. Drop the forward slash for such pathnames.
    #
    # If you touch this code path, you should test it on both Linux and
    # Windows.
    #
    # Some internet resources suggest using urllib.request.url2pathname() but
    # but that converts forward slashes to backslashes and this causes
    # its own set of problems.
    if url.startswith('file://'):
        filename = urllib.parse.urlparse(url).path
        if re.match(r'^/[a-zA-Z]:', filename):
            filename = filename[1:]
        return filename if return_filename else open(filename, "rb")

    assert is_url(url)

    # Lookup from cache.
    if cache_dir is None:
        cache_dir = make_cache_dir_path('downloads')

    # Cache entries are keyed by the MD5 of the URL, with the sanitized
    # remote filename appended for readability: "<md5>_<safe_name>".
    url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
    if cache:
        cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
        # Only a single unambiguous match counts as a cache hit.
        if len(cache_files) == 1:
            filename = cache_files[0]
            return filename if return_filename else open(filename, "rb")

    # Download.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print("Downloading %s ..." % url, end="", flush=True)
        # attempts_left counts down to 0; on the final attempt the failure is
        # re-raised instead of retried.
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if len(res.content) == 0:
                        raise IOError("No data received")

                    # Very small responses may be Google Drive interstitial
                    # pages rather than the actual payload; detect and handle
                    # them before accepting the content.
                    if len(res.content) < 8192:
                        content_str = res.content.decode("utf-8")
                        if "download_warning" in res.headers.get("Set-Cookie", ""):
                            # Follow the confirmation link embedded in the
                            # virus-scan warning page, then retry via the
                            # raised IOError.
                            links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
                            if len(links) == 1:
                                url = requests.compat.urljoin(url, links[0])
                                raise IOError("Google Drive virus checker nag")
                        if "Google Drive - Quota exceeded" in content_str:
                            raise IOError("Google Drive download quota exceeded -- please try again later")

                    # Prefer the server-advertised filename; fall back to the
                    # URL itself when no Content-Disposition is present.
                    match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
                    url_name = match[1] if match else url
                    url_data = res.content
                    if verbose:
                        print(" done")
                    break
            except KeyboardInterrupt:
                raise
            except:
                if not attempts_left:
                    if verbose:
                        print(" failed")
                    raise
                if verbose:
                    print(".", end="", flush=True)

    # Save to cache.
    if cache:
        # Replace characters that are unsafe in filenames before building the
        # cache entry name.
        safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
        cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
        # Write to a uniquely-named temp file first, then rename atomically so
        # concurrent processes never observe a partially written cache entry.
        temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, "wb") as f:
            f.write(url_data)
        os.replace(temp_file, cache_file) # atomic
        if return_filename:
            return cache_file

    # Return data as file object.
    assert not return_filename
    return io.BytesIO(url_data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/audioldm/__init__.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# AudioLDM requires both PyTorch and transformers >= 4.27.0. When either is
# missing, import a dummy AudioLDMPipeline placeholder instead of the real
# pipeline so that `import` of this subpackage succeeds and a helpful error
# surfaces only when the pipeline is actually used.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        AudioLDMPipeline,
    )
else:
    # All optional dependencies are satisfied: expose the real implementation.
    from .pipeline_audioldm import AudioLDMPipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
from typing import List, Optional, Tuple, Union
|
17 |
-
|
18 |
-
import torch
|
19 |
-
|
20 |
-
from ...models import UNet2DModel, VQModel
|
21 |
-
from ...schedulers import DDIMScheduler
|
22 |
-
from ...utils import randn_tensor
|
23 |
-
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
|
24 |
-
|
25 |
-
|
26 |
-
class LDMPipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using latent diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        vqvae ([`VQModel`]):
            Vector-quantized (VQ) model to encode and decode images to and from latent representations.
        unet ([`UNet2DModel`]):
            A `UNet2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        # register_modules makes the submodules visible to save/load and
        # device-placement machinery of DiffusionPipeline.
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        r"""
        The call function to the pipeline for generation.

        Args:
            batch_size (`int`, *optional*, defaults to 1):
                Number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Example:

        ```py
        >>> from diffusers import LDMPipeline

        >>> # load model and scheduler
        >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")

        >>> # run pipeline in inference (sample random noise and denoise)
        >>> image = pipe().images[0]
        ```

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images
        """

        # Sample the initial latent noise, then move it to the pipeline's
        # device (latent shape is derived from the UNet's configuration).
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            # `eta` is only forwarded when the configured scheduler's step()
            # actually accepts it (e.g. DDIM does, DDPM does not).
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        # Map from [-1, 1] model output to [0, 1] pixel range and NHWC layout.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
# Mask Scoring R-CNN with a ResNeXt-101 (64x4d) backbone, 1x schedule, COCO.
# Inherits everything from the ResNet-50 base config and only swaps the
# backbone.
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
    # ImageNet-pretrained ResNeXt-101 64x4d weights from the open-mmlab zoo.
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,          # cardinality of the grouped convolutions
        base_width=4,       # per-group base channel width (the "4d")
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # feed all four stage outputs to the FPN
        frozen_stages=1,    # freeze the stem and first stage during training
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/nasfcos.py
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
from ..builder import DETECTORS
|
2 |
-
from .single_stage import SingleStageDetector
|
3 |
-
|
4 |
-
|
5 |
-
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
    """`NAS-FCOS: Fast Neural Architecture Search for Object Detection
    <https://arxiv.org/abs/1906.0442>`_.

    Thin wrapper around :class:`SingleStageDetector`: all construction and
    behavior are delegated to the parent class.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
                         pretrained)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/single_stage.py
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
|
4 |
-
from mmdet.core import bbox2result
|
5 |
-
from ..builder import DETECTORS, build_backbone, build_head, build_neck
|
6 |
-
from .base import BaseDetector
|
7 |
-
|
8 |
-
|
9 |
-
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
    """Base class for single-stage detectors.

    Single-stage detectors directly and densely predict bounding boxes on the
    output features of the backbone+neck.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 bbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(SingleStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)
        # The head needs the train/test configs at build time (e.g. for its
        # assigner/sampler and NMS settings), so inject them before build_head.
        bbox_head.update(train_cfg=train_cfg)
        bbox_head.update(test_cfg=test_cfg)
        self.bbox_head = build_head(bbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(SingleStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            # A neck may be a single module or an nn.Sequential of modules;
            # initialize each submodule in the latter case.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        self.bbox_head.init_weights()

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        super(SingleStageDetector, self).forward_train(img, img_metas)
        x = self.extract_feat(img)
        # Loss computation is fully delegated to the dense head.
        losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
                                              gt_labels, gt_bboxes_ignore)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        """Test function without test time augmentation.

        Args:
            imgs (list[torch.Tensor]): List of multiple images
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        x = self.extract_feat(img)
        outs = self.bbox_head(x)
        # get origin input shape to support onnx dynamic shape
        if torch.onnx.is_in_onnx_export():
            # get shape as tensor
            img_shape = torch._shape_as_tensor(img)[2:]
            img_metas[0]['img_shape_for_onnx'] = img_shape
        bbox_list = self.bbox_head.get_bboxes(
            *outs, img_metas, rescale=rescale)
        # skip post-processing when exporting to ONNX
        if torch.onnx.is_in_onnx_export():
            return bbox_list

        # Convert per-image (det_bboxes, det_labels) pairs into per-class
        # numpy arrays expected by the evaluation tooling.
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for det_bboxes, det_labels in bbox_list
        ]
        return bbox_results

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            imgs (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        assert hasattr(self.bbox_head, 'aug_test'), \
            f'{self.bbox_head.__class__.__name__}' \
            ' does not support test-time augmentation'

        feats = self.extract_feats(imgs)
        return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/two_stage.py
DELETED
@@ -1,215 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
|
4 |
-
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
|
5 |
-
from ..builder import DETECTORS, build_backbone, build_head, build_neck
|
6 |
-
from .base import BaseDetector
|
7 |
-
|
8 |
-
|
9 |
-
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consisting of a region proposal network and a
    task-specific regression head.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(TwoStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)

        if neck is not None:
            self.neck = build_neck(neck)

        if rpn_head is not None:
            # Work on a copy so the caller's rpn_head config dict is not
            # mutated by the train/test cfg injection below.
            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)

        if roi_head is not None:
            # update train and test cfg here for now
            # TODO: refactor assigner & sampler
            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights(pretrained=pretrained)

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return hasattr(self, 'rpn_head') and self.rpn_head is not None

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return hasattr(self, 'roi_head') and self.roi_head is not None

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(TwoStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            # A neck may be a single module or an nn.Sequential of modules.
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        outs = ()
        # backbone
        x = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = outs + (rpn_outs, )
        # Random proposals stand in for real RPN output; only shapes matter
        # for the flops computation.
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = outs + (roi_outs, )
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

            gt_labels (list[Tensor]): class indices corresponding to each box

            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)

        losses = dict()

        # RPN forward and loss
        if self.with_rpn:
            # Fall back to the test-time RPN config when no dedicated
            # 'rpn_proposal' training config is provided.
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            # RPN is class-agnostic, hence gt_labels=None.
            rpn_losses, proposal_list = self.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals

        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                                 gt_bboxes, gt_labels,
                                                 gt_bboxes_ignore, gt_masks,
                                                 **kwargs)
        losses.update(roi_losses)

        return losses

    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)

        if proposals is None:
            proposal_list = await self.rpn_head.async_simple_test_rpn(
                x, img_meta)
        else:
            proposal_list = proposals

        return await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale)

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'

        x = self.extract_feat(img)

        # get origin input shape to onnx dynamic input shape
        if torch.onnx.is_in_onnx_export():
            img_shape = torch._shape_as_tensor(img)[2:]
            img_metas[0]['img_shape_for_onnx'] = img_shape

        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals

        return self.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andyrasika/xlm-roberta-base-finetuned-panx-de/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr

# Launch a Gradio demo backed by the hosted Hugging Face model
# "Andyrasika/xlm-roberta-base-finetuned-panx-de"; the "models/" prefix tells
# gradio to load the model from the Hub and auto-generate the interface.
gr.Interface.load("models/Andyrasika/xlm-roberta-base-finetuned-panx-de").launch()
|
|
|
|
|
|
|
|
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/resample.py
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
from abc import ABC, abstractmethod
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch as th
|
5 |
-
import torch.distributed as dist
|
6 |
-
|
7 |
-
|
8 |
-
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler.
    :param diffusion: the diffusion object to sample for.
    :raises NotImplementedError: if `name` is not a known sampler name.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    raise NotImplementedError(f"unknown schedule sampler: {name}")
|
21 |
-
|
22 |
-
|
23 |
-
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged.
    However, subclasses may override sample() to change how the resampled
    terms are reweighted, allowing for actual changes in the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.

        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        w = self.weights()
        # Normalize the subclass-provided weights into a probability
        # distribution over timesteps.
        p = w / np.sum(w)
        indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
        indices = th.from_numpy(indices_np).long().to(device)
        # Importance-sampling correction 1 / (N * p): scaling each sampled
        # loss term by this weight keeps the objective's expectation equal to
        # that of uniform sampling (unbiased).
        weights_np = 1 / (len(p) * p[indices_np])
        weights = th.from_numpy(weights_np).float().to(device)
        return indices, weights
|
59 |
-
|
60 |
-
|
61 |
-
class UniformSampler(ScheduleSampler):
|
62 |
-
def __init__(self, diffusion):
|
63 |
-
self.diffusion = diffusion
|
64 |
-
self._weights = np.ones([diffusion.num_timesteps])
|
65 |
-
|
66 |
-
def weights(self):
|
67 |
-
return self._weights
|
68 |
-
|
69 |
-
|
70 |
-
class LossAwareSampler(ScheduleSampler):
|
71 |
-
def update_with_local_losses(self, local_ts, local_losses):
|
72 |
-
"""
|
73 |
-
Update the reweighting using losses from a model.
|
74 |
-
|
75 |
-
Call this method from each rank with a batch of timesteps and the
|
76 |
-
corresponding losses for each of those timesteps.
|
77 |
-
This method will perform synchronization to make sure all of the ranks
|
78 |
-
maintain the exact same reweighting.
|
79 |
-
|
80 |
-
:param local_ts: an integer Tensor of timesteps.
|
81 |
-
:param local_losses: a 1D Tensor of losses.
|
82 |
-
"""
|
83 |
-
batch_sizes = [
|
84 |
-
th.tensor([0], dtype=th.int32, device=local_ts.device)
|
85 |
-
for _ in range(dist.get_world_size())
|
86 |
-
]
|
87 |
-
dist.all_gather(
|
88 |
-
batch_sizes,
|
89 |
-
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
|
90 |
-
)
|
91 |
-
|
92 |
-
# Pad all_gather batches to be the maximum batch size.
|
93 |
-
batch_sizes = [x.item() for x in batch_sizes]
|
94 |
-
max_bs = max(batch_sizes)
|
95 |
-
|
96 |
-
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
|
97 |
-
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
|
98 |
-
dist.all_gather(timestep_batches, local_ts)
|
99 |
-
dist.all_gather(loss_batches, local_losses)
|
100 |
-
timesteps = [
|
101 |
-
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
|
102 |
-
]
|
103 |
-
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
|
104 |
-
self.update_with_all_losses(timesteps, losses)
|
105 |
-
|
106 |
-
@abstractmethod
|
107 |
-
def update_with_all_losses(self, ts, losses):
|
108 |
-
"""
|
109 |
-
Update the reweighting using losses from a model.
|
110 |
-
|
111 |
-
Sub-classes should override this method to update the reweighting
|
112 |
-
using losses from the model.
|
113 |
-
|
114 |
-
This method directly updates the reweighting without synchronizing
|
115 |
-
between workers. It is called by update_with_local_losses from all
|
116 |
-
ranks with identical arguments. Thus, it should have deterministic
|
117 |
-
behavior to maintain state across workers.
|
118 |
-
|
119 |
-
:param ts: a list of int timesteps.
|
120 |
-
:param losses: a list of float losses, one per timestep.
|
121 |
-
"""
|
122 |
-
|
123 |
-
|
124 |
-
class LossSecondMomentResampler(LossAwareSampler):
|
125 |
-
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
|
126 |
-
self.diffusion = diffusion
|
127 |
-
self.history_per_term = history_per_term
|
128 |
-
self.uniform_prob = uniform_prob
|
129 |
-
self._loss_history = np.zeros(
|
130 |
-
[diffusion.num_timesteps, history_per_term], dtype=np.float64
|
131 |
-
)
|
132 |
-
self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int)
|
133 |
-
|
134 |
-
def weights(self):
|
135 |
-
if not self._warmed_up():
|
136 |
-
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
|
137 |
-
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
|
138 |
-
weights /= np.sum(weights)
|
139 |
-
weights *= 1 - self.uniform_prob
|
140 |
-
weights += self.uniform_prob / len(weights)
|
141 |
-
return weights
|
142 |
-
|
143 |
-
def update_with_all_losses(self, ts, losses):
|
144 |
-
for t, loss in zip(ts, losses):
|
145 |
-
if self._loss_counts[t] == self.history_per_term:
|
146 |
-
# Shift out the oldest loss term.
|
147 |
-
self._loss_history[t, :-1] = self._loss_history[t, 1:]
|
148 |
-
self._loss_history[t, -1] = loss
|
149 |
-
else:
|
150 |
-
self._loss_history[t, self._loss_counts[t]] = loss
|
151 |
-
self._loss_counts[t] += 1
|
152 |
-
|
153 |
-
def _warmed_up(self):
|
154 |
-
return (self._loss_counts == self.history_per_term).all()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import torch.nn as nn
|
3 |
-
|
4 |
-
from .conv_module import ConvModule
|
5 |
-
|
6 |
-
|
7 |
-
class DepthwiseSeparableConvModule(nn.Module):
|
8 |
-
"""Depthwise separable convolution module.
|
9 |
-
|
10 |
-
See https://arxiv.org/pdf/1704.04861.pdf for details.
|
11 |
-
|
12 |
-
This module can replace a ConvModule with the conv block replaced by two
|
13 |
-
conv block: depthwise conv block and pointwise conv block. The depthwise
|
14 |
-
conv block contains depthwise-conv/norm/activation layers. The pointwise
|
15 |
-
conv block contains pointwise-conv/norm/activation layers. It should be
|
16 |
-
noted that there will be norm/activation layer in the depthwise conv block
|
17 |
-
if `norm_cfg` and `act_cfg` are specified.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
in_channels (int): Number of channels in the input feature map.
|
21 |
-
Same as that in ``nn._ConvNd``.
|
22 |
-
out_channels (int): Number of channels produced by the convolution.
|
23 |
-
Same as that in ``nn._ConvNd``.
|
24 |
-
kernel_size (int | tuple[int]): Size of the convolving kernel.
|
25 |
-
Same as that in ``nn._ConvNd``.
|
26 |
-
stride (int | tuple[int]): Stride of the convolution.
|
27 |
-
Same as that in ``nn._ConvNd``. Default: 1.
|
28 |
-
padding (int | tuple[int]): Zero-padding added to both sides of
|
29 |
-
the input. Same as that in ``nn._ConvNd``. Default: 0.
|
30 |
-
dilation (int | tuple[int]): Spacing between kernel elements.
|
31 |
-
Same as that in ``nn._ConvNd``. Default: 1.
|
32 |
-
norm_cfg (dict): Default norm config for both depthwise ConvModule and
|
33 |
-
pointwise ConvModule. Default: None.
|
34 |
-
act_cfg (dict): Default activation config for both depthwise ConvModule
|
35 |
-
and pointwise ConvModule. Default: dict(type='ReLU').
|
36 |
-
dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
|
37 |
-
'default', it will be the same as `norm_cfg`. Default: 'default'.
|
38 |
-
dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
|
39 |
-
'default', it will be the same as `act_cfg`. Default: 'default'.
|
40 |
-
pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
|
41 |
-
'default', it will be the same as `norm_cfg`. Default: 'default'.
|
42 |
-
pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
|
43 |
-
'default', it will be the same as `act_cfg`. Default: 'default'.
|
44 |
-
kwargs (optional): Other shared arguments for depthwise and pointwise
|
45 |
-
ConvModule. See ConvModule for ref.
|
46 |
-
"""
|
47 |
-
|
48 |
-
def __init__(self,
|
49 |
-
in_channels,
|
50 |
-
out_channels,
|
51 |
-
kernel_size,
|
52 |
-
stride=1,
|
53 |
-
padding=0,
|
54 |
-
dilation=1,
|
55 |
-
norm_cfg=None,
|
56 |
-
act_cfg=dict(type='ReLU'),
|
57 |
-
dw_norm_cfg='default',
|
58 |
-
dw_act_cfg='default',
|
59 |
-
pw_norm_cfg='default',
|
60 |
-
pw_act_cfg='default',
|
61 |
-
**kwargs):
|
62 |
-
super(DepthwiseSeparableConvModule, self).__init__()
|
63 |
-
assert 'groups' not in kwargs, 'groups should not be specified'
|
64 |
-
|
65 |
-
# if norm/activation config of depthwise/pointwise ConvModule is not
|
66 |
-
# specified, use default config.
|
67 |
-
dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
|
68 |
-
dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
|
69 |
-
pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
|
70 |
-
pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
|
71 |
-
|
72 |
-
# depthwise convolution
|
73 |
-
self.depthwise_conv = ConvModule(
|
74 |
-
in_channels,
|
75 |
-
in_channels,
|
76 |
-
kernel_size,
|
77 |
-
stride=stride,
|
78 |
-
padding=padding,
|
79 |
-
dilation=dilation,
|
80 |
-
groups=in_channels,
|
81 |
-
norm_cfg=dw_norm_cfg,
|
82 |
-
act_cfg=dw_act_cfg,
|
83 |
-
**kwargs)
|
84 |
-
|
85 |
-
self.pointwise_conv = ConvModule(
|
86 |
-
in_channels,
|
87 |
-
out_channels,
|
88 |
-
1,
|
89 |
-
norm_cfg=pw_norm_cfg,
|
90 |
-
act_cfg=pw_act_cfg,
|
91 |
-
**kwargs)
|
92 |
-
|
93 |
-
def forward(self, x):
|
94 |
-
x = self.depthwise_conv(x)
|
95 |
-
x = self.pointwise_conv(x)
|
96 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Antonpy/stable-diffusion-license/index.html
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/ArtGAN/Diffusion-API/diffusion_webui/diffusion_models/__init__.py
DELETED
File without changes
|
spaces/Artrajz/vits-simple-api/static/js/bootstrap.bundle.min.js
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
/*!
|
2 |
-
* Bootstrap v4.6.2 (https://getbootstrap.com/)
|
3 |
-
* Copyright 2011-2022 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
|
4 |
-
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
|
5 |
-
*/
|
6 |
-
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],e):e((t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap={},t.jQuery)}(this,(function(t,e){"use strict";function n(t){return t&&"object"==typeof t&&"default"in t?t:{default:t}}var i=n(e);function o(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function r(t,e,n){return e&&o(t.prototype,e),n&&o(t,n),Object.defineProperty(t,"prototype",{writable:!1}),t}function a(){return a=Object.assign?Object.assign.bind():function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t},a.apply(this,arguments)}function s(t,e){return s=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(t,e){return t.__proto__=e,t},s(t,e)}var l="transitionend";var u={TRANSITION_END:"bsTransitionEnd",getUID:function(t){do{t+=~~(1e6*Math.random())}while(document.getElementById(t));return t},getSelectorFromElement:function(t){var e=t.getAttribute("data-target");if(!e||"#"===e){var n=t.getAttribute("href");e=n&&"#"!==n?n.trim():""}try{return document.querySelector(e)?e:null}catch(t){return null}},getTransitionDurationFromElement:function(t){if(!t)return 0;var e=i.default(t).css("transition-duration"),n=i.default(t).css("transition-delay"),o=parseFloat(e),r=parseFloat(n);return o||r?(e=e.split(",")[0],n=n.split(",")[0],1e3*(parseFloat(e)+parseFloat(n))):0},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(t){i.default(t).trigger(l)},supportsTransitionEnd:function(){return Boolean(l)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var o=n[i],r=e[i],a=r&&u.isElement(r)?"element":null===(s=r)||"undefined"==typeof 
s?""+s:{}.toString.call(s).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(o).test(a))throw new Error(t.toUpperCase()+': Option "'+i+'" provided type "'+a+'" but expected type "'+o+'".')}var s},findShadowRoot:function(t){if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){var e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?u.findShadowRoot(t.parentNode):null},jQueryDetection:function(){if("undefined"==typeof i.default)throw new TypeError("Bootstrap's JavaScript requires jQuery. jQuery must be included before Bootstrap's JavaScript.");var t=i.default.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||t[0]>=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}};u.jQueryDetection(),i.default.fn.emulateTransitionEnd=function(t){var e=this,n=!1;return i.default(this).one(u.TRANSITION_END,(function(){n=!0})),setTimeout((function(){n||u.triggerTransitionEnd(e)}),t),this},i.default.event.special[u.TRANSITION_END]={bindType:l,delegateType:l,handle:function(t){if(i.default(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}};var f="bs.alert",d=i.default.fn.alert,c=function(){function t(t){this._element=t}var e=t.prototype;return e.close=function(t){var e=this._element;t&&(e=this._getRootElement(t)),this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},e.dispose=function(){i.default.removeData(this._element,f),this._element=null},e._getRootElement=function(t){var e=u.getSelectorFromElement(t),n=!1;return e&&(n=document.querySelector(e)),n||(n=i.default(t).closest(".alert")[0]),n},e._triggerCloseEvent=function(t){var e=i.default.Event("close.bs.alert");return i.default(t).trigger(e),e},e._removeElement=function(t){var e=this;if(i.default(t).removeClass("show"),i.default(t).hasClass("fade")){var 
n=u.getTransitionDurationFromElement(t);i.default(t).one(u.TRANSITION_END,(function(n){return e._destroyElement(t,n)})).emulateTransitionEnd(n)}else this._destroyElement(t)},e._destroyElement=function(t){i.default(t).detach().trigger("closed.bs.alert").remove()},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(f);o||(o=new t(this),n.data(f,o)),"close"===e&&o[e](this)}))},t._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();i.default(document).on("click.bs.alert.data-api",'[data-dismiss="alert"]',c._handleDismiss(new c)),i.default.fn.alert=c._jQueryInterface,i.default.fn.alert.Constructor=c,i.default.fn.alert.noConflict=function(){return i.default.fn.alert=d,c._jQueryInterface};var h="bs.button",p=i.default.fn.button,m="active",g='[data-toggle^="button"]',_='input:not([type="hidden"])',v=".btn",b=function(){function t(t){this._element=t,this.shouldAvoidTriggerChange=!1}var e=t.prototype;return e.toggle=function(){var t=!0,e=!0,n=i.default(this._element).closest('[data-toggle="buttons"]')[0];if(n){var o=this._element.querySelector(_);if(o){if("radio"===o.type)if(o.checked&&this._element.classList.contains(m))t=!1;else{var r=n.querySelector(".active");r&&i.default(r).removeClass(m)}t&&("checkbox"!==o.type&&"radio"!==o.type||(o.checked=!this._element.classList.contains(m)),this.shouldAvoidTriggerChange||i.default(o).trigger("change")),o.focus(),e=!1}}this._element.hasAttribute("disabled")||this._element.classList.contains("disabled")||(e&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(m)),t&&i.default(this._element).toggleClass(m))},e.dispose=function(){i.default.removeData(this._element,h),this._element=null},t._jQueryInterface=function(e,n){return this.each((function(){var o=i.default(this),r=o.data(h);r||(r=new 
t(this),o.data(h,r)),r.shouldAvoidTriggerChange=n,"toggle"===e&&r[e]()}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();i.default(document).on("click.bs.button.data-api",g,(function(t){var e=t.target,n=e;if(i.default(e).hasClass("btn")||(e=i.default(e).closest(v)[0]),!e||e.hasAttribute("disabled")||e.classList.contains("disabled"))t.preventDefault();else{var o=e.querySelector(_);if(o&&(o.hasAttribute("disabled")||o.classList.contains("disabled")))return void t.preventDefault();"INPUT"!==n.tagName&&"LABEL"===e.tagName||b._jQueryInterface.call(i.default(e),"toggle","INPUT"===n.tagName)}})).on("focus.bs.button.data-api blur.bs.button.data-api",g,(function(t){var e=i.default(t.target).closest(v)[0];i.default(e).toggleClass("focus",/^focus(in)?$/.test(t.type))})),i.default(window).on("load.bs.button.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-toggle="buttons"] .btn')),e=0,n=t.length;e<n;e++){var i=t[e],o=i.querySelector(_);o.checked||o.hasAttribute("checked")?i.classList.add(m):i.classList.remove(m)}for(var r=0,a=(t=[].slice.call(document.querySelectorAll('[data-toggle="button"]'))).length;r<a;r++){var s=t[r];"true"===s.getAttribute("aria-pressed")?s.classList.add(m):s.classList.remove(m)}})),i.default.fn.button=b._jQueryInterface,i.default.fn.button.Constructor=b,i.default.fn.button.noConflict=function(){return i.default.fn.button=p,b._jQueryInterface};var y="carousel",E="bs.carousel",w=i.default.fn[y],T="active",C="next",S="prev",N="slid.bs.carousel",D=".active.carousel-item",A={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},k={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},I={TOUCH:"touch",PEN:"pen"},O=function(){function 
t(t,e){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._element=t,this._indicatorsElement=this._element.querySelector(".carousel-indicators"),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent||window.MSPointerEvent),this._addEventListeners()}var e=t.prototype;return e.next=function(){this._isSliding||this._slide(C)},e.nextWhenVisible=function(){var t=i.default(this._element);!document.hidden&&t.is(":visible")&&"hidden"!==t.css("visibility")&&this.next()},e.prev=function(){this._isSliding||this._slide(S)},e.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(".carousel-item-next, .carousel-item-prev")&&(u.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(t){var e=this;this._activeElement=this._element.querySelector(D);var n=this._getItemIndex(this._activeElement);if(!(t>this._items.length-1||t<0))if(this._isSliding)i.default(this._element).one(N,(function(){return e.to(t)}));else{if(n===t)return this.pause(),void this.cycle();var o=t>n?C:S;this._slide(o,this._items[t])}},e.dispose=function(){i.default(this._element).off(".bs.carousel"),i.default.removeData(this._element,E),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(t){return t=a({},A,t),u.typeCheckConfig(y,t,k),t},e._handleSwipe=function(){var 
t=Math.abs(this.touchDeltaX);if(!(t<=40)){var e=t/this.touchDeltaX;this.touchDeltaX=0,e>0&&this.prev(),e<0&&this.next()}},e._addEventListeners=function(){var t=this;this._config.keyboard&&i.default(this._element).on("keydown.bs.carousel",(function(e){return t._keydown(e)})),"hover"===this._config.pause&&i.default(this._element).on("mouseenter.bs.carousel",(function(e){return t.pause(e)})).on("mouseleave.bs.carousel",(function(e){return t.cycle(e)})),this._config.touch&&this._addTouchEventListeners()},e._addTouchEventListeners=function(){var t=this;if(this._touchSupported){var e=function(e){t._pointerEvent&&I[e.originalEvent.pointerType.toUpperCase()]?t.touchStartX=e.originalEvent.clientX:t._pointerEvent||(t.touchStartX=e.originalEvent.touches[0].clientX)},n=function(e){t._pointerEvent&&I[e.originalEvent.pointerType.toUpperCase()]&&(t.touchDeltaX=e.originalEvent.clientX-t.touchStartX),t._handleSwipe(),"hover"===t._config.pause&&(t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout((function(e){return t.cycle(e)}),500+t._config.interval))};i.default(this._element.querySelectorAll(".carousel-item img")).on("dragstart.bs.carousel",(function(t){return t.preventDefault()})),this._pointerEvent?(i.default(this._element).on("pointerdown.bs.carousel",(function(t){return e(t)})),i.default(this._element).on("pointerup.bs.carousel",(function(t){return n(t)})),this._element.classList.add("pointer-event")):(i.default(this._element).on("touchstart.bs.carousel",(function(t){return e(t)})),i.default(this._element).on("touchmove.bs.carousel",(function(e){return function(e){t.touchDeltaX=e.originalEvent.touches&&e.originalEvent.touches.length>1?0:e.originalEvent.touches[0].clientX-t.touchStartX}(e)})),i.default(this._element).on("touchend.bs.carousel",(function(t){return n(t)})))}},e._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 
39:t.preventDefault(),this.next()}},e._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(".carousel-item")):[],this._items.indexOf(t)},e._getItemByDirection=function(t,e){var n=t===C,i=t===S,o=this._getItemIndex(e),r=this._items.length-1;if((i&&0===o||n&&o===r)&&!this._config.wrap)return e;var a=(o+(t===S?-1:1))%this._items.length;return-1===a?this._items[this._items.length-1]:this._items[a]},e._triggerSlideEvent=function(t,e){var n=this._getItemIndex(t),o=this._getItemIndex(this._element.querySelector(D)),r=i.default.Event("slide.bs.carousel",{relatedTarget:t,direction:e,from:o,to:n});return i.default(this._element).trigger(r),r},e._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var e=[].slice.call(this._indicatorsElement.querySelectorAll(".active"));i.default(e).removeClass(T);var n=this._indicatorsElement.children[this._getItemIndex(t)];n&&i.default(n).addClass(T)}},e._updateInterval=function(){var t=this._activeElement||this._element.querySelector(D);if(t){var e=parseInt(t.getAttribute("data-interval"),10);e?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,this._config.interval=e):this._config.interval=this._config.defaultInterval||this._config.interval}},e._slide=function(t,e){var n,o,r,a=this,s=this._element.querySelector(D),l=this._getItemIndex(s),f=e||s&&this._getItemByDirection(t,s),d=this._getItemIndex(f),c=Boolean(this._interval);if(t===C?(n="carousel-item-left",o="carousel-item-next",r="left"):(n="carousel-item-right",o="carousel-item-prev",r="right"),f&&i.default(f).hasClass(T))this._isSliding=!1;else if(!this._triggerSlideEvent(f,r).isDefaultPrevented()&&s&&f){this._isSliding=!0,c&&this.pause(),this._setActiveIndicatorElement(f),this._activeElement=f;var 
h=i.default.Event(N,{relatedTarget:f,direction:r,from:l,to:d});if(i.default(this._element).hasClass("slide")){i.default(f).addClass(o),u.reflow(f),i.default(s).addClass(n),i.default(f).addClass(n);var p=u.getTransitionDurationFromElement(s);i.default(s).one(u.TRANSITION_END,(function(){i.default(f).removeClass(n+" "+o).addClass(T),i.default(s).removeClass("active "+o+" "+n),a._isSliding=!1,setTimeout((function(){return i.default(a._element).trigger(h)}),0)})).emulateTransitionEnd(p)}else i.default(s).removeClass(T),i.default(f).addClass(T),this._isSliding=!1,i.default(this._element).trigger(h);c&&this.cycle()}},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data(E),o=a({},A,i.default(this).data());"object"==typeof e&&(o=a({},o,e));var r="string"==typeof e?e:o.slide;if(n||(n=new t(this,o),i.default(this).data(E,n)),"number"==typeof e)n.to(e);else if("string"==typeof r){if("undefined"==typeof n[r])throw new TypeError('No method named "'+r+'"');n[r]()}else o.interval&&o.ride&&(n.pause(),n.cycle())}))},t._dataApiClickHandler=function(e){var n=u.getSelectorFromElement(this);if(n){var o=i.default(n)[0];if(o&&i.default(o).hasClass("carousel")){var r=a({},i.default(o).data(),i.default(this).data()),s=this.getAttribute("data-slide-to");s&&(r.interval=!1),t._jQueryInterface.call(i.default(o),r),s&&i.default(o).data(E).to(s),e.preventDefault()}}},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return A}}]),t}();i.default(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",O._dataApiClickHandler),i.default(window).on("load.bs.carousel.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-ride="carousel"]')),e=0,n=t.length;e<n;e++){var o=i.default(t[e]);O._jQueryInterface.call(o,o.data())}})),i.default.fn[y]=O._jQueryInterface,i.default.fn[y].Constructor=O,i.default.fn[y].noConflict=function(){return i.default.fn[y]=w,O._jQueryInterface};var 
x="collapse",j="bs.collapse",L=i.default.fn[x],P="show",F="collapse",R="collapsing",B="collapsed",H="width",M='[data-toggle="collapse"]',q={toggle:!0,parent:""},Q={toggle:"boolean",parent:"(string|element)"},W=function(){function t(t,e){this._isTransitioning=!1,this._element=t,this._config=this._getConfig(e),this._triggerArray=[].slice.call(document.querySelectorAll('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(M)),i=0,o=n.length;i<o;i++){var r=n[i],a=u.getSelectorFromElement(r),s=[].slice.call(document.querySelectorAll(a)).filter((function(e){return e===t}));null!==a&&s.length>0&&(this._selector=a,this._triggerArray.push(r))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=t.prototype;return e.toggle=function(){i.default(this._element).hasClass(P)?this.hide():this.show()},e.show=function(){var e,n,o=this;if(!(this._isTransitioning||i.default(this._element).hasClass(P)||(this._parent&&0===(e=[].slice.call(this._parent.querySelectorAll(".show, .collapsing")).filter((function(t){return"string"==typeof o._config.parent?t.getAttribute("data-parent")===o._config.parent:t.classList.contains(F)}))).length&&(e=null),e&&(n=i.default(e).not(this._selector).data(j))&&n._isTransitioning))){var r=i.default.Event("show.bs.collapse");if(i.default(this._element).trigger(r),!r.isDefaultPrevented()){e&&(t._jQueryInterface.call(i.default(e).not(this._selector),"hide"),n||i.default(e).data(j,null));var a=this._getDimension();i.default(this._element).removeClass(F).addClass(R),this._element.style[a]=0,this._triggerArray.length&&i.default(this._triggerArray).removeClass(B).attr("aria-expanded",!0),this.setTransitioning(!0);var 
s="scroll"+(a[0].toUpperCase()+a.slice(1)),l=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,(function(){i.default(o._element).removeClass(R).addClass("collapse show"),o._element.style[a]="",o.setTransitioning(!1),i.default(o._element).trigger("shown.bs.collapse")})).emulateTransitionEnd(l),this._element.style[a]=this._element[s]+"px"}}},e.hide=function(){var t=this;if(!this._isTransitioning&&i.default(this._element).hasClass(P)){var e=i.default.Event("hide.bs.collapse");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",u.reflow(this._element),i.default(this._element).addClass(R).removeClass("collapse show");var o=this._triggerArray.length;if(o>0)for(var r=0;r<o;r++){var a=this._triggerArray[r],s=u.getSelectorFromElement(a);null!==s&&(i.default([].slice.call(document.querySelectorAll(s))).hasClass(P)||i.default(a).addClass(B).attr("aria-expanded",!1))}this.setTransitioning(!0),this._element.style[n]="";var l=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,(function(){t.setTransitioning(!1),i.default(t._element).removeClass(R).addClass(F).trigger("hidden.bs.collapse")})).emulateTransitionEnd(l)}}},e.setTransitioning=function(t){this._isTransitioning=t},e.dispose=function(){i.default.removeData(this._element,j),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},e._getConfig=function(t){return(t=a({},q,t)).toggle=Boolean(t.toggle),u.typeCheckConfig(x,t,Q),t},e._getDimension=function(){return i.default(this._element).hasClass(H)?H:"height"},e._getParent=function(){var e,n=this;u.isElement(this._config.parent)?(e=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(e=this._config.parent[0])):e=document.querySelector(this._config.parent);var 
o='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',r=[].slice.call(e.querySelectorAll(o));return i.default(r).each((function(e,i){n._addAriaAndCollapsedClass(t._getTargetFromElement(i),[i])})),e},e._addAriaAndCollapsedClass=function(t,e){var n=i.default(t).hasClass(P);e.length&&i.default(e).toggleClass(B,!n).attr("aria-expanded",n)},t._getTargetFromElement=function(t){var e=u.getSelectorFromElement(t);return e?document.querySelector(e):null},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(j),r=a({},q,n.data(),"object"==typeof e&&e?e:{});if(!o&&r.toggle&&"string"==typeof e&&/show|hide/.test(e)&&(r.toggle=!1),o||(o=new t(this,r),n.data(j,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return q}}]),t}();i.default(document).on("click.bs.collapse.data-api",M,(function(t){"A"===t.currentTarget.tagName&&t.preventDefault();var e=i.default(this),n=u.getSelectorFromElement(this),o=[].slice.call(document.querySelectorAll(n));i.default(o).each((function(){var t=i.default(this),n=t.data(j)?"toggle":e.data();W._jQueryInterface.call(t,n)}))})),i.default.fn[x]=W._jQueryInterface,i.default.fn[x].Constructor=W,i.default.fn[x].noConflict=function(){return i.default.fn[x]=L,W._jQueryInterface};var U="undefined"!=typeof window&&"undefined"!=typeof document&&"undefined"!=typeof navigator,V=function(){for(var t=["Edge","Trident","Firefox"],e=0;e<t.length;e+=1)if(U&&navigator.userAgent.indexOf(t[e])>=0)return 1;return 0}(),Y=U&&window.Promise?function(t){var e=!1;return function(){e||(e=!0,window.Promise.resolve().then((function(){e=!1,t()})))}}:function(t){var e=!1;return function(){e||(e=!0,setTimeout((function(){e=!1,t()}),V))}};function z(t){return t&&"[object Function]"==={}.toString.call(t)}function K(t,e){if(1!==t.nodeType)return[];var 
n=t.ownerDocument.defaultView.getComputedStyle(t,null);return e?n[e]:n}function X(t){return"HTML"===t.nodeName?t:t.parentNode||t.host}function G(t){if(!t)return document.body;switch(t.nodeName){case"HTML":case"BODY":return t.ownerDocument.body;case"#document":return t.body}var e=K(t),n=e.overflow,i=e.overflowX,o=e.overflowY;return/(auto|scroll|overlay)/.test(n+o+i)?t:G(X(t))}function $(t){return t&&t.referenceNode?t.referenceNode:t}var J=U&&!(!window.MSInputMethodContext||!document.documentMode),Z=U&&/MSIE 10/.test(navigator.userAgent);function tt(t){return 11===t?J:10===t?Z:J||Z}function et(t){if(!t)return document.documentElement;for(var e=tt(10)?document.body:null,n=t.offsetParent||null;n===e&&t.nextElementSibling;)n=(t=t.nextElementSibling).offsetParent;var i=n&&n.nodeName;return i&&"BODY"!==i&&"HTML"!==i?-1!==["TH","TD","TABLE"].indexOf(n.nodeName)&&"static"===K(n,"position")?et(n):n:t?t.ownerDocument.documentElement:document.documentElement}function nt(t){return null!==t.parentNode?nt(t.parentNode):t}function it(t,e){if(!(t&&t.nodeType&&e&&e.nodeType))return document.documentElement;var n=t.compareDocumentPosition(e)&Node.DOCUMENT_POSITION_FOLLOWING,i=n?t:e,o=n?e:t,r=document.createRange();r.setStart(i,0),r.setEnd(o,0);var a,s,l=r.commonAncestorContainer;if(t!==l&&e!==l||i.contains(o))return"BODY"===(s=(a=l).nodeName)||"HTML"!==s&&et(a.firstElementChild)!==a?et(l):l;var u=nt(t);return u.host?it(u.host,e):it(t,nt(e).host)}function ot(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"top",n="top"===e?"scrollTop":"scrollLeft",i=t.nodeName;if("BODY"===i||"HTML"===i){var o=t.ownerDocument.documentElement,r=t.ownerDocument.scrollingElement||o;return r[n]}return t[n]}function rt(t,e){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=ot(e,"top"),o=ot(e,"left"),r=n?-1:1;return t.top+=i*r,t.bottom+=i*r,t.left+=o*r,t.right+=o*r,t}function at(t,e){var n="x"===e?"Left":"Top",i="Left"===n?"Right":"Bottom";return 
parseFloat(t["border"+n+"Width"])+parseFloat(t["border"+i+"Width"])}function st(t,e,n,i){return Math.max(e["offset"+t],e["scroll"+t],n["client"+t],n["offset"+t],n["scroll"+t],tt(10)?parseInt(n["offset"+t])+parseInt(i["margin"+("Height"===t?"Top":"Left")])+parseInt(i["margin"+("Height"===t?"Bottom":"Right")]):0)}function lt(t){var e=t.body,n=t.documentElement,i=tt(10)&&getComputedStyle(n);return{height:st("Height",e,n,i),width:st("Width",e,n,i)}}var ut=function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")},ft=function(){function t(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}return function(e,n,i){return n&&t(e.prototype,n),i&&t(e,i),e}}(),dt=function(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t},ct=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t};function ht(t){return ct({},t,{right:t.left+t.width,bottom:t.top+t.height})}function pt(t){var e={};try{if(tt(10)){e=t.getBoundingClientRect();var n=ot(t,"top"),i=ot(t,"left");e.top+=n,e.left+=i,e.bottom+=n,e.right+=i}else e=t.getBoundingClientRect()}catch(t){}var o={left:e.left,top:e.top,width:e.right-e.left,height:e.bottom-e.top},r="HTML"===t.nodeName?lt(t.ownerDocument):{},a=r.width||t.clientWidth||o.width,s=r.height||t.clientHeight||o.height,l=t.offsetWidth-a,u=t.offsetHeight-s;if(l||u){var f=K(t);l-=at(f,"x"),u-=at(f,"y"),o.width-=l,o.height-=u}return ht(o)}function mt(t,e){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=tt(10),o="HTML"===e.nodeName,r=pt(t),a=pt(e),s=G(t),l=K(e),u=parseFloat(l.borderTopWidth),f=parseFloat(l.borderLeftWidth);n&&o&&(a.top=Math.max(a.top,0),a.left=Math.max(a.left,0));var 
d=ht({top:r.top-a.top-u,left:r.left-a.left-f,width:r.width,height:r.height});if(d.marginTop=0,d.marginLeft=0,!i&&o){var c=parseFloat(l.marginTop),h=parseFloat(l.marginLeft);d.top-=u-c,d.bottom-=u-c,d.left-=f-h,d.right-=f-h,d.marginTop=c,d.marginLeft=h}return(i&&!n?e.contains(s):e===s&&"BODY"!==s.nodeName)&&(d=rt(d,e)),d}function gt(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=t.ownerDocument.documentElement,i=mt(t,n),o=Math.max(n.clientWidth,window.innerWidth||0),r=Math.max(n.clientHeight,window.innerHeight||0),a=e?0:ot(n),s=e?0:ot(n,"left"),l={top:a-i.top+i.marginTop,left:s-i.left+i.marginLeft,width:o,height:r};return ht(l)}function _t(t){var e=t.nodeName;if("BODY"===e||"HTML"===e)return!1;if("fixed"===K(t,"position"))return!0;var n=X(t);return!!n&&_t(n)}function vt(t){if(!t||!t.parentElement||tt())return document.documentElement;for(var e=t.parentElement;e&&"none"===K(e,"transform");)e=e.parentElement;return e||document.documentElement}function bt(t,e,n,i){var o=arguments.length>4&&void 0!==arguments[4]&&arguments[4],r={top:0,left:0},a=o?vt(t):it(t,$(e));if("viewport"===i)r=gt(a,o);else{var s=void 0;"scrollParent"===i?"BODY"===(s=G(X(e))).nodeName&&(s=t.ownerDocument.documentElement):s="window"===i?t.ownerDocument.documentElement:i;var l=mt(s,a,o);if("HTML"!==s.nodeName||_t(a))r=l;else{var u=lt(t.ownerDocument),f=u.height,d=u.width;r.top+=l.top-l.marginTop,r.bottom=f+l.top,r.left+=l.left-l.marginLeft,r.right=d+l.left}}var c="number"==typeof(n=n||0);return r.left+=c?n:n.left||0,r.top+=c?n:n.top||0,r.right-=c?n:n.right||0,r.bottom-=c?n:n.bottom||0,r}function yt(t){return t.width*t.height}function Et(t,e,n,i,o){var r=arguments.length>5&&void 0!==arguments[5]?arguments[5]:0;if(-1===t.indexOf("auto"))return t;var a=bt(n,i,r,o),s={top:{width:a.width,height:e.top-a.top},right:{width:a.right-e.right,height:a.height},bottom:{width:a.width,height:a.bottom-e.bottom},left:{width:e.left-a.left,height:a.height}},l=Object.keys(s).map((function(t){return 
ct({key:t},s[t],{area:yt(s[t])})})).sort((function(t,e){return e.area-t.area})),u=l.filter((function(t){var e=t.width,i=t.height;return e>=n.clientWidth&&i>=n.clientHeight})),f=u.length>0?u[0].key:l[0].key,d=t.split("-")[1];return f+(d?"-"+d:"")}function wt(t,e,n){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:null,o=i?vt(e):it(e,$(n));return mt(n,o,i)}function Tt(t){var e=t.ownerDocument.defaultView.getComputedStyle(t),n=parseFloat(e.marginTop||0)+parseFloat(e.marginBottom||0),i=parseFloat(e.marginLeft||0)+parseFloat(e.marginRight||0);return{width:t.offsetWidth+i,height:t.offsetHeight+n}}function Ct(t){var e={left:"right",right:"left",bottom:"top",top:"bottom"};return t.replace(/left|right|bottom|top/g,(function(t){return e[t]}))}function St(t,e,n){n=n.split("-")[0];var i=Tt(t),o={width:i.width,height:i.height},r=-1!==["right","left"].indexOf(n),a=r?"top":"left",s=r?"left":"top",l=r?"height":"width",u=r?"width":"height";return o[a]=e[a]+e[l]/2-i[l]/2,o[s]=n===s?e[s]-i[u]:e[Ct(s)],o}function Nt(t,e){return Array.prototype.find?t.find(e):t.filter(e)[0]}function Dt(t,e,n){return(void 0===n?t:t.slice(0,function(t,e,n){if(Array.prototype.findIndex)return t.findIndex((function(t){return t.name===n}));var i=Nt(t,(function(t){return t.name===n}));return t.indexOf(i)}(t,0,n))).forEach((function(t){t.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var n=t.function||t.fn;t.enabled&&z(n)&&(e.offsets.popper=ht(e.offsets.popper),e.offsets.reference=ht(e.offsets.reference),e=n(e,t))})),e}function At(){if(!this.state.isDestroyed){var 
t={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};t.offsets.reference=wt(this.state,this.popper,this.reference,this.options.positionFixed),t.placement=Et(this.options.placement,t.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),t.originalPlacement=t.placement,t.positionFixed=this.options.positionFixed,t.offsets.popper=St(this.popper,t.offsets.reference,t.placement),t.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",t=Dt(this.modifiers,t),this.state.isCreated?this.options.onUpdate(t):(this.state.isCreated=!0,this.options.onCreate(t))}}function kt(t,e){return t.some((function(t){var n=t.name;return t.enabled&&n===e}))}function It(t){for(var e=[!1,"ms","Webkit","Moz","O"],n=t.charAt(0).toUpperCase()+t.slice(1),i=0;i<e.length;i++){var o=e[i],r=o?""+o+n:t;if("undefined"!=typeof document.body.style[r])return r}return null}function Ot(){return this.state.isDestroyed=!0,kt(this.modifiers,"applyStyle")&&(this.popper.removeAttribute("x-placement"),this.popper.style.position="",this.popper.style.top="",this.popper.style.left="",this.popper.style.right="",this.popper.style.bottom="",this.popper.style.willChange="",this.popper.style[It("transform")]=""),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}function xt(t){var e=t.ownerDocument;return e?e.defaultView:window}function jt(t,e,n,i){var o="BODY"===t.nodeName,r=o?t.ownerDocument.defaultView:t;r.addEventListener(e,n,{passive:!0}),o||jt(G(r.parentNode),e,n,i),i.push(r)}function Lt(t,e,n,i){n.updateBound=i,xt(t).addEventListener("resize",n.updateBound,{passive:!0});var o=G(t);return jt(o,"scroll",n.updateBound,n.scrollParents),n.scrollElement=o,n.eventsEnabled=!0,n}function Pt(){this.state.eventsEnabled||(this.state=Lt(this.reference,this.options,this.state,this.scheduleUpdate))}function Ft(){var 
t,e;this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=(t=this.reference,e=this.state,xt(t).removeEventListener("resize",e.updateBound),e.scrollParents.forEach((function(t){t.removeEventListener("scroll",e.updateBound)})),e.updateBound=null,e.scrollParents=[],e.scrollElement=null,e.eventsEnabled=!1,e))}function Rt(t){return""!==t&&!isNaN(parseFloat(t))&&isFinite(t)}function Bt(t,e){Object.keys(e).forEach((function(n){var i="";-1!==["width","height","top","right","bottom","left"].indexOf(n)&&Rt(e[n])&&(i="px"),t.style[n]=e[n]+i}))}var Ht=U&&/Firefox/i.test(navigator.userAgent);function Mt(t,e,n){var i=Nt(t,(function(t){return t.name===e})),o=!!i&&t.some((function(t){return t.name===n&&t.enabled&&t.order<i.order}));if(!o){var r="`"+e+"`",a="`"+n+"`";console.warn(a+" modifier is required by "+r+" modifier in order to work, be sure to include it before "+r+"!")}return o}var qt=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],Qt=qt.slice(3);function Wt(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=Qt.indexOf(t),i=Qt.slice(n+1).concat(Qt.slice(0,n));return e?i.reverse():i}var Ut={placement:"bottom",positionFixed:!1,eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(t){var e=t.placement,n=e.split("-")[0],i=e.split("-")[1];if(i){var o=t.offsets,r=o.reference,a=o.popper,s=-1!==["bottom","top"].indexOf(n),l=s?"left":"top",u=s?"width":"height",f={start:dt({},l,r[l]),end:dt({},l,r[l]+r[u]-a[u])};t.offsets.popper=ct({},a,f[i])}return t}},offset:{order:200,enabled:!0,fn:function(t,e){var n,i=e.offset,o=t.placement,r=t.offsets,a=r.popper,s=r.reference,l=o.split("-")[0];return n=Rt(+i)?[+i,0]:function(t,e,n,i){var o=[0,0],r=-1!==["right","left"].indexOf(i),a=t.split(/(\+|\-)/).map((function(t){return 
t.trim()})),s=a.indexOf(Nt(a,(function(t){return-1!==t.search(/,|\s/)})));a[s]&&-1===a[s].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var l=/\s*,\s*|\s+/,u=-1!==s?[a.slice(0,s).concat([a[s].split(l)[0]]),[a[s].split(l)[1]].concat(a.slice(s+1))]:[a];return u=u.map((function(t,i){var o=(1===i?!r:r)?"height":"width",a=!1;return t.reduce((function(t,e){return""===t[t.length-1]&&-1!==["+","-"].indexOf(e)?(t[t.length-1]=e,a=!0,t):a?(t[t.length-1]+=e,a=!1,t):t.concat(e)}),[]).map((function(t){return function(t,e,n,i){var o=t.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),r=+o[1],a=o[2];return r?0===a.indexOf("%")?ht("%p"===a?n:i)[e]/100*r:"vh"===a||"vw"===a?("vh"===a?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*r:r:t}(t,o,e,n)}))})),u.forEach((function(t,e){t.forEach((function(n,i){Rt(n)&&(o[e]+=n*("-"===t[i-1]?-1:1))}))})),o}(i,a,s,l),"left"===l?(a.top+=n[0],a.left-=n[1]):"right"===l?(a.top+=n[0],a.left+=n[1]):"top"===l?(a.left+=n[0],a.top-=n[1]):"bottom"===l&&(a.left+=n[0],a.top+=n[1]),t.popper=a,t},offset:0},preventOverflow:{order:300,enabled:!0,fn:function(t,e){var n=e.boundariesElement||et(t.instance.popper);t.instance.reference===n&&(n=et(n));var i=It("transform"),o=t.instance.popper.style,r=o.top,a=o.left,s=o[i];o.top="",o.left="",o[i]="";var l=bt(t.instance.popper,t.instance.reference,e.padding,n,t.positionFixed);o.top=r,o.left=a,o[i]=s,e.boundaries=l;var u=e.priority,f=t.offsets.popper,d={primary:function(t){var n=f[t];return f[t]<l[t]&&!e.escapeWithReference&&(n=Math.max(f[t],l[t])),dt({},t,n)},secondary:function(t){var n="right"===t?"left":"top",i=f[n];return f[t]>l[t]&&!e.escapeWithReference&&(i=Math.min(f[n],l[t]-("right"===t?f.width:f.height))),dt({},n,i)}};return u.forEach((function(t){var 
e=-1!==["left","top"].indexOf(t)?"primary":"secondary";f=ct({},f,d[e](t))})),t.offsets.popper=f,t},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(t){var e=t.offsets,n=e.popper,i=e.reference,o=t.placement.split("-")[0],r=Math.floor,a=-1!==["top","bottom"].indexOf(o),s=a?"right":"bottom",l=a?"left":"top",u=a?"width":"height";return n[s]<r(i[l])&&(t.offsets.popper[l]=r(i[l])-n[u]),n[l]>r(i[s])&&(t.offsets.popper[l]=r(i[s])),t}},arrow:{order:500,enabled:!0,fn:function(t,e){var n;if(!Mt(t.instance.modifiers,"arrow","keepTogether"))return t;var i=e.element;if("string"==typeof i){if(!(i=t.instance.popper.querySelector(i)))return t}else if(!t.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),t;var o=t.placement.split("-")[0],r=t.offsets,a=r.popper,s=r.reference,l=-1!==["left","right"].indexOf(o),u=l?"height":"width",f=l?"Top":"Left",d=f.toLowerCase(),c=l?"left":"top",h=l?"bottom":"right",p=Tt(i)[u];s[h]-p<a[d]&&(t.offsets.popper[d]-=a[d]-(s[h]-p)),s[d]+p>a[h]&&(t.offsets.popper[d]+=s[d]+p-a[h]),t.offsets.popper=ht(t.offsets.popper);var m=s[d]+s[u]/2-p/2,g=K(t.instance.popper),_=parseFloat(g["margin"+f]),v=parseFloat(g["border"+f+"Width"]),b=m-t.offsets.popper[d]-_-v;return b=Math.max(Math.min(a[u]-p,b),0),t.arrowElement=i,t.offsets.arrow=(dt(n={},d,Math.round(b)),dt(n,c,""),n),t},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(t,e){if(kt(t.instance.modifiers,"inner"))return t;if(t.flipped&&t.placement===t.originalPlacement)return t;var n=bt(t.instance.popper,t.instance.reference,e.padding,e.boundariesElement,t.positionFixed),i=t.placement.split("-")[0],o=Ct(i),r=t.placement.split("-")[1]||"",a=[];switch(e.behavior){case"flip":a=[i,o];break;case"clockwise":a=Wt(i);break;case"counterclockwise":a=Wt(i,!0);break;default:a=e.behavior}return a.forEach((function(s,l){if(i!==s||a.length===l+1)return 
t;i=t.placement.split("-")[0],o=Ct(i);var u=t.offsets.popper,f=t.offsets.reference,d=Math.floor,c="left"===i&&d(u.right)>d(f.left)||"right"===i&&d(u.left)<d(f.right)||"top"===i&&d(u.bottom)>d(f.top)||"bottom"===i&&d(u.top)<d(f.bottom),h=d(u.left)<d(n.left),p=d(u.right)>d(n.right),m=d(u.top)<d(n.top),g=d(u.bottom)>d(n.bottom),_="left"===i&&h||"right"===i&&p||"top"===i&&m||"bottom"===i&&g,v=-1!==["top","bottom"].indexOf(i),b=!!e.flipVariations&&(v&&"start"===r&&h||v&&"end"===r&&p||!v&&"start"===r&&m||!v&&"end"===r&&g),y=!!e.flipVariationsByContent&&(v&&"start"===r&&p||v&&"end"===r&&h||!v&&"start"===r&&g||!v&&"end"===r&&m),E=b||y;(c||_||E)&&(t.flipped=!0,(c||_)&&(i=a[l+1]),E&&(r=function(t){return"end"===t?"start":"start"===t?"end":t}(r)),t.placement=i+(r?"-"+r:""),t.offsets.popper=ct({},t.offsets.popper,St(t.instance.popper,t.offsets.reference,t.placement)),t=Dt(t.instance.modifiers,t,"flip"))})),t},behavior:"flip",padding:5,boundariesElement:"viewport",flipVariations:!1,flipVariationsByContent:!1},inner:{order:700,enabled:!1,fn:function(t){var e=t.placement,n=e.split("-")[0],i=t.offsets,o=i.popper,r=i.reference,a=-1!==["left","right"].indexOf(n),s=-1===["top","left"].indexOf(n);return o[a?"left":"top"]=r[n]-(s?o[a?"width":"height"]:0),t.placement=Ct(e),t.offsets.popper=ht(o),t}},hide:{order:800,enabled:!0,fn:function(t){if(!Mt(t.instance.modifiers,"hide","preventOverflow"))return t;var e=t.offsets.reference,n=Nt(t.instance.modifiers,(function(t){return"preventOverflow"===t.name})).boundaries;if(e.bottom<n.top||e.left>n.right||e.top>n.bottom||e.right<n.left){if(!0===t.hide)return t;t.hide=!0,t.attributes["x-out-of-boundaries"]=""}else{if(!1===t.hide)return t;t.hide=!1,t.attributes["x-out-of-boundaries"]=!1}return t}},computeStyle:{order:850,enabled:!0,fn:function(t,e){var n=e.x,i=e.y,o=t.offsets.popper,r=Nt(t.instance.modifiers,(function(t){return"applyStyle"===t.name})).gpuAcceleration;void 0!==r&&console.warn("WARNING: `gpuAcceleration` option moved to 
`computeStyle` modifier and will not be supported in future versions of Popper.js!");var a,s,l=void 0!==r?r:e.gpuAcceleration,u=et(t.instance.popper),f=pt(u),d={position:o.position},c=function(t,e){var n=t.offsets,i=n.popper,o=n.reference,r=Math.round,a=Math.floor,s=function(t){return t},l=r(o.width),u=r(i.width),f=-1!==["left","right"].indexOf(t.placement),d=-1!==t.placement.indexOf("-"),c=e?f||d||l%2==u%2?r:a:s,h=e?r:s;return{left:c(l%2==1&&u%2==1&&!d&&e?i.left-1:i.left),top:h(i.top),bottom:h(i.bottom),right:c(i.right)}}(t,window.devicePixelRatio<2||!Ht),h="bottom"===n?"top":"bottom",p="right"===i?"left":"right",m=It("transform");if(s="bottom"===h?"HTML"===u.nodeName?-u.clientHeight+c.bottom:-f.height+c.bottom:c.top,a="right"===p?"HTML"===u.nodeName?-u.clientWidth+c.right:-f.width+c.right:c.left,l&&m)d[m]="translate3d("+a+"px, "+s+"px, 0)",d[h]=0,d[p]=0,d.willChange="transform";else{var g="bottom"===h?-1:1,_="right"===p?-1:1;d[h]=s*g,d[p]=a*_,d.willChange=h+", "+p}var v={"x-placement":t.placement};return t.attributes=ct({},v,t.attributes),t.styles=ct({},d,t.styles),t.arrowStyles=ct({},t.offsets.arrow,t.arrowStyles),t},gpuAcceleration:!0,x:"bottom",y:"right"},applyStyle:{order:900,enabled:!0,fn:function(t){var e,n;return Bt(t.instance.popper,t.styles),e=t.instance.popper,n=t.attributes,Object.keys(n).forEach((function(t){!1!==n[t]?e.setAttribute(t,n[t]):e.removeAttribute(t)})),t.arrowElement&&Object.keys(t.arrowStyles).length&&Bt(t.arrowElement,t.arrowStyles),t},onLoad:function(t,e,n,i,o){var r=wt(o,e,t,n.positionFixed),a=Et(n.placement,r,e,t,n.modifiers.flip.boundariesElement,n.modifiers.flip.padding);return e.setAttribute("x-placement",a),Bt(e,{position:n.positionFixed?"fixed":"absolute"}),n},gpuAcceleration:void 0}}},Vt=function(){function t(e,n){var i=this,o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};ut(this,t),this.scheduleUpdate=function(){return 
requestAnimationFrame(i.update)},this.update=Y(this.update.bind(this)),this.options=ct({},t.Defaults,o),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=e&&e.jquery?e[0]:e,this.popper=n&&n.jquery?n[0]:n,this.options.modifiers={},Object.keys(ct({},t.Defaults.modifiers,o.modifiers)).forEach((function(e){i.options.modifiers[e]=ct({},t.Defaults.modifiers[e]||{},o.modifiers?o.modifiers[e]:{})})),this.modifiers=Object.keys(this.options.modifiers).map((function(t){return ct({name:t},i.options.modifiers[t])})).sort((function(t,e){return t.order-e.order})),this.modifiers.forEach((function(t){t.enabled&&z(t.onLoad)&&t.onLoad(i.reference,i.popper,i.options,t,i.state)})),this.update();var r=this.options.eventsEnabled;r&&this.enableEventListeners(),this.state.eventsEnabled=r}return ft(t,[{key:"update",value:function(){return At.call(this)}},{key:"destroy",value:function(){return Ot.call(this)}},{key:"enableEventListeners",value:function(){return Pt.call(this)}},{key:"disableEventListeners",value:function(){return Ft.call(this)}}]),t}();Vt.Utils=("undefined"!=typeof window?window:global).PopperUtils,Vt.placements=qt,Vt.Defaults=Ut;var Yt=Vt,zt="dropdown",Kt="bs.dropdown",Xt=i.default.fn[zt],Gt=new RegExp("38|40|27"),$t="disabled",Jt="show",Zt="dropdown-menu-right",te="hide.bs.dropdown",ee="hidden.bs.dropdown",ne="click.bs.dropdown.data-api",ie="keydown.bs.dropdown.data-api",oe='[data-toggle="dropdown"]',re=".dropdown-menu",ae={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic",popperConfig:null},se={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string",popperConfig:"(null|object)"},le=function(){function t(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var e=t.prototype;return 
e.toggle=function(){if(!this._element.disabled&&!i.default(this._element).hasClass($t)){var e=i.default(this._menu).hasClass(Jt);t._clearMenus(),e||this.show(!0)}},e.show=function(e){if(void 0===e&&(e=!1),!(this._element.disabled||i.default(this._element).hasClass($t)||i.default(this._menu).hasClass(Jt))){var n={relatedTarget:this._element},o=i.default.Event("show.bs.dropdown",n),r=t._getParentFromElement(this._element);if(i.default(r).trigger(o),!o.isDefaultPrevented()){if(!this._inNavbar&&e){if("undefined"==typeof Yt)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");var a=this._element;"parent"===this._config.reference?a=r:u.isElement(this._config.reference)&&(a=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(a=this._config.reference[0])),"scrollParent"!==this._config.boundary&&i.default(r).addClass("position-static"),this._popper=new Yt(a,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===i.default(r).closest(".navbar-nav").length&&i.default(document.body).children().on("mouseover",null,i.default.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),i.default(this._menu).toggleClass(Jt),i.default(r).toggleClass(Jt).trigger(i.default.Event("shown.bs.dropdown",n))}}},e.hide=function(){if(!this._element.disabled&&!i.default(this._element).hasClass($t)&&i.default(this._menu).hasClass(Jt)){var 
e={relatedTarget:this._element},n=i.default.Event(te,e),o=t._getParentFromElement(this._element);i.default(o).trigger(n),n.isDefaultPrevented()||(this._popper&&this._popper.destroy(),i.default(this._menu).toggleClass(Jt),i.default(o).toggleClass(Jt).trigger(i.default.Event(ee,e)))}},e.dispose=function(){i.default.removeData(this._element,Kt),i.default(this._element).off(".bs.dropdown"),this._element=null,this._menu=null,null!==this._popper&&(this._popper.destroy(),this._popper=null)},e.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},e._addEventListeners=function(){var t=this;i.default(this._element).on("click.bs.dropdown",(function(e){e.preventDefault(),e.stopPropagation(),t.toggle()}))},e._getConfig=function(t){return t=a({},this.constructor.Default,i.default(this._element).data(),t),u.typeCheckConfig(zt,t,this.constructor.DefaultType),t},e._getMenuElement=function(){if(!this._menu){var e=t._getParentFromElement(this._element);e&&(this._menu=e.querySelector(re))}return this._menu},e._getPlacement=function(){var t=i.default(this._element.parentNode),e="bottom-start";return t.hasClass("dropup")?e=i.default(this._menu).hasClass(Zt)?"top-end":"top-start":t.hasClass("dropright")?e="right-start":t.hasClass("dropleft")?e="left-start":i.default(this._menu).hasClass(Zt)&&(e="bottom-end"),e},e._detectNavbar=function(){return i.default(this._element).closest(".navbar").length>0},e._getOffset=function(){var t=this,e={};return"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=a({},e.offsets,t._config.offset(e.offsets,t._element)),e}:e.offset=this._config.offset,e},e._getPopperConfig=function(){var 
t={placement:this._getPlacement(),modifiers:{offset:this._getOffset(),flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(t.modifiers.applyStyle={enabled:!1}),a({},t,this._config.popperConfig)},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data(Kt);if(n||(n=new t(this,"object"==typeof e?e:null),i.default(this).data(Kt,n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},t._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var n=[].slice.call(document.querySelectorAll(oe)),o=0,r=n.length;o<r;o++){var a=t._getParentFromElement(n[o]),s=i.default(n[o]).data(Kt),l={relatedTarget:n[o]};if(e&&"click"===e.type&&(l.clickEvent=e),s){var u=s._menu;if(i.default(a).hasClass(Jt)&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&i.default.contains(a,e.target))){var f=i.default.Event(te,l);i.default(a).trigger(f),f.isDefaultPrevented()||("ontouchstart"in document.documentElement&&i.default(document.body).children().off("mouseover",null,i.default.noop),n[o].setAttribute("aria-expanded","false"),s._popper&&s._popper.destroy(),i.default(u).removeClass(Jt),i.default(a).removeClass(Jt).trigger(i.default.Event(ee,l)))}}}},t._getParentFromElement=function(t){var e,n=u.getSelectorFromElement(t);return n&&(e=document.querySelector(n)),e||t.parentNode},t._dataApiKeydownHandler=function(e){if(!(/input|textarea/i.test(e.target.tagName)?32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||i.default(e.target).closest(re).length):!Gt.test(e.which))&&!this.disabled&&!i.default(this).hasClass($t)){var n=t._getParentFromElement(this),o=i.default(n).hasClass(Jt);if(o||27!==e.which){if(e.preventDefault(),e.stopPropagation(),!o||27===e.which||32===e.which)return 27===e.which&&i.default(n.querySelector(oe)).trigger("focus"),void 
i.default(this).trigger("click");var r=[].slice.call(n.querySelectorAll(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)")).filter((function(t){return i.default(t).is(":visible")}));if(0!==r.length){var a=r.indexOf(e.target);38===e.which&&a>0&&a--,40===e.which&&a<r.length-1&&a++,a<0&&(a=0),r[a].focus()}}}},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return ae}},{key:"DefaultType",get:function(){return se}}]),t}();i.default(document).on(ie,oe,le._dataApiKeydownHandler).on(ie,re,le._dataApiKeydownHandler).on(ne+" keyup.bs.dropdown.data-api",le._clearMenus).on(ne,oe,(function(t){t.preventDefault(),t.stopPropagation(),le._jQueryInterface.call(i.default(this),"toggle")})).on(ne,".dropdown form",(function(t){t.stopPropagation()})),i.default.fn[zt]=le._jQueryInterface,i.default.fn[zt].Constructor=le,i.default.fn[zt].noConflict=function(){return i.default.fn[zt]=Xt,le._jQueryInterface};var ue="bs.modal",fe=i.default.fn.modal,de="modal-open",ce="fade",he="show",pe="modal-static",me="hidden.bs.modal",ge="show.bs.modal",_e="focusin.bs.modal",ve="resize.bs.modal",be="click.dismiss.bs.modal",ye="keydown.dismiss.bs.modal",Ee="mousedown.dismiss.bs.modal",we=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",Te={backdrop:!0,keyboard:!0,focus:!0,show:!0},Ce={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},Se=function(){function t(t,e){this._config=this._getConfig(e),this._element=t,this._dialog=t.querySelector(".modal-dialog"),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollbarWidth=0}var e=t.prototype;return e.toggle=function(t){return this._isShown?this.hide():this.show(t)},e.show=function(t){var e=this;if(!this._isShown&&!this._isTransitioning){var 
n=i.default.Event(ge,{relatedTarget:t});i.default(this._element).trigger(n),n.isDefaultPrevented()||(this._isShown=!0,i.default(this._element).hasClass(ce)&&(this._isTransitioning=!0),this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),i.default(this._element).on(be,'[data-dismiss="modal"]',(function(t){return e.hide(t)})),i.default(this._dialog).on(Ee,(function(){i.default(e._element).one("mouseup.dismiss.bs.modal",(function(t){i.default(t.target).is(e._element)&&(e._ignoreBackdropClick=!0)}))})),this._showBackdrop((function(){return e._showElement(t)})))}},e.hide=function(t){var e=this;if(t&&t.preventDefault(),this._isShown&&!this._isTransitioning){var n=i.default.Event("hide.bs.modal");if(i.default(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var o=i.default(this._element).hasClass(ce);if(o&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),i.default(document).off(_e),i.default(this._element).removeClass(he),i.default(this._element).off(be),i.default(this._dialog).off(Ee),o){var r=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,(function(t){return e._hideModal(t)})).emulateTransitionEnd(r)}else this._hideModal()}}},e.dispose=function(){[window,this._element,this._dialog].forEach((function(t){return i.default(t).off(".bs.modal")})),i.default(document).off(_e),i.default.removeData(this._element,ue),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._isTransitioning=null,this._scrollbarWidth=null},e.handleUpdate=function(){this._adjustDialog()},e._getConfig=function(t){return t=a({},Te,t),u.typeCheckConfig("modal",t,Ce),t},e._triggerBackdropTransition=function(){var t=this,e=i.default.Event("hidePrevented.bs.modal");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){var 
n=this._element.scrollHeight>document.documentElement.clientHeight;n||(this._element.style.overflowY="hidden"),this._element.classList.add(pe);var o=u.getTransitionDurationFromElement(this._dialog);i.default(this._element).off(u.TRANSITION_END),i.default(this._element).one(u.TRANSITION_END,(function(){t._element.classList.remove(pe),n||i.default(t._element).one(u.TRANSITION_END,(function(){t._element.style.overflowY=""})).emulateTransitionEnd(t._element,o)})).emulateTransitionEnd(o),this._element.focus()}},e._showElement=function(t){var e=this,n=i.default(this._element).hasClass(ce),o=this._dialog?this._dialog.querySelector(".modal-body"):null;this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),i.default(this._dialog).hasClass("modal-dialog-scrollable")&&o?o.scrollTop=0:this._element.scrollTop=0,n&&u.reflow(this._element),i.default(this._element).addClass(he),this._config.focus&&this._enforceFocus();var r=i.default.Event("shown.bs.modal",{relatedTarget:t}),a=function(){e._config.focus&&e._element.focus(),e._isTransitioning=!1,i.default(e._element).trigger(r)};if(n){var s=u.getTransitionDurationFromElement(this._dialog);i.default(this._dialog).one(u.TRANSITION_END,a).emulateTransitionEnd(s)}else a()},e._enforceFocus=function(){var t=this;i.default(document).off(_e).on(_e,(function(e){document!==e.target&&t._element!==e.target&&0===i.default(t._element).has(e.target).length&&t._element.focus()}))},e._setEscapeEvent=function(){var t=this;this._isShown?i.default(this._element).on(ye,(function(e){t._config.keyboard&&27===e.which?(e.preventDefault(),t.hide()):t._config.keyboard||27!==e.which||t._triggerBackdropTransition()})):this._isShown||i.default(this._element).off(ye)},e._setResizeEvent=function(){var 
t=this;this._isShown?i.default(window).on(ve,(function(e){return t.handleUpdate(e)})):i.default(window).off(ve)},e._hideModal=function(){var t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._showBackdrop((function(){i.default(document.body).removeClass(de),t._resetAdjustments(),t._resetScrollbar(),i.default(t._element).trigger(me)}))},e._removeBackdrop=function(){this._backdrop&&(i.default(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(t){var e=this,n=i.default(this._element).hasClass(ce)?ce:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className="modal-backdrop",n&&this._backdrop.classList.add(n),i.default(this._backdrop).appendTo(document.body),i.default(this._element).on(be,(function(t){e._ignoreBackdropClick?e._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===e._config.backdrop?e._triggerBackdropTransition():e.hide())})),n&&u.reflow(this._backdrop),i.default(this._backdrop).addClass(he),!t)return;if(!n)return void t();var o=u.getTransitionDurationFromElement(this._backdrop);i.default(this._backdrop).one(u.TRANSITION_END,t).emulateTransitionEnd(o)}else if(!this._isShown&&this._backdrop){i.default(this._backdrop).removeClass(he);var r=function(){e._removeBackdrop(),t&&t()};if(i.default(this._element).hasClass(ce)){var a=u.getTransitionDurationFromElement(this._backdrop);i.default(this._backdrop).one(u.TRANSITION_END,r).emulateTransitionEnd(a)}else r()}else t&&t()},e._adjustDialog=function(){var 
t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=Math.round(t.left+t.right)<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},e._setScrollbar=function(){var t=this;if(this._isBodyOverflowing){var e=[].slice.call(document.querySelectorAll(we)),n=[].slice.call(document.querySelectorAll(".sticky-top"));i.default(e).each((function(e,n){var o=n.style.paddingRight,r=i.default(n).css("padding-right");i.default(n).data("padding-right",o).css("padding-right",parseFloat(r)+t._scrollbarWidth+"px")})),i.default(n).each((function(e,n){var o=n.style.marginRight,r=i.default(n).css("margin-right");i.default(n).data("margin-right",o).css("margin-right",parseFloat(r)-t._scrollbarWidth+"px")}));var o=document.body.style.paddingRight,r=i.default(document.body).css("padding-right");i.default(document.body).data("padding-right",o).css("padding-right",parseFloat(r)+this._scrollbarWidth+"px")}i.default(document.body).addClass(de)},e._resetScrollbar=function(){var t=[].slice.call(document.querySelectorAll(we));i.default(t).each((function(t,e){var n=i.default(e).data("padding-right");i.default(e).removeData("padding-right"),e.style.paddingRight=n||""}));var e=[].slice.call(document.querySelectorAll(".sticky-top"));i.default(e).each((function(t,e){var n=i.default(e).data("margin-right");"undefined"!=typeof n&&i.default(e).css("margin-right",n).removeData("margin-right")}));var n=i.default(document.body).data("padding-right");i.default(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},e._getScrollbarWidth=function(){var 
t=document.createElement("div");t.className="modal-scrollbar-measure",document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},t._jQueryInterface=function(e,n){return this.each((function(){var o=i.default(this).data(ue),r=a({},Te,i.default(this).data(),"object"==typeof e&&e?e:{});if(o||(o=new t(this,r),i.default(this).data(ue,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e](n)}else r.show&&o.show(n)}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Te}}]),t}();i.default(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',(function(t){var e,n=this,o=u.getSelectorFromElement(this);o&&(e=document.querySelector(o));var r=i.default(e).data(ue)?"toggle":a({},i.default(e).data(),i.default(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||t.preventDefault();var s=i.default(e).one(ge,(function(t){t.isDefaultPrevented()||s.one(me,(function(){i.default(n).is(":visible")&&n.focus()}))}));Se._jQueryInterface.call(i.default(e),r,this)})),i.default.fn.modal=Se._jQueryInterface,i.default.fn.modal.Constructor=Se,i.default.fn.modal.noConflict=function(){return i.default.fn.modal=fe,Se._jQueryInterface};var Ne=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],De=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,Ae=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i;function ke(t,e,n){if(0===t.length)return t;if(n&&"function"==typeof n)return n(t);for(var i=(new window.DOMParser).parseFromString(t,"text/html"),o=Object.keys(e),r=[].slice.call(i.body.querySelectorAll("*")),a=function(t,n){var i=r[t],a=i.nodeName.toLowerCase();if(-1===o.indexOf(i.nodeName.toLowerCase()))return i.parentNode.removeChild(i),"continue";var 
s=[].slice.call(i.attributes),l=[].concat(e["*"]||[],e[a]||[]);s.forEach((function(t){(function(t,e){var n=t.nodeName.toLowerCase();if(-1!==e.indexOf(n))return-1===Ne.indexOf(n)||Boolean(De.test(t.nodeValue)||Ae.test(t.nodeValue));for(var i=e.filter((function(t){return t instanceof RegExp})),o=0,r=i.length;o<r;o++)if(i[o].test(n))return!0;return!1})(t,l)||i.removeAttribute(t.nodeName)}))},s=0,l=r.length;s<l;s++)a(s);return i.body.innerHTML}var Ie="tooltip",Oe="bs.tooltip",xe=i.default.fn.tooltip,je=new RegExp("(^|\\s)bs-tooltip\\S+","g"),Le=["sanitize","whiteList","sanitizeFn"],Pe="fade",Fe="show",Re="show",Be="out",He="hover",Me="focus",qe={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"},Qe={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",customClass:"",sanitize:!0,sanitizeFn:null,whiteList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},We={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object",popperConfig:"(null|object)"},Ue={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:
"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"},Ve=function(){function t(t,e){if("undefined"==typeof Yt)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var e=t.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=i.default(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(i.default(this.getTipElement()).hasClass(Fe))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),i.default.removeData(this.element,this.constructor.DATA_KEY),i.default(this.element).off(this.constructor.EVENT_KEY),i.default(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&i.default(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===i.default(this.element).css("display"))throw new Error("Please use show on visible elements");var e=i.default.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){i.default(this.element).trigger(e);var n=u.findShadowRoot(this.element),o=i.default.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!o)return;var 
r=this.getTipElement(),a=u.getUID(this.constructor.NAME);r.setAttribute("id",a),this.element.setAttribute("aria-describedby",a),this.setContent(),this.config.animation&&i.default(r).addClass(Pe);var s="function"==typeof this.config.placement?this.config.placement.call(this,r,this.element):this.config.placement,l=this._getAttachment(s);this.addAttachmentClass(l);var f=this._getContainer();i.default(r).data(this.constructor.DATA_KEY,this),i.default.contains(this.element.ownerDocument.documentElement,this.tip)||i.default(r).appendTo(f),i.default(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new Yt(this.element,r,this._getPopperConfig(l)),i.default(r).addClass(Fe),i.default(r).addClass(this.config.customClass),"ontouchstart"in document.documentElement&&i.default(document.body).children().on("mouseover",null,i.default.noop);var d=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,i.default(t.element).trigger(t.constructor.Event.SHOWN),e===Be&&t._leave(null,t)};if(i.default(this.tip).hasClass(Pe)){var c=u.getTransitionDurationFromElement(this.tip);i.default(this.tip).one(u.TRANSITION_END,d).emulateTransitionEnd(c)}else d()}},e.hide=function(t){var e=this,n=this.getTipElement(),o=i.default.Event(this.constructor.Event.HIDE),r=function(){e._hoverState!==Re&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),i.default(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(i.default(this.element).trigger(o),!o.isDefaultPrevented()){if(i.default(n).removeClass(Fe),"ontouchstart"in document.documentElement&&i.default(document.body).children().off("mouseover",null,i.default.noop),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1,i.default(this.tip).hasClass(Pe)){var a=u.getTransitionDurationFromElement(n);i.default(n).one(u.TRANSITION_END,r).emulateTransitionEnd(a)}else 
r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-tooltip-"+t)},e.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},e.setContent=function(){var t=this.getTipElement();this.setElementContent(i.default(t.querySelectorAll(".tooltip-inner")),this.getTitle()),i.default(t).removeClass("fade show")},e.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=ke(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?i.default(e).parent().is(t)||t.empty().append(e):t.text(i.default(e).text())},e.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},e._getPopperConfig=function(t){var e=this;return a({},{placement:t,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:".arrow"},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}},this.config.popperConfig)},e._getOffset=function(){var t=this,e={};return"function"==typeof this.config.offset?e.fn=function(e){return e.offsets=a({},e.offsets,t.config.offset(e.offsets,t.element)),e}:e.offset=this.config.offset,e},e._getContainer=function(){return!1===this.config.container?document.body:u.isElement(this.config.container)?i.default(this.config.container):i.default(document).find(this.config.container)},e._getAttachment=function(t){return qe[t.toUpperCase()]},e._setListeners=function(){var t=this;this.config.trigger.split(" 
").forEach((function(e){if("click"===e)i.default(t.element).on(t.constructor.Event.CLICK,t.config.selector,(function(e){return t.toggle(e)}));else if("manual"!==e){var n=e===He?t.constructor.Event.MOUSEENTER:t.constructor.Event.FOCUSIN,o=e===He?t.constructor.Event.MOUSELEAVE:t.constructor.Event.FOCUSOUT;i.default(t.element).on(n,t.config.selector,(function(e){return t._enter(e)})).on(o,t.config.selector,(function(e){return t._leave(e)}))}})),this._hideModalHandler=function(){t.element&&t.hide()},i.default(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=a({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Me:He]=!0),i.default(e.getTipElement()).hasClass(Fe)||e._hoverState===Re?e._hoverState=Re:(clearTimeout(e._timeout),e._hoverState=Re,e.config.delay&&e.config.delay.show?e._timeout=setTimeout((function(){e._hoverState===Re&&e.show()}),e.config.delay.show):e.show())},e._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Me:He]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=Be,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout((function(){e._hoverState===Be&&e.hide()}),e.config.delay.hide):e.hide())},e._isWithActiveTrigger=function(){for(var t in 
this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},e._getConfig=function(t){var e=i.default(this.element).data();return Object.keys(e).forEach((function(t){-1!==Le.indexOf(t)&&delete e[t]})),"number"==typeof(t=a({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),u.typeCheckConfig(Ie,t,this.constructor.DefaultType),t.sanitize&&(t.template=ke(t.template,t.whiteList,t.sanitizeFn)),t},e._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},e._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(je);null!==e&&e.length&&t.removeClass(e.join(""))},e._handlePopperPlacementChange=function(t){this.tip=t.instance.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},e._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(i.default(t).removeClass(Pe),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(Oe),r="object"==typeof e&&e;if((o||!/dispose|hide/.test(e))&&(o||(o=new t(this,r),n.data(Oe,o)),"string"==typeof e)){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Qe}},{key:"NAME",get:function(){return Ie}},{key:"DATA_KEY",get:function(){return Oe}},{key:"Event",get:function(){return Ue}},{key:"EVENT_KEY",get:function(){return".bs.tooltip"}},{key:"DefaultType",get:function(){return We}}]),t}();i.default.fn.tooltip=Ve._jQueryInterface,i.default.fn.tooltip.Constructor=Ve,i.default.fn.tooltip.noConflict=function(){return 
i.default.fn.tooltip=xe,Ve._jQueryInterface};var Ye="bs.popover",ze=i.default.fn.popover,Ke=new RegExp("(^|\\s)bs-popover\\S+","g"),Xe=a({},Ve.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),Ge=a({},Ve.DefaultType,{content:"(string|element|function)"}),$e={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"},Je=function(t){var e,n;function o(){return t.apply(this,arguments)||this}n=t,(e=o).prototype=Object.create(n.prototype),e.prototype.constructor=e,s(e,n);var a=o.prototype;return a.isWithContent=function(){return this.getTitle()||this._getContent()},a.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-popover-"+t)},a.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},a.setContent=function(){var t=i.default(this.getTipElement());this.setElementContent(t.find(".popover-header"),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(".popover-body"),e),t.removeClass("fade show")},a._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},a._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(Ke);null!==e&&e.length>0&&t.removeClass(e.join(""))},o._jQueryInterface=function(t){return this.each((function(){var e=i.default(this).data(Ye),n="object"==typeof t?t:null;if((e||!/dispose|hide/.test(t))&&(e||(e=new o(this,n),i.default(this).data(Ye,e)),"string"==typeof t)){if("undefined"==typeof e[t])throw new TypeError('No method named 
"'+t+'"');e[t]()}}))},r(o,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Xe}},{key:"NAME",get:function(){return"popover"}},{key:"DATA_KEY",get:function(){return Ye}},{key:"Event",get:function(){return $e}},{key:"EVENT_KEY",get:function(){return".bs.popover"}},{key:"DefaultType",get:function(){return Ge}}]),o}(Ve);i.default.fn.popover=Je._jQueryInterface,i.default.fn.popover.Constructor=Je,i.default.fn.popover.noConflict=function(){return i.default.fn.popover=ze,Je._jQueryInterface};var Ze="scrollspy",tn="bs.scrollspy",en=i.default.fn[Ze],nn="active",on="position",rn=".nav, .list-group",an={offset:10,method:"auto",target:""},sn={offset:"number",method:"string",target:"(string|element)"},ln=function(){function t(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" .nav-link,"+this._config.target+" .list-group-item,"+this._config.target+" .dropdown-item",this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,i.default(this._scrollElement).on("scroll.bs.scrollspy",(function(t){return n._process(t)})),this.refresh(),this._process()}var e=t.prototype;return e.refresh=function(){var t=this,e=this._scrollElement===this._scrollElement.window?"offset":on,n="auto"===this._config.method?e:this._config.method,o=n===on?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map((function(t){var e,r=u.getSelectorFromElement(t);if(r&&(e=document.querySelector(r)),e){var a=e.getBoundingClientRect();if(a.width||a.height)return[i.default(e)[n]().top+o,r]}return null})).filter(Boolean).sort((function(t,e){return 
t[0]-e[0]})).forEach((function(e){t._offsets.push(e[0]),t._targets.push(e[1])}))},e.dispose=function(){i.default.removeData(this._element,tn),i.default(this._scrollElement).off(".bs.scrollspy"),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(t){if("string"!=typeof(t=a({},an,"object"==typeof t&&t?t:{})).target&&u.isElement(t.target)){var e=i.default(t.target).attr("id");e||(e=u.getUID(Ze),i.default(t.target).attr("id",e)),t.target="#"+e}return u.typeCheckConfig(Ze,t,sn),t},e._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&this._offsets[0]>0)return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;)this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t<this._offsets[o+1])&&this._activate(this._targets[o])}},e._activate=function(t){this._activeTarget=t,this._clear();var e=this._selector.split(",").map((function(e){return 
e+'[data-target="'+t+'"],'+e+'[href="'+t+'"]'})),n=i.default([].slice.call(document.querySelectorAll(e.join(","))));n.hasClass("dropdown-item")?(n.closest(".dropdown").find(".dropdown-toggle").addClass(nn),n.addClass(nn)):(n.addClass(nn),n.parents(rn).prev(".nav-link, .list-group-item").addClass(nn),n.parents(rn).prev(".nav-item").children(".nav-link").addClass(nn)),i.default(this._scrollElement).trigger("activate.bs.scrollspy",{relatedTarget:t})},e._clear=function(){[].slice.call(document.querySelectorAll(this._selector)).filter((function(t){return t.classList.contains(nn)})).forEach((function(t){return t.classList.remove(nn)}))},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data(tn);if(n||(n=new t(this,"object"==typeof e&&e),i.default(this).data(tn,n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return an}}]),t}();i.default(window).on("load.bs.scrollspy.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-spy="scroll"]')),e=t.length;e--;){var n=i.default(t[e]);ln._jQueryInterface.call(n,n.data())}})),i.default.fn[Ze]=ln._jQueryInterface,i.default.fn[Ze].Constructor=ln,i.default.fn[Ze].noConflict=function(){return i.default.fn[Ze]=en,ln._jQueryInterface};var un="bs.tab",fn=i.default.fn.tab,dn="active",cn="fade",hn="show",pn=".active",mn="> li > .active",gn=function(){function t(t){this._element=t}var e=t.prototype;return e.show=function(){var t=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&i.default(this._element).hasClass(dn)||i.default(this._element).hasClass("disabled")||this._element.hasAttribute("disabled"))){var e,n,o=i.default(this._element).closest(".nav, .list-group")[0],r=u.getSelectorFromElement(this._element);if(o){var 
a="UL"===o.nodeName||"OL"===o.nodeName?mn:pn;n=(n=i.default.makeArray(i.default(o).find(a)))[n.length-1]}var s=i.default.Event("hide.bs.tab",{relatedTarget:this._element}),l=i.default.Event("show.bs.tab",{relatedTarget:n});if(n&&i.default(n).trigger(s),i.default(this._element).trigger(l),!l.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(e=document.querySelector(r)),this._activate(this._element,o);var f=function(){var e=i.default.Event("hidden.bs.tab",{relatedTarget:t._element}),o=i.default.Event("shown.bs.tab",{relatedTarget:n});i.default(n).trigger(e),i.default(t._element).trigger(o)};e?this._activate(e,e.parentNode,f):f()}}},e.dispose=function(){i.default.removeData(this._element,un),this._element=null},e._activate=function(t,e,n){var o=this,r=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?i.default(e).children(pn):i.default(e).find(mn))[0],a=n&&r&&i.default(r).hasClass(cn),s=function(){return o._transitionComplete(t,r,n)};if(r&&a){var l=u.getTransitionDurationFromElement(r);i.default(r).removeClass(hn).one(u.TRANSITION_END,s).emulateTransitionEnd(l)}else s()},e._transitionComplete=function(t,e,n){if(e){i.default(e).removeClass(dn);var o=i.default(e.parentNode).find("> .dropdown-menu .active")[0];o&&i.default(o).removeClass(dn),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}i.default(t).addClass(dn),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),u.reflow(t),t.classList.contains(cn)&&t.classList.add(hn);var r=t.parentNode;if(r&&"LI"===r.nodeName&&(r=r.parentNode),r&&i.default(r).hasClass("dropdown-menu")){var a=i.default(t).closest(".dropdown")[0];if(a){var s=[].slice.call(a.querySelectorAll(".dropdown-toggle"));i.default(s).addClass(dn)}t.setAttribute("aria-expanded",!0)}n&&n()},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(un);if(o||(o=new t(this),n.data(un,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named 
"'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();i.default(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',(function(t){t.preventDefault(),gn._jQueryInterface.call(i.default(this),"show")})),i.default.fn.tab=gn._jQueryInterface,i.default.fn.tab.Constructor=gn,i.default.fn.tab.noConflict=function(){return i.default.fn.tab=fn,gn._jQueryInterface};var _n="bs.toast",vn=i.default.fn.toast,bn="hide",yn="show",En="showing",wn="click.dismiss.bs.toast",Tn={animation:!0,autohide:!0,delay:500},Cn={animation:"boolean",autohide:"boolean",delay:"number"},Sn=function(){function t(t,e){this._element=t,this._config=this._getConfig(e),this._timeout=null,this._setListeners()}var e=t.prototype;return e.show=function(){var t=this,e=i.default.Event("show.bs.toast");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){this._clearTimeout(),this._config.animation&&this._element.classList.add("fade");var n=function(){t._element.classList.remove(En),t._element.classList.add(yn),i.default(t._element).trigger("shown.bs.toast"),t._config.autohide&&(t._timeout=setTimeout((function(){t.hide()}),t._config.delay))};if(this._element.classList.remove(bn),u.reflow(this._element),this._element.classList.add(En),this._config.animation){var o=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,n).emulateTransitionEnd(o)}else n()}},e.hide=function(){if(this._element.classList.contains(yn)){var t=i.default.Event("hide.bs.toast");i.default(this._element).trigger(t),t.isDefaultPrevented()||this._close()}},e.dispose=function(){this._clearTimeout(),this._element.classList.contains(yn)&&this._element.classList.remove(yn),i.default(this._element).off(wn),i.default.removeData(this._element,_n),this._element=null,this._config=null},e._getConfig=function(t){return t=a({},Tn,i.default(this._element).data(),"object"==typeof 
t&&t?t:{}),u.typeCheckConfig("toast",t,this.constructor.DefaultType),t},e._setListeners=function(){var t=this;i.default(this._element).on(wn,'[data-dismiss="toast"]',(function(){return t.hide()}))},e._close=function(){var t=this,e=function(){t._element.classList.add(bn),i.default(t._element).trigger("hidden.bs.toast")};if(this._element.classList.remove(yn),this._config.animation){var n=u.getTransitionDurationFromElement(this._element);i.default(this._element).one(u.TRANSITION_END,e).emulateTransitionEnd(n)}else e()},e._clearTimeout=function(){clearTimeout(this._timeout),this._timeout=null},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data(_n);if(o||(o=new t(this,"object"==typeof e&&e),n.data(_n,o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e](this)}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"DefaultType",get:function(){return Cn}},{key:"Default",get:function(){return Tn}}]),t}();i.default.fn.toast=Sn._jQueryInterface,i.default.fn.toast.Constructor=Sn,i.default.fn.toast.noConflict=function(){return i.default.fn.toast=vn,Sn._jQueryInterface},t.Alert=c,t.Button=b,t.Carousel=O,t.Collapse=W,t.Dropdown=le,t.Modal=Se,t.Popover=Je,t.Scrollspy=ln,t.Tab=gn,t.Toast=Sn,t.Tooltip=Ve,t.Util=u,Object.defineProperty(t,"__esModule",{value:!0})}));
|
7 |
-
//# sourceMappingURL=bootstrap.bundle.min.js.map
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/scripts.py
DELETED
@@ -1,437 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
#
|
3 |
-
# Copyright (C) 2013-2015 Vinay Sajip.
|
4 |
-
# Licensed to the Python Software Foundation under a contributor agreement.
|
5 |
-
# See LICENSE.txt and CONTRIBUTORS.txt.
|
6 |
-
#
|
7 |
-
from io import BytesIO
|
8 |
-
import logging
|
9 |
-
import os
|
10 |
-
import re
|
11 |
-
import struct
|
12 |
-
import sys
|
13 |
-
import time
|
14 |
-
from zipfile import ZipInfo
|
15 |
-
|
16 |
-
from .compat import sysconfig, detect_encoding, ZipFile
|
17 |
-
from .resources import finder
|
18 |
-
from .util import (FileOperator, get_export_entry, convert_path,
|
19 |
-
get_executable, get_platform, in_venv)
|
20 |
-
|
21 |
-
logger = logging.getLogger(__name__)
|
22 |
-
|
23 |
-
_DEFAULT_MANIFEST = '''
|
24 |
-
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
25 |
-
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
|
26 |
-
<assemblyIdentity version="1.0.0.0"
|
27 |
-
processorArchitecture="X86"
|
28 |
-
name="%s"
|
29 |
-
type="win32"/>
|
30 |
-
|
31 |
-
<!-- Identify the application security requirements. -->
|
32 |
-
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
|
33 |
-
<security>
|
34 |
-
<requestedPrivileges>
|
35 |
-
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
|
36 |
-
</requestedPrivileges>
|
37 |
-
</security>
|
38 |
-
</trustInfo>
|
39 |
-
</assembly>'''.strip()
|
40 |
-
|
41 |
-
# check if Python is called on the first line with this expression
|
42 |
-
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
|
43 |
-
SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
|
44 |
-
import re
|
45 |
-
import sys
|
46 |
-
from %(module)s import %(import_name)s
|
47 |
-
if __name__ == '__main__':
|
48 |
-
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
49 |
-
sys.exit(%(func)s())
|
50 |
-
'''
|
51 |
-
|
52 |
-
|
53 |
-
def enquote_executable(executable):
|
54 |
-
if ' ' in executable:
|
55 |
-
# make sure we quote only the executable in case of env
|
56 |
-
# for example /usr/bin/env "/dir with spaces/bin/jython"
|
57 |
-
# instead of "/usr/bin/env /dir with spaces/bin/jython"
|
58 |
-
# otherwise whole
|
59 |
-
if executable.startswith('/usr/bin/env '):
|
60 |
-
env, _executable = executable.split(' ', 1)
|
61 |
-
if ' ' in _executable and not _executable.startswith('"'):
|
62 |
-
executable = '%s "%s"' % (env, _executable)
|
63 |
-
else:
|
64 |
-
if not executable.startswith('"'):
|
65 |
-
executable = '"%s"' % executable
|
66 |
-
return executable
|
67 |
-
|
68 |
-
# Keep the old name around (for now), as there is at least one project using it!
|
69 |
-
_enquote_executable = enquote_executable
|
70 |
-
|
71 |
-
class ScriptMaker(object):
|
72 |
-
"""
|
73 |
-
A class to copy or create scripts from source scripts or callable
|
74 |
-
specifications.
|
75 |
-
"""
|
76 |
-
script_template = SCRIPT_TEMPLATE
|
77 |
-
|
78 |
-
executable = None # for shebangs
|
79 |
-
|
80 |
-
def __init__(self, source_dir, target_dir, add_launchers=True,
|
81 |
-
dry_run=False, fileop=None):
|
82 |
-
self.source_dir = source_dir
|
83 |
-
self.target_dir = target_dir
|
84 |
-
self.add_launchers = add_launchers
|
85 |
-
self.force = False
|
86 |
-
self.clobber = False
|
87 |
-
# It only makes sense to set mode bits on POSIX.
|
88 |
-
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
|
89 |
-
os._name == 'posix')
|
90 |
-
self.variants = set(('', 'X.Y'))
|
91 |
-
self._fileop = fileop or FileOperator(dry_run)
|
92 |
-
|
93 |
-
self._is_nt = os.name == 'nt' or (
|
94 |
-
os.name == 'java' and os._name == 'nt')
|
95 |
-
self.version_info = sys.version_info
|
96 |
-
|
97 |
-
def _get_alternate_executable(self, executable, options):
|
98 |
-
if options.get('gui', False) and self._is_nt: # pragma: no cover
|
99 |
-
dn, fn = os.path.split(executable)
|
100 |
-
fn = fn.replace('python', 'pythonw')
|
101 |
-
executable = os.path.join(dn, fn)
|
102 |
-
return executable
|
103 |
-
|
104 |
-
if sys.platform.startswith('java'): # pragma: no cover
|
105 |
-
def _is_shell(self, executable):
|
106 |
-
"""
|
107 |
-
Determine if the specified executable is a script
|
108 |
-
(contains a #! line)
|
109 |
-
"""
|
110 |
-
try:
|
111 |
-
with open(executable) as fp:
|
112 |
-
return fp.read(2) == '#!'
|
113 |
-
except (OSError, IOError):
|
114 |
-
logger.warning('Failed to open %s', executable)
|
115 |
-
return False
|
116 |
-
|
117 |
-
def _fix_jython_executable(self, executable):
|
118 |
-
if self._is_shell(executable):
|
119 |
-
# Workaround for Jython is not needed on Linux systems.
|
120 |
-
import java
|
121 |
-
|
122 |
-
if java.lang.System.getProperty('os.name') == 'Linux':
|
123 |
-
return executable
|
124 |
-
elif executable.lower().endswith('jython.exe'):
|
125 |
-
# Use wrapper exe for Jython on Windows
|
126 |
-
return executable
|
127 |
-
return '/usr/bin/env %s' % executable
|
128 |
-
|
129 |
-
def _build_shebang(self, executable, post_interp):
|
130 |
-
"""
|
131 |
-
Build a shebang line. In the simple case (on Windows, or a shebang line
|
132 |
-
which is not too long or contains spaces) use a simple formulation for
|
133 |
-
the shebang. Otherwise, use /bin/sh as the executable, with a contrived
|
134 |
-
shebang which allows the script to run either under Python or sh, using
|
135 |
-
suitable quoting. Thanks to Harald Nordgren for his input.
|
136 |
-
|
137 |
-
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
|
138 |
-
https://hg.mozilla.org/mozilla-central/file/tip/mach
|
139 |
-
"""
|
140 |
-
if os.name != 'posix':
|
141 |
-
simple_shebang = True
|
142 |
-
else:
|
143 |
-
# Add 3 for '#!' prefix and newline suffix.
|
144 |
-
shebang_length = len(executable) + len(post_interp) + 3
|
145 |
-
if sys.platform == 'darwin':
|
146 |
-
max_shebang_length = 512
|
147 |
-
else:
|
148 |
-
max_shebang_length = 127
|
149 |
-
simple_shebang = ((b' ' not in executable) and
|
150 |
-
(shebang_length <= max_shebang_length))
|
151 |
-
|
152 |
-
if simple_shebang:
|
153 |
-
result = b'#!' + executable + post_interp + b'\n'
|
154 |
-
else:
|
155 |
-
result = b'#!/bin/sh\n'
|
156 |
-
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
|
157 |
-
result += b"' '''"
|
158 |
-
return result
|
159 |
-
|
160 |
-
def _get_shebang(self, encoding, post_interp=b'', options=None):
|
161 |
-
enquote = True
|
162 |
-
if self.executable:
|
163 |
-
executable = self.executable
|
164 |
-
enquote = False # assume this will be taken care of
|
165 |
-
elif not sysconfig.is_python_build():
|
166 |
-
executable = get_executable()
|
167 |
-
elif in_venv(): # pragma: no cover
|
168 |
-
executable = os.path.join(sysconfig.get_path('scripts'),
|
169 |
-
'python%s' % sysconfig.get_config_var('EXE'))
|
170 |
-
else: # pragma: no cover
|
171 |
-
executable = os.path.join(
|
172 |
-
sysconfig.get_config_var('BINDIR'),
|
173 |
-
'python%s%s' % (sysconfig.get_config_var('VERSION'),
|
174 |
-
sysconfig.get_config_var('EXE')))
|
175 |
-
if not os.path.isfile(executable):
|
176 |
-
# for Python builds from source on Windows, no Python executables with
|
177 |
-
# a version suffix are created, so we use python.exe
|
178 |
-
executable = os.path.join(sysconfig.get_config_var('BINDIR'),
|
179 |
-
'python%s' % (sysconfig.get_config_var('EXE')))
|
180 |
-
if options:
|
181 |
-
executable = self._get_alternate_executable(executable, options)
|
182 |
-
|
183 |
-
if sys.platform.startswith('java'): # pragma: no cover
|
184 |
-
executable = self._fix_jython_executable(executable)
|
185 |
-
|
186 |
-
# Normalise case for Windows - COMMENTED OUT
|
187 |
-
# executable = os.path.normcase(executable)
|
188 |
-
# N.B. The normalising operation above has been commented out: See
|
189 |
-
# issue #124. Although paths in Windows are generally case-insensitive,
|
190 |
-
# they aren't always. For example, a path containing a ẞ (which is a
|
191 |
-
# LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
|
192 |
-
# LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by
|
193 |
-
# Windows as equivalent in path names.
|
194 |
-
|
195 |
-
# If the user didn't specify an executable, it may be necessary to
|
196 |
-
# cater for executable paths with spaces (not uncommon on Windows)
|
197 |
-
if enquote:
|
198 |
-
executable = enquote_executable(executable)
|
199 |
-
# Issue #51: don't use fsencode, since we later try to
|
200 |
-
# check that the shebang is decodable using utf-8.
|
201 |
-
executable = executable.encode('utf-8')
|
202 |
-
# in case of IronPython, play safe and enable frames support
|
203 |
-
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
|
204 |
-
and '-X:FullFrames' not in post_interp): # pragma: no cover
|
205 |
-
post_interp += b' -X:Frames'
|
206 |
-
shebang = self._build_shebang(executable, post_interp)
|
207 |
-
# Python parser starts to read a script using UTF-8 until
|
208 |
-
# it gets a #coding:xxx cookie. The shebang has to be the
|
209 |
-
# first line of a file, the #coding:xxx cookie cannot be
|
210 |
-
# written before. So the shebang has to be decodable from
|
211 |
-
# UTF-8.
|
212 |
-
try:
|
213 |
-
shebang.decode('utf-8')
|
214 |
-
except UnicodeDecodeError: # pragma: no cover
|
215 |
-
raise ValueError(
|
216 |
-
'The shebang (%r) is not decodable from utf-8' % shebang)
|
217 |
-
# If the script is encoded to a custom encoding (use a
|
218 |
-
# #coding:xxx cookie), the shebang has to be decodable from
|
219 |
-
# the script encoding too.
|
220 |
-
if encoding != 'utf-8':
|
221 |
-
try:
|
222 |
-
shebang.decode(encoding)
|
223 |
-
except UnicodeDecodeError: # pragma: no cover
|
224 |
-
raise ValueError(
|
225 |
-
'The shebang (%r) is not decodable '
|
226 |
-
'from the script encoding (%r)' % (shebang, encoding))
|
227 |
-
return shebang
|
228 |
-
|
229 |
-
def _get_script_text(self, entry):
|
230 |
-
return self.script_template % dict(module=entry.prefix,
|
231 |
-
import_name=entry.suffix.split('.')[0],
|
232 |
-
func=entry.suffix)
|
233 |
-
|
234 |
-
manifest = _DEFAULT_MANIFEST
|
235 |
-
|
236 |
-
def get_manifest(self, exename):
|
237 |
-
base = os.path.basename(exename)
|
238 |
-
return self.manifest % base
|
239 |
-
|
240 |
-
def _write_script(self, names, shebang, script_bytes, filenames, ext):
|
241 |
-
use_launcher = self.add_launchers and self._is_nt
|
242 |
-
linesep = os.linesep.encode('utf-8')
|
243 |
-
if not shebang.endswith(linesep):
|
244 |
-
shebang += linesep
|
245 |
-
if not use_launcher:
|
246 |
-
script_bytes = shebang + script_bytes
|
247 |
-
else: # pragma: no cover
|
248 |
-
if ext == 'py':
|
249 |
-
launcher = self._get_launcher('t')
|
250 |
-
else:
|
251 |
-
launcher = self._get_launcher('w')
|
252 |
-
stream = BytesIO()
|
253 |
-
with ZipFile(stream, 'w') as zf:
|
254 |
-
source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
|
255 |
-
if source_date_epoch:
|
256 |
-
date_time = time.gmtime(int(source_date_epoch))[:6]
|
257 |
-
zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
|
258 |
-
zf.writestr(zinfo, script_bytes)
|
259 |
-
else:
|
260 |
-
zf.writestr('__main__.py', script_bytes)
|
261 |
-
zip_data = stream.getvalue()
|
262 |
-
script_bytes = launcher + shebang + zip_data
|
263 |
-
for name in names:
|
264 |
-
outname = os.path.join(self.target_dir, name)
|
265 |
-
if use_launcher: # pragma: no cover
|
266 |
-
n, e = os.path.splitext(outname)
|
267 |
-
if e.startswith('.py'):
|
268 |
-
outname = n
|
269 |
-
outname = '%s.exe' % outname
|
270 |
-
try:
|
271 |
-
self._fileop.write_binary_file(outname, script_bytes)
|
272 |
-
except Exception:
|
273 |
-
# Failed writing an executable - it might be in use.
|
274 |
-
logger.warning('Failed to write executable - trying to '
|
275 |
-
'use .deleteme logic')
|
276 |
-
dfname = '%s.deleteme' % outname
|
277 |
-
if os.path.exists(dfname):
|
278 |
-
os.remove(dfname) # Not allowed to fail here
|
279 |
-
os.rename(outname, dfname) # nor here
|
280 |
-
self._fileop.write_binary_file(outname, script_bytes)
|
281 |
-
logger.debug('Able to replace executable using '
|
282 |
-
'.deleteme logic')
|
283 |
-
try:
|
284 |
-
os.remove(dfname)
|
285 |
-
except Exception:
|
286 |
-
pass # still in use - ignore error
|
287 |
-
else:
|
288 |
-
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
|
289 |
-
outname = '%s.%s' % (outname, ext)
|
290 |
-
if os.path.exists(outname) and not self.clobber:
|
291 |
-
logger.warning('Skipping existing file %s', outname)
|
292 |
-
continue
|
293 |
-
self._fileop.write_binary_file(outname, script_bytes)
|
294 |
-
if self.set_mode:
|
295 |
-
self._fileop.set_executable_mode([outname])
|
296 |
-
filenames.append(outname)
|
297 |
-
|
298 |
-
variant_separator = '-'
|
299 |
-
|
300 |
-
def get_script_filenames(self, name):
|
301 |
-
result = set()
|
302 |
-
if '' in self.variants:
|
303 |
-
result.add(name)
|
304 |
-
if 'X' in self.variants:
|
305 |
-
result.add('%s%s' % (name, self.version_info[0]))
|
306 |
-
if 'X.Y' in self.variants:
|
307 |
-
result.add('%s%s%s.%s' % (name, self.variant_separator,
|
308 |
-
self.version_info[0], self.version_info[1]))
|
309 |
-
return result
|
310 |
-
|
311 |
-
def _make_script(self, entry, filenames, options=None):
|
312 |
-
post_interp = b''
|
313 |
-
if options:
|
314 |
-
args = options.get('interpreter_args', [])
|
315 |
-
if args:
|
316 |
-
args = ' %s' % ' '.join(args)
|
317 |
-
post_interp = args.encode('utf-8')
|
318 |
-
shebang = self._get_shebang('utf-8', post_interp, options=options)
|
319 |
-
script = self._get_script_text(entry).encode('utf-8')
|
320 |
-
scriptnames = self.get_script_filenames(entry.name)
|
321 |
-
if options and options.get('gui', False):
|
322 |
-
ext = 'pyw'
|
323 |
-
else:
|
324 |
-
ext = 'py'
|
325 |
-
self._write_script(scriptnames, shebang, script, filenames, ext)
|
326 |
-
|
327 |
-
def _copy_script(self, script, filenames):
|
328 |
-
adjust = False
|
329 |
-
script = os.path.join(self.source_dir, convert_path(script))
|
330 |
-
outname = os.path.join(self.target_dir, os.path.basename(script))
|
331 |
-
if not self.force and not self._fileop.newer(script, outname):
|
332 |
-
logger.debug('not copying %s (up-to-date)', script)
|
333 |
-
return
|
334 |
-
|
335 |
-
# Always open the file, but ignore failures in dry-run mode --
|
336 |
-
# that way, we'll get accurate feedback if we can read the
|
337 |
-
# script.
|
338 |
-
try:
|
339 |
-
f = open(script, 'rb')
|
340 |
-
except IOError: # pragma: no cover
|
341 |
-
if not self.dry_run:
|
342 |
-
raise
|
343 |
-
f = None
|
344 |
-
else:
|
345 |
-
first_line = f.readline()
|
346 |
-
if not first_line: # pragma: no cover
|
347 |
-
logger.warning('%s is an empty file (skipping)', script)
|
348 |
-
return
|
349 |
-
|
350 |
-
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
|
351 |
-
if match:
|
352 |
-
adjust = True
|
353 |
-
post_interp = match.group(1) or b''
|
354 |
-
|
355 |
-
if not adjust:
|
356 |
-
if f:
|
357 |
-
f.close()
|
358 |
-
self._fileop.copy_file(script, outname)
|
359 |
-
if self.set_mode:
|
360 |
-
self._fileop.set_executable_mode([outname])
|
361 |
-
filenames.append(outname)
|
362 |
-
else:
|
363 |
-
logger.info('copying and adjusting %s -> %s', script,
|
364 |
-
self.target_dir)
|
365 |
-
if not self._fileop.dry_run:
|
366 |
-
encoding, lines = detect_encoding(f.readline)
|
367 |
-
f.seek(0)
|
368 |
-
shebang = self._get_shebang(encoding, post_interp)
|
369 |
-
if b'pythonw' in first_line: # pragma: no cover
|
370 |
-
ext = 'pyw'
|
371 |
-
else:
|
372 |
-
ext = 'py'
|
373 |
-
n = os.path.basename(outname)
|
374 |
-
self._write_script([n], shebang, f.read(), filenames, ext)
|
375 |
-
if f:
|
376 |
-
f.close()
|
377 |
-
|
378 |
-
@property
|
379 |
-
def dry_run(self):
|
380 |
-
return self._fileop.dry_run
|
381 |
-
|
382 |
-
@dry_run.setter
|
383 |
-
def dry_run(self, value):
|
384 |
-
self._fileop.dry_run = value
|
385 |
-
|
386 |
-
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
|
387 |
-
# Executable launcher support.
|
388 |
-
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
|
389 |
-
|
390 |
-
def _get_launcher(self, kind):
|
391 |
-
if struct.calcsize('P') == 8: # 64-bit
|
392 |
-
bits = '64'
|
393 |
-
else:
|
394 |
-
bits = '32'
|
395 |
-
platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
|
396 |
-
name = '%s%s%s.exe' % (kind, bits, platform_suffix)
|
397 |
-
# Issue 31: don't hardcode an absolute package name, but
|
398 |
-
# determine it relative to the current package
|
399 |
-
distlib_package = __name__.rsplit('.', 1)[0]
|
400 |
-
resource = finder(distlib_package).find(name)
|
401 |
-
if not resource:
|
402 |
-
msg = ('Unable to find resource %s in package %s' % (name,
|
403 |
-
distlib_package))
|
404 |
-
raise ValueError(msg)
|
405 |
-
return resource.bytes
|
406 |
-
|
407 |
-
# Public API follows
|
408 |
-
|
409 |
-
def make(self, specification, options=None):
|
410 |
-
"""
|
411 |
-
Make a script.
|
412 |
-
|
413 |
-
:param specification: The specification, which is either a valid export
|
414 |
-
entry specification (to make a script from a
|
415 |
-
callable) or a filename (to make a script by
|
416 |
-
copying from a source location).
|
417 |
-
:param options: A dictionary of options controlling script generation.
|
418 |
-
:return: A list of all absolute pathnames written to.
|
419 |
-
"""
|
420 |
-
filenames = []
|
421 |
-
entry = get_export_entry(specification)
|
422 |
-
if entry is None:
|
423 |
-
self._copy_script(specification, filenames)
|
424 |
-
else:
|
425 |
-
self._make_script(entry, filenames, options=options)
|
426 |
-
return filenames
|
427 |
-
|
428 |
-
def make_multiple(self, specifications, options=None):
|
429 |
-
"""
|
430 |
-
Take a list of specifications and make scripts from them,
|
431 |
-
:param specifications: A list of specifications.
|
432 |
-
:return: A list of all absolute pathnames written to,
|
433 |
-
"""
|
434 |
-
filenames = []
|
435 |
-
for specification in specifications:
|
436 |
-
filenames.extend(self.make(specification, options))
|
437 |
-
return filenames
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AyameYODAYO/xijinpingx/index.html
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html>
|
3 |
-
<head>
|
4 |
-
<meta charset="utf-8" />
|
5 |
-
<meta name="viewport" content="width=device-width" />
|
6 |
-
<title>My static Space</title>
|
7 |
-
<link rel="stylesheet" href="style.css" />
|
8 |
-
</head>
|
9 |
-
<body>
|
10 |
-
<div class="card">
|
11 |
-
<h1>Welcome to your static Space!</h1>
|
12 |
-
<p>
|
13 |
-
You can modify this app directly by editing <i>index.html</i> in the
|
14 |
-
Files and versions tab.
|
15 |
-
</p>
|
16 |
-
<p>
|
17 |
-
Also don't forget to check the
|
18 |
-
<a href="https://huggingface.co/docs/hub/spaces" target="_blank"
|
19 |
-
>Spaces documentation</a
|
20 |
-
>.
|
21 |
-
</p>
|
22 |
-
</div>
|
23 |
-
</body>
|
24 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Caramelo Crush Saga Para Windows Pc 7.md
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar Candy Crush Saga para PC Windows 7</h1>
|
3 |
-
<p>Si eres un fan de los juegos de puzzle, probablemente hayas oído hablar de Candy Crush Saga, uno de los juegos más populares y adictivos de todos los tiempos. ¿Pero sabías que también puedes jugar a este juego en tu PC Windows 7? En este artículo, te mostraremos cómo descargar e instalar Candy Crush Saga para PC Windows 7 usando dos métodos diferentes: desde la tienda de Microsoft y usando un emulador. ¡Sigue leyendo para saber más! </p>
|
4 |
-
<h2>Introducción</h2>
|
5 |
-
<h3>¿Qué es Candy Crush Saga? </h3>
|
6 |
-
<p>Candy Crush Saga es un juego de puzzle desarrollado por King, una empresa líder en la industria de los juegos casuales. El juego fue lanzado en 2012 y desde entonces se ha convertido en un fenómeno mundial, con más de mil millones de descargas y millones de jugadores activos cada día. El juego es simple pero desafiante: tienes que cambiar y combinar dulces del mismo color para limpiar el tablero y completar los niveles. Hay miles de niveles para jugar, cada uno con diferentes objetivos y obstáculos. También puedes usar potenciadores y dulces especiales para ayudarte. El juego es gratis, pero también puedes comprar movimientos adicionales, vidas y otros artículos con dinero real. </p>
|
7 |
-
<h2>descargar caramelo crush saga para windows pc 7</h2><br /><p><b><b>Download File</b> ○ <a href="https://bltlly.com/2v6K90">https://bltlly.com/2v6K90</a></b></p><br /><br />
|
8 |
-
<h3> ¿Por qué jugar Candy Crush Saga en PC Windows 7?</h3>
|
9 |
-
<p>Mientras que Candy Crush Saga está diseñado principalmente para dispositivos móviles, hay muchas razones por las que es posible que desee jugar en su PC Windows 7. Aquí están algunos de ellos:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Puedes disfrutar de una pantalla más grande y mejores gráficos. </li>
|
12 |
-
<li> Puede utilizar el teclado y el ratón para un control más preciso. </li>
|
13 |
-
<li> Puede ahorrar la vida de la batería y el uso de datos. </li>
|
14 |
-
<li>Puedes jugar offline sin interrupciones. </li>
|
15 |
-
<li>Puede sincronizar su progreso a través de múltiples dispositivos. </li>
|
16 |
-
</ul>
|
17 |
-
<p>Entonces, ¿cómo se descarga Candy Crush Saga para PC Windows 7? Hay dos maneras de hacerlo: desde la tienda de Microsoft o usando un emulador. Veamos cómo funciona cada método. </p>
|
18 |
-
<h2>Cómo descargar Candy Crush Saga de Microsoft Store</h2>
|
19 |
-
|
20 |
-
<h3>Paso 1: Ir al sitio web de Microsoft Store</h3>
|
21 |
-
<p>Abra su navegador web y vaya a [el sitio web de Microsoft Store]( 1 ). También puede acceder a la tienda desde su menú de inicio o barra de tareas. </p>
|
22 |
-
<h3>Paso 2: Búsqueda de Candy Crush Saga</h3>
|
23 |
-
<p>En la barra de búsqueda en la esquina superior derecha, escriba "Candy Crush Saga" y pulse Enter. Verá una lista de resultados relacionados con su consulta. Haga clic en el que dice "Candy Crush Saga" por king.com. </p>
|
24 |
-
<h3>Paso 3: Haga clic en el botón Obtener</h3>
|
25 |
-
<p>En la página del juego, verá un botón que dice "Obtener". Haga clic en él para comenzar a descargar el juego. Es posible que tengas que iniciar sesión con tu cuenta de Microsoft si aún no lo has hecho. El juego es gratuito, pero puede ofrecer compras en la aplicación. </p>
|
26 |
-
<h3>Paso 4: Iniciar el juego y disfrutar de</h3>
|
27 |
-
<p>Una vez completada la descarga, puedes iniciar el juego desde tu menú de inicio o barra de tareas. Verás una pantalla de bienvenida que te pide que conectes tu cuenta de Facebook o que juegues como invitado. También puede elegir su idioma preferido en el menú de configuración. Ahora está listo para jugar Candy Crush Saga en su PC Windows 7!</p>
|
28 |
-
<h2>Cómo descargar Candy Crush Saga usando el emulador de BlueStacks</h2>
|
29 |
-
<p>Un emulador es un software que le permite ejecutar aplicaciones y juegos para Android en su PC. Hay muchos emuladores disponibles, pero uno de los más populares y fiables es BlueStacks. BlueStacks es gratuito y fácil de usar, y tiene una gran biblioteca de aplicaciones y juegos que puedes descargar y jugar. Estos son los pasos para descargar Candy Crush Saga usando el emulador de BlueStacks:</p>
|
30 |
-
<h3>Paso 1: Descargar e instalar BlueStacks en su PC</h3>
|
31 |
-
<p>Vaya a [el sitio web de BlueStacks] y haga clic en el botón "Descargar BlueStacks". Esto comenzará a descargar el archivo de instalación en su PC. Una vez finalizada la descarga, ejecute el archivo y siga las instrucciones para instalar BlueStacks en su PC. Es posible que necesite conceder algunos permisos y reiniciar su PC durante el proceso. </p>
|
32 |
-
<p></p>
|
33 |
-
<h3>Paso 2: Inicia sesión con tu cuenta de Google</h3>
|
34 |
-
|
35 |
-
<h3>Paso 3: Búsqueda de Candy Crush Saga en la Play Store</h3>
|
36 |
-
<p>Una vez que haya iniciado sesión, verá la pantalla de inicio de BlueStacks, que parece una tableta Android. En la esquina superior derecha, verá un icono de búsqueda. Haga clic en él y escriba "Candy Crush Saga" en la barra de búsqueda. Verá una lista de resultados relacionados con su consulta. Haga clic en el que dice "Candy Crush Saga" por King.</p>
|
37 |
-
<h3>Paso 4: Instalar y jugar el juego en BlueStacks</h3>
|
38 |
-
<p>En la página del juego, verá un botón que dice "Instalar". Haga clic en él para comenzar a descargar e instalar el juego en BlueStacks. El juego es gratuito, pero puede ofrecer compras en la aplicación. Una vez completada la instalación, puede iniciar el juego desde la pantalla de inicio o el cajón de aplicaciones de BlueStacks. Verás una pantalla de bienvenida que te pide que conectes tu cuenta de Facebook o que juegues como invitado. También puede elegir su idioma preferido en el menú de configuración. Ahora está listo para jugar Candy Crush Saga en su PC Windows 7!</p>
|
39 |
-
<h2>Conclusión</h2>
|
40 |
-
<h3>Resumen de los puntos principales</h3>
|
41 |
-
<p>En este artículo, le hemos mostrado cómo descargar e instalar Candy Crush Saga para PC Windows 7 usando dos métodos diferentes: desde la Tienda de Microsoft y usando un emulador. Ambos métodos son fáciles y seguros, y te permiten disfrutar de este divertido y adictivo juego de puzzle en tu PC Windows 7. También puedes sincronizar tu progreso a través de múltiples dispositivos y jugar sin conexión sin interrupciones. </p>
|
42 |
-
<h3>Llamada a la acción</h3>
|
43 |
-
<p>Así que, ¿qué estás esperando? Descargar Candy Crush Saga para PC Windows 7 hoy y empezar a emparejar caramelos y niveles de limpieza! Usted tendrá una explosión jugando a este juego, si usted es un principiante o un experto. ¡Y no olvides compartir tu experiencia con nosotros en los comentarios de abajo! </p> 64aa2da5cf<br />
|
44 |
-
<br />
|
45 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/action.py
DELETED
@@ -1,257 +0,0 @@
|
|
1 |
-
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# https://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
|
14 |
-
import logging
|
15 |
-
|
16 |
-
from botocore import xform_name
|
17 |
-
|
18 |
-
from boto3.docs.docstring import ActionDocstring
|
19 |
-
from boto3.utils import inject_attribute
|
20 |
-
|
21 |
-
from .model import Action
|
22 |
-
from .params import create_request_parameters
|
23 |
-
from .response import RawHandler, ResourceHandler
|
24 |
-
|
25 |
-
logger = logging.getLogger(__name__)
|
26 |
-
|
27 |
-
|
28 |
-
class ServiceAction:
|
29 |
-
"""
|
30 |
-
A class representing a callable action on a resource, for example
|
31 |
-
``sqs.get_queue_by_name(...)`` or ``s3.Bucket('foo').delete()``.
|
32 |
-
The action may construct parameters from existing resource identifiers
|
33 |
-
and may return either a raw response or a new resource instance.
|
34 |
-
|
35 |
-
:type action_model: :py:class`~boto3.resources.model.Action`
|
36 |
-
:param action_model: The action model.
|
37 |
-
|
38 |
-
:type factory: ResourceFactory
|
39 |
-
:param factory: The factory that created the resource class to which
|
40 |
-
this action is attached.
|
41 |
-
|
42 |
-
:type service_context: :py:class:`~boto3.utils.ServiceContext`
|
43 |
-
:param service_context: Context about the AWS service
|
44 |
-
"""
|
45 |
-
|
46 |
-
def __init__(self, action_model, factory=None, service_context=None):
|
47 |
-
self._action_model = action_model
|
48 |
-
|
49 |
-
# In the simplest case we just return the response, but if a
|
50 |
-
# resource is defined, then we must create these before returning.
|
51 |
-
resource_response_model = action_model.resource
|
52 |
-
if resource_response_model:
|
53 |
-
self._response_handler = ResourceHandler(
|
54 |
-
search_path=resource_response_model.path,
|
55 |
-
factory=factory,
|
56 |
-
resource_model=resource_response_model,
|
57 |
-
service_context=service_context,
|
58 |
-
operation_name=action_model.request.operation,
|
59 |
-
)
|
60 |
-
else:
|
61 |
-
self._response_handler = RawHandler(action_model.path)
|
62 |
-
|
63 |
-
def __call__(self, parent, *args, **kwargs):
|
64 |
-
"""
|
65 |
-
Perform the action's request operation after building operation
|
66 |
-
parameters and build any defined resources from the response.
|
67 |
-
|
68 |
-
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
|
69 |
-
:param parent: The resource instance to which this action is attached.
|
70 |
-
:rtype: dict or ServiceResource or list(ServiceResource)
|
71 |
-
:return: The response, either as a raw dict or resource instance(s).
|
72 |
-
"""
|
73 |
-
operation_name = xform_name(self._action_model.request.operation)
|
74 |
-
|
75 |
-
# First, build predefined params and then update with the
|
76 |
-
# user-supplied kwargs, which allows overriding the pre-built
|
77 |
-
# params if needed.
|
78 |
-
params = create_request_parameters(parent, self._action_model.request)
|
79 |
-
params.update(kwargs)
|
80 |
-
|
81 |
-
logger.debug(
|
82 |
-
'Calling %s:%s with %r',
|
83 |
-
parent.meta.service_name,
|
84 |
-
operation_name,
|
85 |
-
params,
|
86 |
-
)
|
87 |
-
|
88 |
-
response = getattr(parent.meta.client, operation_name)(*args, **params)
|
89 |
-
|
90 |
-
logger.debug('Response: %r', response)
|
91 |
-
|
92 |
-
return self._response_handler(parent, params, response)
|
93 |
-
|
94 |
-
|
95 |
-
class BatchAction(ServiceAction):
|
96 |
-
"""
|
97 |
-
An action which operates on a batch of items in a collection, typically
|
98 |
-
a single page of results from the collection's underlying service
|
99 |
-
operation call. For example, this allows you to delete up to 999
|
100 |
-
S3 objects in a single operation rather than calling ``.delete()`` on
|
101 |
-
each one individually.
|
102 |
-
|
103 |
-
:type action_model: :py:class`~boto3.resources.model.Action`
|
104 |
-
:param action_model: The action model.
|
105 |
-
|
106 |
-
:type factory: ResourceFactory
|
107 |
-
:param factory: The factory that created the resource class to which
|
108 |
-
this action is attached.
|
109 |
-
|
110 |
-
:type service_context: :py:class:`~boto3.utils.ServiceContext`
|
111 |
-
:param service_context: Context about the AWS service
|
112 |
-
"""
|
113 |
-
|
114 |
-
def __call__(self, parent, *args, **kwargs):
|
115 |
-
"""
|
116 |
-
Perform the batch action's operation on every page of results
|
117 |
-
from the collection.
|
118 |
-
|
119 |
-
:type parent:
|
120 |
-
:py:class:`~boto3.resources.collection.ResourceCollection`
|
121 |
-
:param parent: The collection iterator to which this action
|
122 |
-
is attached.
|
123 |
-
:rtype: list(dict)
|
124 |
-
:return: A list of low-level response dicts from each call.
|
125 |
-
"""
|
126 |
-
service_name = None
|
127 |
-
client = None
|
128 |
-
responses = []
|
129 |
-
operation_name = xform_name(self._action_model.request.operation)
|
130 |
-
|
131 |
-
# Unlike the simple action above, a batch action must operate
|
132 |
-
# on batches (or pages) of items. So we get each page, construct
|
133 |
-
# the necessary parameters and call the batch operation.
|
134 |
-
for page in parent.pages():
|
135 |
-
params = {}
|
136 |
-
for index, resource in enumerate(page):
|
137 |
-
# There is no public interface to get a service name
|
138 |
-
# or low-level client from a collection, so we get
|
139 |
-
# these from the first resource in the collection.
|
140 |
-
if service_name is None:
|
141 |
-
service_name = resource.meta.service_name
|
142 |
-
if client is None:
|
143 |
-
client = resource.meta.client
|
144 |
-
|
145 |
-
create_request_parameters(
|
146 |
-
resource,
|
147 |
-
self._action_model.request,
|
148 |
-
params=params,
|
149 |
-
index=index,
|
150 |
-
)
|
151 |
-
|
152 |
-
if not params:
|
153 |
-
# There are no items, no need to make a call.
|
154 |
-
break
|
155 |
-
|
156 |
-
params.update(kwargs)
|
157 |
-
|
158 |
-
logger.debug(
|
159 |
-
'Calling %s:%s with %r', service_name, operation_name, params
|
160 |
-
)
|
161 |
-
|
162 |
-
response = getattr(client, operation_name)(*args, **params)
|
163 |
-
|
164 |
-
logger.debug('Response: %r', response)
|
165 |
-
|
166 |
-
responses.append(self._response_handler(parent, params, response))
|
167 |
-
|
168 |
-
return responses
|
169 |
-
|
170 |
-
|
171 |
-
class WaiterAction:
|
172 |
-
"""
|
173 |
-
A class representing a callable waiter action on a resource, for example
|
174 |
-
``s3.Bucket('foo').wait_until_bucket_exists()``.
|
175 |
-
The waiter action may construct parameters from existing resource
|
176 |
-
identifiers.
|
177 |
-
|
178 |
-
:type waiter_model: :py:class`~boto3.resources.model.Waiter`
|
179 |
-
:param waiter_model: The action waiter.
|
180 |
-
:type waiter_resource_name: string
|
181 |
-
:param waiter_resource_name: The name of the waiter action for the
|
182 |
-
resource. It usually begins with a
|
183 |
-
``wait_until_``
|
184 |
-
"""
|
185 |
-
|
186 |
-
def __init__(self, waiter_model, waiter_resource_name):
|
187 |
-
self._waiter_model = waiter_model
|
188 |
-
self._waiter_resource_name = waiter_resource_name
|
189 |
-
|
190 |
-
def __call__(self, parent, *args, **kwargs):
|
191 |
-
"""
|
192 |
-
Perform the wait operation after building operation
|
193 |
-
parameters.
|
194 |
-
|
195 |
-
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
|
196 |
-
:param parent: The resource instance to which this action is attached.
|
197 |
-
"""
|
198 |
-
client_waiter_name = xform_name(self._waiter_model.waiter_name)
|
199 |
-
|
200 |
-
# First, build predefined params and then update with the
|
201 |
-
# user-supplied kwargs, which allows overriding the pre-built
|
202 |
-
# params if needed.
|
203 |
-
params = create_request_parameters(parent, self._waiter_model)
|
204 |
-
params.update(kwargs)
|
205 |
-
|
206 |
-
logger.debug(
|
207 |
-
'Calling %s:%s with %r',
|
208 |
-
parent.meta.service_name,
|
209 |
-
self._waiter_resource_name,
|
210 |
-
params,
|
211 |
-
)
|
212 |
-
|
213 |
-
client = parent.meta.client
|
214 |
-
waiter = client.get_waiter(client_waiter_name)
|
215 |
-
response = waiter.wait(**params)
|
216 |
-
|
217 |
-
logger.debug('Response: %r', response)
|
218 |
-
|
219 |
-
|
220 |
-
class CustomModeledAction:
|
221 |
-
"""A custom, modeled action to inject into a resource."""
|
222 |
-
|
223 |
-
def __init__(self, action_name, action_model, function, event_emitter):
|
224 |
-
"""
|
225 |
-
:type action_name: str
|
226 |
-
:param action_name: The name of the action to inject, e.g.
|
227 |
-
'delete_tags'
|
228 |
-
|
229 |
-
:type action_model: dict
|
230 |
-
:param action_model: A JSON definition of the action, as if it were
|
231 |
-
part of the resource model.
|
232 |
-
|
233 |
-
:type function: function
|
234 |
-
:param function: The function to perform when the action is called.
|
235 |
-
The first argument should be 'self', which will be the resource
|
236 |
-
the function is to be called on.
|
237 |
-
|
238 |
-
:type event_emitter: :py:class:`botocore.hooks.BaseEventHooks`
|
239 |
-
:param event_emitter: The session event emitter.
|
240 |
-
"""
|
241 |
-
self.name = action_name
|
242 |
-
self.model = action_model
|
243 |
-
self.function = function
|
244 |
-
self.emitter = event_emitter
|
245 |
-
|
246 |
-
def inject(self, class_attributes, service_context, event_name, **kwargs):
|
247 |
-
resource_name = event_name.rsplit(".")[-1]
|
248 |
-
action = Action(self.name, self.model, {})
|
249 |
-
self.function.__name__ = self.name
|
250 |
-
self.function.__doc__ = ActionDocstring(
|
251 |
-
resource_name=resource_name,
|
252 |
-
event_emitter=self.emitter,
|
253 |
-
action_model=action,
|
254 |
-
service_model=service_context.service_model,
|
255 |
-
include_signature=False,
|
256 |
-
)
|
257 |
-
inject_attribute(class_attributes, self.name, self.function)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/req/req_set.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
from collections import OrderedDict
|
3 |
-
from typing import Dict, List
|
4 |
-
|
5 |
-
from pip._vendor.packaging.utils import canonicalize_name
|
6 |
-
|
7 |
-
from pip._internal.req.req_install import InstallRequirement
|
8 |
-
|
9 |
-
logger = logging.getLogger(__name__)
|
10 |
-
|
11 |
-
|
12 |
-
class RequirementSet:
|
13 |
-
def __init__(self, check_supported_wheels: bool = True) -> None:
|
14 |
-
"""Create a RequirementSet."""
|
15 |
-
|
16 |
-
self.requirements: Dict[str, InstallRequirement] = OrderedDict()
|
17 |
-
self.check_supported_wheels = check_supported_wheels
|
18 |
-
|
19 |
-
self.unnamed_requirements: List[InstallRequirement] = []
|
20 |
-
|
21 |
-
def __str__(self) -> str:
|
22 |
-
requirements = sorted(
|
23 |
-
(req for req in self.requirements.values() if not req.comes_from),
|
24 |
-
key=lambda req: canonicalize_name(req.name or ""),
|
25 |
-
)
|
26 |
-
return " ".join(str(req.req) for req in requirements)
|
27 |
-
|
28 |
-
def __repr__(self) -> str:
|
29 |
-
requirements = sorted(
|
30 |
-
self.requirements.values(),
|
31 |
-
key=lambda req: canonicalize_name(req.name or ""),
|
32 |
-
)
|
33 |
-
|
34 |
-
format_string = "<{classname} object; {count} requirement(s): {reqs}>"
|
35 |
-
return format_string.format(
|
36 |
-
classname=self.__class__.__name__,
|
37 |
-
count=len(requirements),
|
38 |
-
reqs=", ".join(str(req.req) for req in requirements),
|
39 |
-
)
|
40 |
-
|
41 |
-
def add_unnamed_requirement(self, install_req: InstallRequirement) -> None:
|
42 |
-
assert not install_req.name
|
43 |
-
self.unnamed_requirements.append(install_req)
|
44 |
-
|
45 |
-
def add_named_requirement(self, install_req: InstallRequirement) -> None:
|
46 |
-
assert install_req.name
|
47 |
-
|
48 |
-
project_name = canonicalize_name(install_req.name)
|
49 |
-
self.requirements[project_name] = install_req
|
50 |
-
|
51 |
-
def has_requirement(self, name: str) -> bool:
|
52 |
-
project_name = canonicalize_name(name)
|
53 |
-
|
54 |
-
return (
|
55 |
-
project_name in self.requirements
|
56 |
-
and not self.requirements[project_name].constraint
|
57 |
-
)
|
58 |
-
|
59 |
-
def get_requirement(self, name: str) -> InstallRequirement:
|
60 |
-
project_name = canonicalize_name(name)
|
61 |
-
|
62 |
-
if project_name in self.requirements:
|
63 |
-
return self.requirements[project_name]
|
64 |
-
|
65 |
-
raise KeyError(f"No project with the name {name!r}")
|
66 |
-
|
67 |
-
@property
|
68 |
-
def all_requirements(self) -> List[InstallRequirement]:
|
69 |
-
return self.unnamed_requirements + list(self.requirements.values())
|
70 |
-
|
71 |
-
@property
|
72 |
-
def requirements_to_install(self) -> List[InstallRequirement]:
|
73 |
-
"""Return the list of requirements that need to be installed.
|
74 |
-
|
75 |
-
TODO remove this property together with the legacy resolver, since the new
|
76 |
-
resolver only returns requirements that need to be installed.
|
77 |
-
"""
|
78 |
-
return [
|
79 |
-
install_req
|
80 |
-
for install_req in self.all_requirements
|
81 |
-
if not install_req.constraint and not install_req.satisfied_by
|
82 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/api_resource.py
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
from urllib.parse import quote_plus
|
2 |
-
|
3 |
-
import openai
|
4 |
-
from openai import api_requestor, error, util
|
5 |
-
from openai.openai_object import OpenAIObject
|
6 |
-
from openai.util import ApiType
|
7 |
-
|
8 |
-
|
9 |
-
class APIResource(OpenAIObject):
|
10 |
-
api_prefix = ""
|
11 |
-
azure_api_prefix = "openai"
|
12 |
-
azure_deployments_prefix = "deployments"
|
13 |
-
|
14 |
-
@classmethod
|
15 |
-
def retrieve(cls, id, api_key=None, request_id=None, **params):
|
16 |
-
instance = cls(id, api_key, **params)
|
17 |
-
instance.refresh(request_id=request_id)
|
18 |
-
return instance
|
19 |
-
|
20 |
-
def refresh(self, request_id=None):
|
21 |
-
self.refresh_from(
|
22 |
-
self.request("get", self.instance_url(), request_id=request_id)
|
23 |
-
)
|
24 |
-
return self
|
25 |
-
|
26 |
-
@classmethod
|
27 |
-
def class_url(cls):
|
28 |
-
if cls == APIResource:
|
29 |
-
raise NotImplementedError(
|
30 |
-
"APIResource is an abstract class. You should perform actions on its subclasses."
|
31 |
-
)
|
32 |
-
# Namespaces are separated in object names with periods (.) and in URLs
|
33 |
-
# with forward slashes (/), so replace the former with the latter.
|
34 |
-
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
|
35 |
-
if cls.api_prefix:
|
36 |
-
return "/%s/%s" % (cls.api_prefix, base)
|
37 |
-
return "/%s" % (base)
|
38 |
-
|
39 |
-
def instance_url(self, operation=None):
|
40 |
-
id = self.get("id")
|
41 |
-
|
42 |
-
if not isinstance(id, str):
|
43 |
-
raise error.InvalidRequestError(
|
44 |
-
"Could not determine which URL to request: %s instance "
|
45 |
-
"has invalid ID: %r, %s. ID should be of type `str` (or"
|
46 |
-
" `unicode`)" % (type(self).__name__, id, type(id)),
|
47 |
-
"id",
|
48 |
-
)
|
49 |
-
api_version = self.api_version or openai.api_version
|
50 |
-
extn = quote_plus(id)
|
51 |
-
|
52 |
-
if self.typed_api_type == ApiType.AZURE:
|
53 |
-
if not api_version:
|
54 |
-
raise error.InvalidRequestError(
|
55 |
-
"An API version is required for the Azure API type."
|
56 |
-
)
|
57 |
-
|
58 |
-
if not operation:
|
59 |
-
base = self.class_url()
|
60 |
-
return "/%s%s/%s?api-version=%s" % (
|
61 |
-
self.azure_api_prefix,
|
62 |
-
base,
|
63 |
-
extn,
|
64 |
-
api_version
|
65 |
-
)
|
66 |
-
|
67 |
-
return "/%s/%s/%s/%s?api-version=%s" % (
|
68 |
-
self.azure_api_prefix,
|
69 |
-
self.azure_deployments_prefix,
|
70 |
-
extn,
|
71 |
-
operation,
|
72 |
-
api_version
|
73 |
-
)
|
74 |
-
|
75 |
-
|
76 |
-
elif self.typed_api_type == ApiType.OPEN_AI:
|
77 |
-
base = self.class_url()
|
78 |
-
return "%s/%s" % (base, extn)
|
79 |
-
|
80 |
-
else:
|
81 |
-
raise error.InvalidAPIType("Unsupported API type %s" % self.api_type)
|
82 |
-
|
83 |
-
# The `method_` and `url_` arguments are suffixed with an underscore to
|
84 |
-
# avoid conflicting with actual request parameters in `params`.
|
85 |
-
@classmethod
|
86 |
-
def _static_request(
|
87 |
-
cls,
|
88 |
-
method_,
|
89 |
-
url_,
|
90 |
-
api_key=None,
|
91 |
-
api_base=None,
|
92 |
-
api_type=None,
|
93 |
-
request_id=None,
|
94 |
-
api_version=None,
|
95 |
-
organization=None,
|
96 |
-
**params,
|
97 |
-
):
|
98 |
-
requestor = api_requestor.APIRequestor(
|
99 |
-
api_key,
|
100 |
-
api_version=api_version,
|
101 |
-
organization=organization,
|
102 |
-
api_base=api_base,
|
103 |
-
api_type=api_type
|
104 |
-
)
|
105 |
-
response, _, api_key = requestor.request(
|
106 |
-
method_, url_, params, request_id=request_id
|
107 |
-
)
|
108 |
-
return util.convert_to_openai_object(
|
109 |
-
response, api_key, api_version, organization
|
110 |
-
)
|
111 |
-
|
112 |
-
@classmethod
|
113 |
-
def _get_api_type_and_version(cls, api_type: str, api_version: str):
|
114 |
-
typed_api_type = ApiType.from_str(api_type) if api_type else ApiType.from_str(openai.api_type)
|
115 |
-
typed_api_version = api_version or openai.api_version
|
116 |
-
return (typed_api_type, typed_api_version)
|
117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bonosa2/movies/app3.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import pandas as pd
|
2 |
-
import numpy as np
|
3 |
-
from transformers import AutoTokenizer, AutoModel
|
4 |
-
import torch
|
5 |
-
import scipy.spatial
|
6 |
-
import gradio as gr
|
7 |
-
|
8 |
-
# Load the movies dataset
|
9 |
-
url = 'https://storage.googleapis.com/movves123/movies.csv'
|
10 |
-
df_movies = pd.read_csv(url)
|
11 |
-
|
12 |
-
# Load the ratings dataset
|
13 |
-
df_ratings = pd.read_csv('ratings.csv')
|
14 |
-
|
15 |
-
# Calculate the average rating for each movie
|
16 |
-
average_ratings = df_ratings.groupby('movieId')['rating'].mean()
|
17 |
-
|
18 |
-
# Join the average ratings with the movies DataFrame
|
19 |
-
df = df_movies.join(average_ratings, on='movieId')
|
20 |
-
|
21 |
-
# Load DistilBERT model and tokenizer
|
22 |
-
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
|
23 |
-
model = AutoModel.from_pretrained('distilbert-base-uncased')
|
24 |
-
|
25 |
-
# Precompute movie title embeddings
|
26 |
-
titles = df['title'].tolist()
|
27 |
-
genres = df['genres'].tolist()
|
28 |
-
ratings = df['rating'].tolist() # Use the average rating
|
29 |
-
|
30 |
-
# Combine title, genre, and ratings into a single string and compute embeddings
|
31 |
-
combined = [f"{title} {genre} {rating}" for title, genre, rating in zip(titles, genres, ratings)]
|
32 |
-
inputs = tokenizer(combined, return_tensors='pt', padding=True, truncation=True, max_length=512)
|
33 |
-
outputs = model(**inputs)
|
34 |
-
embeddings = outputs.last_hidden_state.mean(1).detach().numpy()
|
35 |
-
|
36 |
-
# Convert embeddings to a tensor for similarity calculation
|
37 |
-
embeddings = torch.tensor(embeddings)
|
38 |
-
|
39 |
-
# List of movie genres
|
40 |
-
genre_keywords = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime',
|
41 |
-
'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical',
|
42 |
-
'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
|
43 |
-
|
44 |
-
def recommend_movies(user_input):
|
45 |
-
# Detect genre from user's input
|
46 |
-
user_genre = [genre for genre in genre_keywords if genre.lower() in user_input.lower()]
|
47 |
-
|
48 |
-
# If a genre is detected, recommend movies from that genre
|
49 |
-
if user_genre:
|
50 |
-
inputs = tokenizer(user_genre[0], return_tensors='pt')
|
51 |
-
query_embedding = model(**inputs).last_hidden_state.mean(1).detach().numpy()
|
52 |
-
else:
|
53 |
-
inputs = tokenizer(user_input, return_tensors='pt')
|
54 |
-
query_embedding = model(**inputs).last_hidden_state.mean(1).detach().numpy()
|
55 |
-
|
56 |
-
# Compute cosine similarity scores
|
57 |
-
cosine_scores = scipy.spatial.distance.cdist(query_embedding, embeddings, "cosine")[0]
|
58 |
-
|
59 |
-
# Get top 5 matches
|
60 |
-
top_results = np.argpartition(-cosine_scores, range(5))[:5]
|
61 |
-
|
62 |
-
# Generate a list of numbered recommendations
|
63 |
-
final_recommendations = [f"{i+1}. {df.iloc[idx]['title']} (Rating: {df.iloc[idx]['rating']})" for i, idx in enumerate(top_results)]
|
64 |
-
|
65 |
-
return "\n".join(final_recommendations)
|
66 |
-
|
67 |
-
|
68 |
-
examples = [
|
69 |
-
['I\'m in the mood for a comedy.'],
|
70 |
-
['How about some action?'],
|
71 |
-
['I want to watch a romance movie.']
|
72 |
-
]
|
73 |
-
|
74 |
-
iface = gr.Interface(fn=recommend_movies,
|
75 |
-
inputs=gr.inputs.Textbox(lines=2, placeholder='Type something...'),
|
76 |
-
outputs=gr.outputs.Textbox(),
|
77 |
-
examples=examples) # Include examples
|
78 |
-
iface.launch()
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BorisovMaksim/denoising/Dockerfile
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
# syntax=docker/dockerfile:1
|
2 |
-
|
3 |
-
FROM python:3.8-slim-buster
|
4 |
-
LABEL maintainer="Borisov Maksim"
|
5 |
-
|
6 |
-
WORKDIR /app
|
7 |
-
|
8 |
-
COPY requirements.txt requirements.txt
|
9 |
-
RUN pip3 install -r requirements.txt
|
10 |
-
|
11 |
-
COPY . .
|
12 |
-
|
13 |
-
WORKDIR /app
|
14 |
-
CMD ["python3", "app.py"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/meta_arch/rcnn.py
DELETED
@@ -1,282 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
from torch import nn
|
6 |
-
|
7 |
-
from detectron2.structures import ImageList
|
8 |
-
from detectron2.utils.events import get_event_storage
|
9 |
-
from detectron2.utils.logger import log_first_n
|
10 |
-
|
11 |
-
from ..backbone import build_backbone
|
12 |
-
from ..postprocessing import detector_postprocess
|
13 |
-
from ..proposal_generator import build_proposal_generator
|
14 |
-
from ..roi_heads import build_roi_heads
|
15 |
-
from .build import META_ARCH_REGISTRY
|
16 |
-
|
17 |
-
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
|
18 |
-
|
19 |
-
|
20 |
-
@META_ARCH_REGISTRY.register()
|
21 |
-
class GeneralizedRCNN(nn.Module):
|
22 |
-
"""
|
23 |
-
Generalized R-CNN. Any models that contains the following three components:
|
24 |
-
1. Per-image feature extraction (aka backbone)
|
25 |
-
2. Region proposal generation
|
26 |
-
3. Per-region feature extraction and prediction
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self, cfg):
|
30 |
-
super().__init__()
|
31 |
-
|
32 |
-
self.device = torch.device(cfg.MODEL.DEVICE)
|
33 |
-
self.backbone = build_backbone(cfg)
|
34 |
-
self.proposal_generator = build_proposal_generator(
|
35 |
-
cfg, self.backbone.output_shape()
|
36 |
-
)
|
37 |
-
self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
|
38 |
-
self.vis_period = cfg.VIS_PERIOD
|
39 |
-
self.input_format = cfg.INPUT.FORMAT
|
40 |
-
|
41 |
-
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
|
42 |
-
num_channels = len(cfg.MODEL.PIXEL_MEAN)
|
43 |
-
pixel_mean = (
|
44 |
-
torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1)
|
45 |
-
)
|
46 |
-
pixel_std = (
|
47 |
-
torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1)
|
48 |
-
)
|
49 |
-
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
|
50 |
-
self.to(self.device)
|
51 |
-
|
52 |
-
def visualize_training(self, batched_inputs, proposals):
|
53 |
-
"""
|
54 |
-
A function used to visualize images and proposals. It shows ground truth
|
55 |
-
bounding boxes on the original image and up to 20 predicted object
|
56 |
-
proposals on the original image. Users can implement different
|
57 |
-
visualization functions for different models.
|
58 |
-
|
59 |
-
Args:
|
60 |
-
batched_inputs (list): a list that contains input to the model.
|
61 |
-
proposals (list): a list that contains predicted proposals. Both
|
62 |
-
batched_inputs and proposals should have the same length.
|
63 |
-
"""
|
64 |
-
from detectron2.utils.visualizer import Visualizer
|
65 |
-
|
66 |
-
storage = get_event_storage()
|
67 |
-
max_vis_prop = 20
|
68 |
-
|
69 |
-
for input, prop in zip(batched_inputs, proposals):
|
70 |
-
img = input["image"].cpu().numpy()
|
71 |
-
assert img.shape[0] == 3, "Images should have 3 channels."
|
72 |
-
if self.input_format == "BGR":
|
73 |
-
img = img[::-1, :, :]
|
74 |
-
img = img.transpose(1, 2, 0)
|
75 |
-
v_gt = Visualizer(img, None)
|
76 |
-
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
|
77 |
-
anno_img = v_gt.get_image()
|
78 |
-
box_size = min(len(prop.proposal_boxes), max_vis_prop)
|
79 |
-
v_pred = Visualizer(img, None)
|
80 |
-
v_pred = v_pred.overlay_instances(
|
81 |
-
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
|
82 |
-
)
|
83 |
-
prop_img = v_pred.get_image()
|
84 |
-
vis_img = np.concatenate((anno_img, prop_img), axis=1)
|
85 |
-
vis_img = vis_img.transpose(2, 0, 1)
|
86 |
-
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
|
87 |
-
storage.put_image(vis_name, vis_img)
|
88 |
-
break # only visualize one image in a batch
|
89 |
-
|
90 |
-
def forward(self, batched_inputs):
|
91 |
-
"""
|
92 |
-
Args:
|
93 |
-
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
|
94 |
-
Each item in the list contains the inputs for one image.
|
95 |
-
For now, each item in the list is a dict that contains:
|
96 |
-
|
97 |
-
* image: Tensor, image in (C, H, W) format.
|
98 |
-
* instances (optional): groundtruth :class:`Instances`
|
99 |
-
* proposals (optional): :class:`Instances`, precomputed proposals.
|
100 |
-
|
101 |
-
Other information that's included in the original dicts, such as:
|
102 |
-
|
103 |
-
* "height", "width" (int): the output resolution of the model, used in inference.
|
104 |
-
See :meth:`postprocess` for details.
|
105 |
-
|
106 |
-
Returns:
|
107 |
-
list[dict]:
|
108 |
-
Each dict is the output for one input image.
|
109 |
-
The dict contains one key "instances" whose value is a :class:`Instances`.
|
110 |
-
The :class:`Instances` object has the following keys:
|
111 |
-
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
|
112 |
-
"""
|
113 |
-
if not self.training:
|
114 |
-
return self.inference(batched_inputs)
|
115 |
-
|
116 |
-
images = self.preprocess_image(batched_inputs)
|
117 |
-
if "instances" in batched_inputs[0]:
|
118 |
-
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
|
119 |
-
elif "targets" in batched_inputs[0]:
|
120 |
-
log_first_n(
|
121 |
-
logging.WARN,
|
122 |
-
"'targets' in the model inputs is now renamed to 'instances'!",
|
123 |
-
n=10,
|
124 |
-
)
|
125 |
-
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
|
126 |
-
else:
|
127 |
-
gt_instances = None
|
128 |
-
|
129 |
-
features = self.backbone(images.tensor)
|
130 |
-
|
131 |
-
if self.proposal_generator:
|
132 |
-
proposals, proposal_losses = self.proposal_generator(
|
133 |
-
images, features, gt_instances
|
134 |
-
)
|
135 |
-
else:
|
136 |
-
assert "proposals" in batched_inputs[0]
|
137 |
-
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
|
138 |
-
proposal_losses = {}
|
139 |
-
|
140 |
-
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
|
141 |
-
if self.vis_period > 0:
|
142 |
-
storage = get_event_storage()
|
143 |
-
if storage.iter % self.vis_period == 0:
|
144 |
-
self.visualize_training(batched_inputs, proposals)
|
145 |
-
|
146 |
-
losses = {}
|
147 |
-
losses.update(detector_losses)
|
148 |
-
losses.update(proposal_losses)
|
149 |
-
return losses
|
150 |
-
|
151 |
-
def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
|
152 |
-
"""
|
153 |
-
Run inference on the given inputs.
|
154 |
-
|
155 |
-
Args:
|
156 |
-
batched_inputs (list[dict]): same as in :meth:`forward`
|
157 |
-
detected_instances (None or list[Instances]): if not None, it
|
158 |
-
contains an `Instances` object per image. The `Instances`
|
159 |
-
object contains "pred_boxes" and "pred_classes" which are
|
160 |
-
known boxes in the image.
|
161 |
-
The inference will then skip the detection of bounding boxes,
|
162 |
-
and only predict other per-ROI outputs.
|
163 |
-
do_postprocess (bool): whether to apply post-processing on the outputs.
|
164 |
-
|
165 |
-
Returns:
|
166 |
-
same as in :meth:`forward`.
|
167 |
-
"""
|
168 |
-
assert not self.training
|
169 |
-
|
170 |
-
images = self.preprocess_image(batched_inputs)
|
171 |
-
features = self.backbone(images.tensor)
|
172 |
-
|
173 |
-
if detected_instances is None:
|
174 |
-
if self.proposal_generator:
|
175 |
-
proposals, _ = self.proposal_generator(images, features, None)
|
176 |
-
else:
|
177 |
-
assert "proposals" in batched_inputs[0]
|
178 |
-
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
|
179 |
-
|
180 |
-
results, box_features = self.roi_heads(images, features, proposals, None)
|
181 |
-
else:
|
182 |
-
detected_instances = [x.to(self.device) for x in detected_instances]
|
183 |
-
results = self.roi_heads.forward_with_given_boxes(
|
184 |
-
features, detected_instances
|
185 |
-
)
|
186 |
-
|
187 |
-
if do_postprocess:
|
188 |
-
return (
|
189 |
-
GeneralizedRCNN._postprocess(
|
190 |
-
results, batched_inputs, images.image_sizes
|
191 |
-
),
|
192 |
-
box_features,
|
193 |
-
)
|
194 |
-
else:
|
195 |
-
return results
|
196 |
-
|
197 |
-
def preprocess_image(self, batched_inputs):
|
198 |
-
"""
|
199 |
-
Normalize, pad and batch the input images.
|
200 |
-
"""
|
201 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
202 |
-
images = [self.normalizer(x) for x in images]
|
203 |
-
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
|
204 |
-
return images
|
205 |
-
|
206 |
-
@staticmethod
|
207 |
-
def _postprocess(instances, batched_inputs, image_sizes):
|
208 |
-
"""
|
209 |
-
Rescale the output instances to the target size.
|
210 |
-
"""
|
211 |
-
# note: private function; subject to changes
|
212 |
-
processed_results = []
|
213 |
-
for results_per_image, input_per_image, image_size in zip(
|
214 |
-
instances, batched_inputs, image_sizes
|
215 |
-
):
|
216 |
-
height = input_per_image.get("height", image_size[0])
|
217 |
-
width = input_per_image.get("width", image_size[1])
|
218 |
-
r = detector_postprocess(results_per_image, height, width)
|
219 |
-
processed_results.append({"instances": r})
|
220 |
-
return processed_results
|
221 |
-
|
222 |
-
|
223 |
-
@META_ARCH_REGISTRY.register()
|
224 |
-
class ProposalNetwork(nn.Module):
|
225 |
-
def __init__(self, cfg):
|
226 |
-
super().__init__()
|
227 |
-
self.device = torch.device(cfg.MODEL.DEVICE)
|
228 |
-
|
229 |
-
self.backbone = build_backbone(cfg)
|
230 |
-
self.proposal_generator = build_proposal_generator(
|
231 |
-
cfg, self.backbone.output_shape()
|
232 |
-
)
|
233 |
-
|
234 |
-
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1)
|
235 |
-
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1)
|
236 |
-
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
|
237 |
-
self.to(self.device)
|
238 |
-
|
239 |
-
def forward(self, batched_inputs):
|
240 |
-
"""
|
241 |
-
Args:
|
242 |
-
Same as in :class:`GeneralizedRCNN.forward`
|
243 |
-
|
244 |
-
Returns:
|
245 |
-
list[dict]:
|
246 |
-
Each dict is the output for one input image.
|
247 |
-
The dict contains one key "proposals" whose value is a
|
248 |
-
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
|
249 |
-
"""
|
250 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
251 |
-
images = [self.normalizer(x) for x in images]
|
252 |
-
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
|
253 |
-
features = self.backbone(images.tensor)
|
254 |
-
|
255 |
-
if "instances" in batched_inputs[0]:
|
256 |
-
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
|
257 |
-
elif "targets" in batched_inputs[0]:
|
258 |
-
log_first_n(
|
259 |
-
logging.WARN,
|
260 |
-
"'targets' in the model inputs is now renamed to 'instances'!",
|
261 |
-
n=10,
|
262 |
-
)
|
263 |
-
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
|
264 |
-
else:
|
265 |
-
gt_instances = None
|
266 |
-
proposals, proposal_losses = self.proposal_generator(
|
267 |
-
images, features, gt_instances
|
268 |
-
)
|
269 |
-
# In training, the proposals are not useful at all but we generate them anyway.
|
270 |
-
# This makes RPN-only models about 5% slower.
|
271 |
-
if self.training:
|
272 |
-
return proposal_losses
|
273 |
-
|
274 |
-
processed_results = []
|
275 |
-
for results_per_image, input_per_image, image_size in zip(
|
276 |
-
proposals, batched_inputs, images.image_sizes
|
277 |
-
):
|
278 |
-
height = input_per_image.get("height", image_size[0])
|
279 |
-
width = input_per_image.get("width", image_size[1])
|
280 |
-
r = detector_postprocess(results_per_image, height, width)
|
281 |
-
processed_results.append({"proposals": r})
|
282 |
-
return processed_results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Text2Human/Text2Human/data/mask_dataset.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import os.path
|
3 |
-
import random
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
import torch
|
7 |
-
import torch.utils.data as data
|
8 |
-
from PIL import Image
|
9 |
-
|
10 |
-
|
11 |
-
class MaskDataset(data.Dataset):
|
12 |
-
|
13 |
-
def __init__(self, segm_dir, ann_dir, downsample_factor=2, xflip=False):
|
14 |
-
|
15 |
-
self._segm_path = segm_dir
|
16 |
-
self._image_fnames = []
|
17 |
-
|
18 |
-
self.downsample_factor = downsample_factor
|
19 |
-
self.xflip = xflip
|
20 |
-
|
21 |
-
# load attributes
|
22 |
-
assert os.path.exists(f'{ann_dir}/upper_fused.txt')
|
23 |
-
for idx, row in enumerate(
|
24 |
-
open(os.path.join(f'{ann_dir}/upper_fused.txt'), 'r')):
|
25 |
-
annotations = row.split()
|
26 |
-
self._image_fnames.append(annotations[0])
|
27 |
-
|
28 |
-
def _open_file(self, path_prefix, fname):
|
29 |
-
return open(os.path.join(path_prefix, fname), 'rb')
|
30 |
-
|
31 |
-
def _load_segm(self, raw_idx):
|
32 |
-
fname = self._image_fnames[raw_idx]
|
33 |
-
fname = f'{fname[:-4]}_segm.png'
|
34 |
-
with self._open_file(self._segm_path, fname) as f:
|
35 |
-
segm = Image.open(f)
|
36 |
-
if self.downsample_factor != 1:
|
37 |
-
width, height = segm.size
|
38 |
-
width = width // self.downsample_factor
|
39 |
-
height = height // self.downsample_factor
|
40 |
-
segm = segm.resize(
|
41 |
-
size=(width, height), resample=Image.NEAREST)
|
42 |
-
segm = np.array(segm)
|
43 |
-
# segm = segm[:, :, np.newaxis].transpose(2, 0, 1)
|
44 |
-
return segm.astype(np.float32)
|
45 |
-
|
46 |
-
def __getitem__(self, index):
|
47 |
-
segm = self._load_segm(index)
|
48 |
-
|
49 |
-
if self.xflip and random.random() > 0.5:
|
50 |
-
segm = segm[:, ::-1].copy()
|
51 |
-
|
52 |
-
segm = torch.from_numpy(segm).long()
|
53 |
-
|
54 |
-
return_dict = {'segm': segm, 'img_name': self._image_fnames[index]}
|
55 |
-
|
56 |
-
return return_dict
|
57 |
-
|
58 |
-
def __len__(self):
|
59 |
-
return len(self._image_fnames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChallengeHub/Chinese-LangChain/clc/gpt_service.py
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
# -*- coding:utf-8 _*-
|
3 |
-
"""
|
4 |
-
@author:quincy qiang
|
5 |
-
@license: Apache Licence
|
6 |
-
@file: generate.py
|
7 |
-
@time: 2023/04/17
|
8 |
-
@contact: [email protected]
|
9 |
-
@software: PyCharm
|
10 |
-
@description: coding..
|
11 |
-
"""
|
12 |
-
|
13 |
-
from typing import List, Optional
|
14 |
-
|
15 |
-
from langchain.llms.base import LLM
|
16 |
-
from langchain.llms.utils import enforce_stop_tokens
|
17 |
-
from transformers import AutoModel, AutoTokenizer
|
18 |
-
|
19 |
-
|
20 |
-
class ChatGLMService(LLM):
|
21 |
-
max_token: int = 10000
|
22 |
-
temperature: float = 0.1
|
23 |
-
top_p = 0.9
|
24 |
-
history = []
|
25 |
-
tokenizer: object = None
|
26 |
-
model: object = None
|
27 |
-
|
28 |
-
def __init__(self):
|
29 |
-
super().__init__()
|
30 |
-
|
31 |
-
@property
|
32 |
-
def _llm_type(self) -> str:
|
33 |
-
return "ChatGLM"
|
34 |
-
|
35 |
-
def _call(self,
|
36 |
-
prompt: str,
|
37 |
-
stop: Optional[List[str]] = None) -> str:
|
38 |
-
response, _ = self.model.chat(
|
39 |
-
self.tokenizer,
|
40 |
-
prompt,
|
41 |
-
history=self.history,
|
42 |
-
max_length=self.max_token,
|
43 |
-
temperature=self.temperature,
|
44 |
-
)
|
45 |
-
if stop is not None:
|
46 |
-
response = enforce_stop_tokens(response, stop)
|
47 |
-
self.history = self.history + [[None, response]]
|
48 |
-
return response
|
49 |
-
|
50 |
-
def load_model(self,
|
51 |
-
model_name_or_path: str = "THUDM/chatglm-6b"):
|
52 |
-
self.tokenizer = AutoTokenizer.from_pretrained(
|
53 |
-
model_name_or_path,
|
54 |
-
trust_remote_code=True
|
55 |
-
)
|
56 |
-
self.model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half().cuda()
|
57 |
-
self.model=self.model.eval()
|
58 |
-
# if __name__ == '__main__':
|
59 |
-
# config=LangChainCFG()
|
60 |
-
# chatLLM = ChatGLMService()
|
61 |
-
# chatLLM.load_model(model_name_or_path=config.llm_model_name)
|
62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Codecooker/rvcapi/src/vc_infer_pipeline.py
DELETED
@@ -1,653 +0,0 @@
|
|
1 |
-
from functools import lru_cache
|
2 |
-
from time import time as ttime
|
3 |
-
|
4 |
-
import faiss
|
5 |
-
import librosa
|
6 |
-
import numpy as np
|
7 |
-
import os
|
8 |
-
import parselmouth
|
9 |
-
import pyworld
|
10 |
-
import sys
|
11 |
-
import torch
|
12 |
-
import torch.nn.functional as F
|
13 |
-
import torchcrepe
|
14 |
-
import traceback
|
15 |
-
from scipy import signal
|
16 |
-
from torch import Tensor
|
17 |
-
|
18 |
-
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
19 |
-
now_dir = os.path.join(BASE_DIR, 'src')
|
20 |
-
sys.path.append(now_dir)
|
21 |
-
|
22 |
-
bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
|
23 |
-
|
24 |
-
input_audio_path2wav = {}
|
25 |
-
|
26 |
-
|
27 |
-
@lru_cache
|
28 |
-
def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
|
29 |
-
audio = input_audio_path2wav[input_audio_path]
|
30 |
-
f0, t = pyworld.harvest(
|
31 |
-
audio,
|
32 |
-
fs=fs,
|
33 |
-
f0_ceil=f0max,
|
34 |
-
f0_floor=f0min,
|
35 |
-
frame_period=frame_period,
|
36 |
-
)
|
37 |
-
f0 = pyworld.stonemask(audio, f0, t, fs)
|
38 |
-
return f0
|
39 |
-
|
40 |
-
|
41 |
-
def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比
|
42 |
-
# print(data1.max(),data2.max())
|
43 |
-
rms1 = librosa.feature.rms(
|
44 |
-
y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
|
45 |
-
) # 每半秒一个点
|
46 |
-
rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
|
47 |
-
rms1 = torch.from_numpy(rms1)
|
48 |
-
rms1 = F.interpolate(
|
49 |
-
rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
|
50 |
-
).squeeze()
|
51 |
-
rms2 = torch.from_numpy(rms2)
|
52 |
-
rms2 = F.interpolate(
|
53 |
-
rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
|
54 |
-
).squeeze()
|
55 |
-
rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
|
56 |
-
data2 *= (
|
57 |
-
torch.pow(rms1, torch.tensor(1 - rate))
|
58 |
-
* torch.pow(rms2, torch.tensor(rate - 1))
|
59 |
-
).numpy()
|
60 |
-
return data2
|
61 |
-
|
62 |
-
|
63 |
-
class VC(object):
|
64 |
-
def __init__(self, tgt_sr, config):
|
65 |
-
self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
|
66 |
-
config.x_pad,
|
67 |
-
config.x_query,
|
68 |
-
config.x_center,
|
69 |
-
config.x_max,
|
70 |
-
config.is_half,
|
71 |
-
)
|
72 |
-
self.sr = 16000 # hubert输入采样率
|
73 |
-
self.window = 160 # 每帧点数
|
74 |
-
self.t_pad = self.sr * self.x_pad # 每条前后pad时间
|
75 |
-
self.t_pad_tgt = tgt_sr * self.x_pad
|
76 |
-
self.t_pad2 = self.t_pad * 2
|
77 |
-
self.t_query = self.sr * self.x_query # 查询切点前后查询时间
|
78 |
-
self.t_center = self.sr * self.x_center # 查询切点位置
|
79 |
-
self.t_max = self.sr * self.x_max # 免查询时长阈值
|
80 |
-
self.device = config.device
|
81 |
-
|
82 |
-
# Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)
|
83 |
-
def get_optimal_torch_device(self, index: int = 0) -> torch.device:
|
84 |
-
# Get cuda device
|
85 |
-
if torch.cuda.is_available():
|
86 |
-
return torch.device(
|
87 |
-
f"cuda:{index % torch.cuda.device_count()}"
|
88 |
-
) # Very fast
|
89 |
-
elif torch.backends.mps.is_available():
|
90 |
-
return torch.device("mps")
|
91 |
-
# Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library
|
92 |
-
# Else wise return the "cpu" as a torch device,
|
93 |
-
return torch.device("cpu")
|
94 |
-
|
95 |
-
# Fork Feature: Compute f0 with the crepe method
|
96 |
-
def get_f0_crepe_computation(
|
97 |
-
self,
|
98 |
-
x,
|
99 |
-
f0_min,
|
100 |
-
f0_max,
|
101 |
-
p_len,
|
102 |
-
hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.
|
103 |
-
model="full", # Either use crepe-tiny "tiny" or crepe "full". Default is full
|
104 |
-
):
|
105 |
-
x = x.astype(
|
106 |
-
np.float32
|
107 |
-
) # fixes the F.conv2D exception. We needed to convert double to float.
|
108 |
-
x /= np.quantile(np.abs(x), 0.999)
|
109 |
-
torch_device = self.get_optimal_torch_device()
|
110 |
-
audio = torch.from_numpy(x).to(torch_device, copy=True)
|
111 |
-
audio = torch.unsqueeze(audio, dim=0)
|
112 |
-
if audio.ndim == 2 and audio.shape[0] > 1:
|
113 |
-
audio = torch.mean(audio, dim=0, keepdim=True).detach()
|
114 |
-
audio = audio.detach()
|
115 |
-
print("Initiating prediction with a crepe_hop_length of: " + str(hop_length))
|
116 |
-
pitch: Tensor = torchcrepe.predict(
|
117 |
-
audio,
|
118 |
-
self.sr,
|
119 |
-
hop_length,
|
120 |
-
f0_min,
|
121 |
-
f0_max,
|
122 |
-
model,
|
123 |
-
batch_size=hop_length * 2,
|
124 |
-
device=torch_device,
|
125 |
-
pad=True,
|
126 |
-
)
|
127 |
-
p_len = p_len or x.shape[0] // hop_length
|
128 |
-
# Resize the pitch for final f0
|
129 |
-
source = np.array(pitch.squeeze(0).cpu().float().numpy())
|
130 |
-
source[source < 0.001] = np.nan
|
131 |
-
target = np.interp(
|
132 |
-
np.arange(0, len(source) * p_len, len(source)) / p_len,
|
133 |
-
np.arange(0, len(source)),
|
134 |
-
source,
|
135 |
-
)
|
136 |
-
f0 = np.nan_to_num(target)
|
137 |
-
return f0 # Resized f0
|
138 |
-
|
139 |
-
def get_f0_official_crepe_computation(
|
140 |
-
self,
|
141 |
-
x,
|
142 |
-
f0_min,
|
143 |
-
f0_max,
|
144 |
-
model="full",
|
145 |
-
):
|
146 |
-
# Pick a batch size that doesn't cause memory errors on your gpu
|
147 |
-
batch_size = 512
|
148 |
-
# Compute pitch using first gpu
|
149 |
-
audio = torch.tensor(np.copy(x))[None].float()
|
150 |
-
f0, pd = torchcrepe.predict(
|
151 |
-
audio,
|
152 |
-
self.sr,
|
153 |
-
self.window,
|
154 |
-
f0_min,
|
155 |
-
f0_max,
|
156 |
-
model,
|
157 |
-
batch_size=batch_size,
|
158 |
-
device=self.device,
|
159 |
-
return_periodicity=True,
|
160 |
-
)
|
161 |
-
pd = torchcrepe.filter.median(pd, 3)
|
162 |
-
f0 = torchcrepe.filter.mean(f0, 3)
|
163 |
-
f0[pd < 0.1] = 0
|
164 |
-
f0 = f0[0].cpu().numpy()
|
165 |
-
return f0
|
166 |
-
|
167 |
-
# Fork Feature: Compute pYIN f0 method
|
168 |
-
def get_f0_pyin_computation(self, x, f0_min, f0_max):
|
169 |
-
y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True)
|
170 |
-
f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)
|
171 |
-
f0 = f0[1:] # Get rid of extra first frame
|
172 |
-
return f0
|
173 |
-
|
174 |
-
# Fork Feature: Acquire median hybrid f0 estimation calculation
|
175 |
-
def get_f0_hybrid_computation(
|
176 |
-
self,
|
177 |
-
methods_str,
|
178 |
-
input_audio_path,
|
179 |
-
x,
|
180 |
-
f0_min,
|
181 |
-
f0_max,
|
182 |
-
p_len,
|
183 |
-
filter_radius,
|
184 |
-
crepe_hop_length,
|
185 |
-
time_step,
|
186 |
-
):
|
187 |
-
# Get various f0 methods from input to use in the computation stack
|
188 |
-
s = methods_str
|
189 |
-
s = s.split("hybrid")[1]
|
190 |
-
s = s.replace("[", "").replace("]", "")
|
191 |
-
methods = s.split("+")
|
192 |
-
f0_computation_stack = []
|
193 |
-
|
194 |
-
print("Calculating f0 pitch estimations for methods: %s" % str(methods))
|
195 |
-
x = x.astype(np.float32)
|
196 |
-
x /= np.quantile(np.abs(x), 0.999)
|
197 |
-
# Get f0 calculations for all methods specified
|
198 |
-
for method in methods:
|
199 |
-
f0 = None
|
200 |
-
if method == "pm":
|
201 |
-
f0 = (
|
202 |
-
parselmouth.Sound(x, self.sr)
|
203 |
-
.to_pitch_ac(
|
204 |
-
time_step=time_step / 1000,
|
205 |
-
voicing_threshold=0.6,
|
206 |
-
pitch_floor=f0_min,
|
207 |
-
pitch_ceiling=f0_max,
|
208 |
-
)
|
209 |
-
.selected_array["frequency"]
|
210 |
-
)
|
211 |
-
pad_size = (p_len - len(f0) + 1) // 2
|
212 |
-
if pad_size > 0 or p_len - len(f0) - pad_size > 0:
|
213 |
-
f0 = np.pad(
|
214 |
-
f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
|
215 |
-
)
|
216 |
-
elif method == "crepe":
|
217 |
-
f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)
|
218 |
-
f0 = f0[1:] # Get rid of extra first frame
|
219 |
-
elif method == "crepe-tiny":
|
220 |
-
f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny")
|
221 |
-
f0 = f0[1:] # Get rid of extra first frame
|
222 |
-
elif method == "mangio-crepe":
|
223 |
-
f0 = self.get_f0_crepe_computation(
|
224 |
-
x, f0_min, f0_max, p_len, crepe_hop_length
|
225 |
-
)
|
226 |
-
elif method == "mangio-crepe-tiny":
|
227 |
-
f0 = self.get_f0_crepe_computation(
|
228 |
-
x, f0_min, f0_max, p_len, crepe_hop_length, "tiny"
|
229 |
-
)
|
230 |
-
elif method == "harvest":
|
231 |
-
f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
|
232 |
-
if filter_radius > 2:
|
233 |
-
f0 = signal.medfilt(f0, 3)
|
234 |
-
f0 = f0[1:] # Get rid of first frame.
|
235 |
-
elif method == "dio": # Potentially buggy?
|
236 |
-
f0, t = pyworld.dio(
|
237 |
-
x.astype(np.double),
|
238 |
-
fs=self.sr,
|
239 |
-
f0_ceil=f0_max,
|
240 |
-
f0_floor=f0_min,
|
241 |
-
frame_period=10,
|
242 |
-
)
|
243 |
-
f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
|
244 |
-
f0 = signal.medfilt(f0, 3)
|
245 |
-
f0 = f0[1:]
|
246 |
-
# elif method == "pyin": Not Working just yet
|
247 |
-
# f0 = self.get_f0_pyin_computation(x, f0_min, f0_max)
|
248 |
-
# Push method to the stack
|
249 |
-
f0_computation_stack.append(f0)
|
250 |
-
|
251 |
-
for fc in f0_computation_stack:
|
252 |
-
print(len(fc))
|
253 |
-
|
254 |
-
print("Calculating hybrid median f0 from the stack of: %s" % str(methods))
|
255 |
-
f0_median_hybrid = None
|
256 |
-
if len(f0_computation_stack) == 1:
|
257 |
-
f0_median_hybrid = f0_computation_stack[0]
|
258 |
-
else:
|
259 |
-
f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
|
260 |
-
return f0_median_hybrid
|
261 |
-
|
262 |
-
def get_f0(
|
263 |
-
self,
|
264 |
-
input_audio_path,
|
265 |
-
x,
|
266 |
-
p_len,
|
267 |
-
f0_up_key,
|
268 |
-
f0_method,
|
269 |
-
filter_radius,
|
270 |
-
crepe_hop_length,
|
271 |
-
inp_f0=None,
|
272 |
-
):
|
273 |
-
global input_audio_path2wav
|
274 |
-
time_step = self.window / self.sr * 1000
|
275 |
-
f0_min = 50
|
276 |
-
f0_max = 1100
|
277 |
-
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
|
278 |
-
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
|
279 |
-
if f0_method == "pm":
|
280 |
-
f0 = (
|
281 |
-
parselmouth.Sound(x, self.sr)
|
282 |
-
.to_pitch_ac(
|
283 |
-
time_step=time_step / 1000,
|
284 |
-
voicing_threshold=0.6,
|
285 |
-
pitch_floor=f0_min,
|
286 |
-
pitch_ceiling=f0_max,
|
287 |
-
)
|
288 |
-
.selected_array["frequency"]
|
289 |
-
)
|
290 |
-
pad_size = (p_len - len(f0) + 1) // 2
|
291 |
-
if pad_size > 0 or p_len - len(f0) - pad_size > 0:
|
292 |
-
f0 = np.pad(
|
293 |
-
f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
|
294 |
-
)
|
295 |
-
elif f0_method == "harvest":
|
296 |
-
input_audio_path2wav[input_audio_path] = x.astype(np.double)
|
297 |
-
f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
|
298 |
-
if filter_radius > 2:
|
299 |
-
f0 = signal.medfilt(f0, 3)
|
300 |
-
elif f0_method == "dio": # Potentially Buggy?
|
301 |
-
f0, t = pyworld.dio(
|
302 |
-
x.astype(np.double),
|
303 |
-
fs=self.sr,
|
304 |
-
f0_ceil=f0_max,
|
305 |
-
f0_floor=f0_min,
|
306 |
-
frame_period=10,
|
307 |
-
)
|
308 |
-
f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
|
309 |
-
f0 = signal.medfilt(f0, 3)
|
310 |
-
elif f0_method == "crepe":
|
311 |
-
f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)
|
312 |
-
elif f0_method == "crepe-tiny":
|
313 |
-
f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, "tiny")
|
314 |
-
elif f0_method == "mangio-crepe":
|
315 |
-
f0 = self.get_f0_crepe_computation(
|
316 |
-
x, f0_min, f0_max, p_len, crepe_hop_length
|
317 |
-
)
|
318 |
-
elif f0_method == "mangio-crepe-tiny":
|
319 |
-
f0 = self.get_f0_crepe_computation(
|
320 |
-
x, f0_min, f0_max, p_len, crepe_hop_length, "tiny"
|
321 |
-
)
|
322 |
-
elif f0_method == "rmvpe":
|
323 |
-
if hasattr(self, "model_rmvpe") == False:
|
324 |
-
from rmvpe import RMVPE
|
325 |
-
|
326 |
-
self.model_rmvpe = RMVPE(
|
327 |
-
os.path.join(BASE_DIR, 'rvc_models', 'rmvpe.pt'), is_half=self.is_half, device=self.device
|
328 |
-
)
|
329 |
-
f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
|
330 |
-
|
331 |
-
elif "hybrid" in f0_method:
|
332 |
-
# Perform hybrid median pitch estimation
|
333 |
-
input_audio_path2wav[input_audio_path] = x.astype(np.double)
|
334 |
-
f0 = self.get_f0_hybrid_computation(
|
335 |
-
f0_method,
|
336 |
-
input_audio_path,
|
337 |
-
x,
|
338 |
-
f0_min,
|
339 |
-
f0_max,
|
340 |
-
p_len,
|
341 |
-
filter_radius,
|
342 |
-
crepe_hop_length,
|
343 |
-
time_step,
|
344 |
-
)
|
345 |
-
|
346 |
-
f0 *= pow(2, f0_up_key / 12)
|
347 |
-
# with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
|
348 |
-
tf0 = self.sr // self.window # 每秒f0点数
|
349 |
-
if inp_f0 is not None:
|
350 |
-
delta_t = np.round(
|
351 |
-
(inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
|
352 |
-
).astype("int16")
|
353 |
-
replace_f0 = np.interp(
|
354 |
-
list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
|
355 |
-
)
|
356 |
-
shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
|
357 |
-
f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
|
358 |
-
:shape
|
359 |
-
]
|
360 |
-
# with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
|
361 |
-
f0bak = f0.copy()
|
362 |
-
f0_mel = 1127 * np.log(1 + f0 / 700)
|
363 |
-
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
|
364 |
-
f0_mel_max - f0_mel_min
|
365 |
-
) + 1
|
366 |
-
f0_mel[f0_mel <= 1] = 1
|
367 |
-
f0_mel[f0_mel > 255] = 255
|
368 |
-
f0_coarse = np.rint(f0_mel).astype(np.int)
|
369 |
-
|
370 |
-
return f0_coarse, f0bak # 1-0
|
371 |
-
|
372 |
-
def vc(
|
373 |
-
self,
|
374 |
-
model,
|
375 |
-
net_g,
|
376 |
-
sid,
|
377 |
-
audio0,
|
378 |
-
pitch,
|
379 |
-
pitchf,
|
380 |
-
times,
|
381 |
-
index,
|
382 |
-
big_npy,
|
383 |
-
index_rate,
|
384 |
-
version,
|
385 |
-
protect,
|
386 |
-
): # ,file_index,file_big_npy
|
387 |
-
feats = torch.from_numpy(audio0)
|
388 |
-
if self.is_half:
|
389 |
-
feats = feats.half()
|
390 |
-
else:
|
391 |
-
feats = feats.float()
|
392 |
-
if feats.dim() == 2: # double channels
|
393 |
-
feats = feats.mean(-1)
|
394 |
-
assert feats.dim() == 1, feats.dim()
|
395 |
-
feats = feats.view(1, -1)
|
396 |
-
padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
|
397 |
-
|
398 |
-
inputs = {
|
399 |
-
"source": feats.to(self.device),
|
400 |
-
"padding_mask": padding_mask,
|
401 |
-
"output_layer": 9 if version == "v1" else 12,
|
402 |
-
}
|
403 |
-
t0 = ttime()
|
404 |
-
with torch.no_grad():
|
405 |
-
logits = model.extract_features(**inputs)
|
406 |
-
feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
|
407 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
408 |
-
feats0 = feats.clone()
|
409 |
-
if (
|
410 |
-
isinstance(index, type(None)) == False
|
411 |
-
and isinstance(big_npy, type(None)) == False
|
412 |
-
and index_rate != 0
|
413 |
-
):
|
414 |
-
npy = feats[0].cpu().numpy()
|
415 |
-
if self.is_half:
|
416 |
-
npy = npy.astype("float32")
|
417 |
-
|
418 |
-
# _, I = index.search(npy, 1)
|
419 |
-
# npy = big_npy[I.squeeze()]
|
420 |
-
|
421 |
-
score, ix = index.search(npy, k=8)
|
422 |
-
weight = np.square(1 / score)
|
423 |
-
weight /= weight.sum(axis=1, keepdims=True)
|
424 |
-
npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
|
425 |
-
|
426 |
-
if self.is_half:
|
427 |
-
npy = npy.astype("float16")
|
428 |
-
feats = (
|
429 |
-
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
|
430 |
-
+ (1 - index_rate) * feats
|
431 |
-
)
|
432 |
-
|
433 |
-
feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
|
434 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
435 |
-
feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
|
436 |
-
0, 2, 1
|
437 |
-
)
|
438 |
-
t1 = ttime()
|
439 |
-
p_len = audio0.shape[0] // self.window
|
440 |
-
if feats.shape[1] < p_len:
|
441 |
-
p_len = feats.shape[1]
|
442 |
-
if pitch != None and pitchf != None:
|
443 |
-
pitch = pitch[:, :p_len]
|
444 |
-
pitchf = pitchf[:, :p_len]
|
445 |
-
|
446 |
-
if protect < 0.5 and pitch != None and pitchf != None:
|
447 |
-
pitchff = pitchf.clone()
|
448 |
-
pitchff[pitchf > 0] = 1
|
449 |
-
pitchff[pitchf < 1] = protect
|
450 |
-
pitchff = pitchff.unsqueeze(-1)
|
451 |
-
feats = feats * pitchff + feats0 * (1 - pitchff)
|
452 |
-
feats = feats.to(feats0.dtype)
|
453 |
-
p_len = torch.tensor([p_len], device=self.device).long()
|
454 |
-
with torch.no_grad():
|
455 |
-
if pitch != None and pitchf != None:
|
456 |
-
audio1 = (
|
457 |
-
(net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
|
458 |
-
.data.cpu()
|
459 |
-
.float()
|
460 |
-
.numpy()
|
461 |
-
)
|
462 |
-
else:
|
463 |
-
audio1 = (
|
464 |
-
(net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
|
465 |
-
)
|
466 |
-
del feats, p_len, padding_mask
|
467 |
-
if torch.cuda.is_available():
|
468 |
-
torch.cuda.empty_cache()
|
469 |
-
t2 = ttime()
|
470 |
-
times[0] += t1 - t0
|
471 |
-
times[2] += t2 - t1
|
472 |
-
return audio1
|
473 |
-
|
474 |
-
def pipeline(
|
475 |
-
self,
|
476 |
-
model,
|
477 |
-
net_g,
|
478 |
-
sid,
|
479 |
-
audio,
|
480 |
-
input_audio_path,
|
481 |
-
times,
|
482 |
-
f0_up_key,
|
483 |
-
f0_method,
|
484 |
-
file_index,
|
485 |
-
# file_big_npy,
|
486 |
-
index_rate,
|
487 |
-
if_f0,
|
488 |
-
filter_radius,
|
489 |
-
tgt_sr,
|
490 |
-
resample_sr,
|
491 |
-
rms_mix_rate,
|
492 |
-
version,
|
493 |
-
protect,
|
494 |
-
crepe_hop_length,
|
495 |
-
f0_file=None,
|
496 |
-
):
|
497 |
-
if (
|
498 |
-
file_index != ""
|
499 |
-
# and file_big_npy != ""
|
500 |
-
# and os.path.exists(file_big_npy) == True
|
501 |
-
and os.path.exists(file_index) == True
|
502 |
-
and index_rate != 0
|
503 |
-
):
|
504 |
-
try:
|
505 |
-
index = faiss.read_index(file_index)
|
506 |
-
# big_npy = np.load(file_big_npy)
|
507 |
-
big_npy = index.reconstruct_n(0, index.ntotal)
|
508 |
-
except:
|
509 |
-
traceback.print_exc()
|
510 |
-
index = big_npy = None
|
511 |
-
else:
|
512 |
-
index = big_npy = None
|
513 |
-
audio = signal.filtfilt(bh, ah, audio)
|
514 |
-
audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
|
515 |
-
opt_ts = []
|
516 |
-
if audio_pad.shape[0] > self.t_max:
|
517 |
-
audio_sum = np.zeros_like(audio)
|
518 |
-
for i in range(self.window):
|
519 |
-
audio_sum += audio_pad[i : i - self.window]
|
520 |
-
for t in range(self.t_center, audio.shape[0], self.t_center):
|
521 |
-
opt_ts.append(
|
522 |
-
t
|
523 |
-
- self.t_query
|
524 |
-
+ np.where(
|
525 |
-
np.abs(audio_sum[t - self.t_query : t + self.t_query])
|
526 |
-
== np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
|
527 |
-
)[0][0]
|
528 |
-
)
|
529 |
-
s = 0
|
530 |
-
audio_opt = []
|
531 |
-
t = None
|
532 |
-
t1 = ttime()
|
533 |
-
audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
|
534 |
-
p_len = audio_pad.shape[0] // self.window
|
535 |
-
inp_f0 = None
|
536 |
-
if hasattr(f0_file, "name") == True:
|
537 |
-
try:
|
538 |
-
with open(f0_file.name, "r") as f:
|
539 |
-
lines = f.read().strip("\n").split("\n")
|
540 |
-
inp_f0 = []
|
541 |
-
for line in lines:
|
542 |
-
inp_f0.append([float(i) for i in line.split(",")])
|
543 |
-
inp_f0 = np.array(inp_f0, dtype="float32")
|
544 |
-
except:
|
545 |
-
traceback.print_exc()
|
546 |
-
sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
|
547 |
-
pitch, pitchf = None, None
|
548 |
-
if if_f0 == 1:
|
549 |
-
pitch, pitchf = self.get_f0(
|
550 |
-
input_audio_path,
|
551 |
-
audio_pad,
|
552 |
-
p_len,
|
553 |
-
f0_up_key,
|
554 |
-
f0_method,
|
555 |
-
filter_radius,
|
556 |
-
crepe_hop_length,
|
557 |
-
inp_f0,
|
558 |
-
)
|
559 |
-
pitch = pitch[:p_len]
|
560 |
-
pitchf = pitchf[:p_len]
|
561 |
-
if self.device == "mps":
|
562 |
-
pitchf = pitchf.astype(np.float32)
|
563 |
-
pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
|
564 |
-
pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
|
565 |
-
t2 = ttime()
|
566 |
-
times[1] += t2 - t1
|
567 |
-
for t in opt_ts:
|
568 |
-
t = t // self.window * self.window
|
569 |
-
if if_f0 == 1:
|
570 |
-
audio_opt.append(
|
571 |
-
self.vc(
|
572 |
-
model,
|
573 |
-
net_g,
|
574 |
-
sid,
|
575 |
-
audio_pad[s : t + self.t_pad2 + self.window],
|
576 |
-
pitch[:, s // self.window : (t + self.t_pad2) // self.window],
|
577 |
-
pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
|
578 |
-
times,
|
579 |
-
index,
|
580 |
-
big_npy,
|
581 |
-
index_rate,
|
582 |
-
version,
|
583 |
-
protect,
|
584 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
585 |
-
)
|
586 |
-
else:
|
587 |
-
audio_opt.append(
|
588 |
-
self.vc(
|
589 |
-
model,
|
590 |
-
net_g,
|
591 |
-
sid,
|
592 |
-
audio_pad[s : t + self.t_pad2 + self.window],
|
593 |
-
None,
|
594 |
-
None,
|
595 |
-
times,
|
596 |
-
index,
|
597 |
-
big_npy,
|
598 |
-
index_rate,
|
599 |
-
version,
|
600 |
-
protect,
|
601 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
602 |
-
)
|
603 |
-
s = t
|
604 |
-
if if_f0 == 1:
|
605 |
-
audio_opt.append(
|
606 |
-
self.vc(
|
607 |
-
model,
|
608 |
-
net_g,
|
609 |
-
sid,
|
610 |
-
audio_pad[t:],
|
611 |
-
pitch[:, t // self.window :] if t is not None else pitch,
|
612 |
-
pitchf[:, t // self.window :] if t is not None else pitchf,
|
613 |
-
times,
|
614 |
-
index,
|
615 |
-
big_npy,
|
616 |
-
index_rate,
|
617 |
-
version,
|
618 |
-
protect,
|
619 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
620 |
-
)
|
621 |
-
else:
|
622 |
-
audio_opt.append(
|
623 |
-
self.vc(
|
624 |
-
model,
|
625 |
-
net_g,
|
626 |
-
sid,
|
627 |
-
audio_pad[t:],
|
628 |
-
None,
|
629 |
-
None,
|
630 |
-
times,
|
631 |
-
index,
|
632 |
-
big_npy,
|
633 |
-
index_rate,
|
634 |
-
version,
|
635 |
-
protect,
|
636 |
-
)[self.t_pad_tgt : -self.t_pad_tgt]
|
637 |
-
)
|
638 |
-
audio_opt = np.concatenate(audio_opt)
|
639 |
-
if rms_mix_rate != 1:
|
640 |
-
audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
|
641 |
-
if resample_sr >= 16000 and tgt_sr != resample_sr:
|
642 |
-
audio_opt = librosa.resample(
|
643 |
-
audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
|
644 |
-
)
|
645 |
-
audio_max = np.abs(audio_opt).max() / 0.99
|
646 |
-
max_int16 = 32768
|
647 |
-
if audio_max > 1:
|
648 |
-
max_int16 /= audio_max
|
649 |
-
audio_opt = (audio_opt * max_int16).astype(np.int16)
|
650 |
-
del pitch, pitchf, sid
|
651 |
-
if torch.cuda.is_available():
|
652 |
-
torch.cuda.empty_cache()
|
653 |
-
return audio_opt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|