Commit 0ebaad9
Parent(s): ccd43be

Update parquet files (step 58 of 397)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- spaces/0xhimzel/Detect-AI-Plagiarism/app.py +0 -32
- spaces/101-5/gpt4free/g4f/.v1/gui/query_methods.py +0 -100
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/DS DELMIA V5-6R2015 GA.md +0 -52
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes) - A masterpiece of Italian cinema based on a true story.md +0 -100
- spaces/1gistliPinn/ChatGPT4/Examples/Azov Films Moviebizz Vladik S Fun Dvd Azov Films.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Among Us APK from Uptodown and Survive the Space Mission.md +0 -135
- spaces/1phancelerku/anime-remove-background/Download Cover Fire Offline Shooting Game for Free and Enjoy the Best Action Shooter on Mobile.md +0 -99
- spaces/2023Liu2023/bingo/src/lib/hooks/use-enter-submit.tsx +0 -23
- spaces/232labs/VToonify/vtoonify/model/encoder/criteria/id_loss.py +0 -33
- spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/README.md +0 -12
- spaces/AIConsultant/MusicGen/audiocraft/data/sound_dataset.py +0 -330
- spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/train_mfa_align.py +0 -46
- spaces/AIGText/GlyphControl/ldm/modules/midas/midas/transforms.py +0 -234
- spaces/AIKey/TestStatic/index.html +0 -32
- spaces/ALR03/gradiolangchainChatbotOpenAI/app.py +0 -34
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/webSearchParameters.ts +0 -9
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/__init__.py +0 -10
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.js +0 -91
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateRoundRectangle.js +0 -12
- spaces/AlanMars/QYL-AI-Space/modules/webui_locale.py +0 -27
- spaces/Ali36Ahmad/magic-diffusion/share_btn.py +0 -88
- spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py +0 -23
- spaces/Alpaca233/SadTalker/src/face3d/models/facerecon_model.py +0 -220
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/grid_sample_gradfix.py +0 -83
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/dreambooth.md +0 -475
- spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/README.md +0 -23
- spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/base.py +0 -355
- spaces/Andy1621/uniformer_image_detection/tools/deployment/onnx2tensorrt.py +0 -179
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/cityscapes_769x769.py +0 -35
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py +0 -11
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/send_pictures/script.py +0 -58
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py +0 -645
- spaces/AutoGeneralAI/voice-assistant/README_zh.md +0 -14
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md +0 -5
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py +0 -31
- spaces/B2gan/LLM_Can_See/ai_functions.py +0 -45
- spaces/Benson/text-generation/Examples/Descargar Canal De Youtube Apk.md +0 -72
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/subversion.py +0 -324
- spaces/Borda90/Titanic_Esp/app.py +0 -67
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/extractor.py +0 -152
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/copy.h +0 -34
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/copy.h +0 -59
- spaces/Caoyunkang/Segment-Any-Anomaly/SAA/modelinet.py +0 -173
- spaces/Chris4K/llms_compare/Cedie-Ang-Munting-Prinsipe-Tagalog-Version-Full-Movie-Episode-1.md +0 -60
- spaces/ChristopherMarais/Andrew_Alpha/app.py +0 -107
- spaces/CosmoAI/BhagwatGeeta/app.py +0 -196
- spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/util.py +0 -197
- spaces/DAMO-NLP-SG/Video-LLaMA/app.py +0 -259
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_urlparse.py +0 -462
spaces/0xhimzel/Detect-AI-Plagiarism/app.py
DELETED
@@ -1,32 +0,0 @@
-import os
-import gradio as gr
-from transformers import pipeline
-
-auth_token = os.environ.get("access_token")
-pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta",use_auth_token=auth_token)
-
-
-def predict_en(text):
-    res = pipeline_en(text)[0]
-    label = res['label']
-    score = round(res['score']*100, 2)
-    return "%d%% chance"%score, label
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("""
-    # 🤖 Detect AI Plagiarism with Jurnee
-    Paste in the text you want to check and get a holistic score for how much of the document is written by AI. We recommend that educators take these results as one of many pieces in their assessment of student work. This model is based on Hello Simple's paper [arxiv: 2301.07597](https://arxiv.org/abs/2301.07597) and Github project [Hello-SimpleAI/chatgpt-comparison-detection](https://github.com/Hello-SimpleAI/chatgpt-comparison-detection).
-    """)
-    with gr.Tab("Try it out 👇"):
-        gr.Markdown("""
-        Note: Providing more text to the `Text` box can make the prediction more accurate!
-        """)
-        t1 = gr.Textbox(lines=5, label='Paste the text you want to check',value="There are a few things that can help protect your credit card information from being misused when you give it to a restaurant or any other business:\n\nEncryption: Many businesses use encryption to protect your credit card information when it is being transmitted or stored. This means that the information is transformed into a code that is difficult for anyone to read without the right key.")
-        button1 = gr.Button("👀 See results")
-        score1 = gr.Textbox(lines=1, label='There is a')
-        label1 = gr.Textbox(lines=1, label='That this text is written entirely by a')
-
-        button1.click(predict_en, inputs=[t1], outputs=[score1, label1])
-
-demo.launch()
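For reference, the deleted Space above is a thin Gradio wrapper around a single `text-classification` pipeline. A minimal sketch of calling the same detector checkpoint directly, without the UI (assuming `transformers` and a backend such as PyTorch are installed; the `access_token` environment variable is only needed if the checkpoint requires authentication):

```python
# Minimal sketch: score one text with the detector model used by the deleted Space.
import os

from transformers import pipeline

# The Space read an optional Hugging Face token from the "access_token" env var;
# public checkpoints usually work without it.
auth_token = os.environ.get("access_token")

detector = pipeline(
    task="text-classification",
    model="Hello-SimpleAI/chatgpt-detector-roberta",
    use_auth_token=auth_token,
)

if __name__ == "__main__":
    sample = "Paste the text you want to check here."
    res = detector(sample)[0]  # e.g. {'label': 'ChatGPT', 'score': 0.93}
    print(f"{res['label']}: {round(res['score'] * 100, 2)}% confidence")
```

Longer inputs tend to give the classifier more signal, which is why the Space asked for several lines of text.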
spaces/101-5/gpt4free/g4f/.v1/gui/query_methods.py
DELETED
@@ -1,100 +0,0 @@
-import os
-import sys
-from typing import Optional
-
-sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
-
-from gpt4free import quora, forefront, theb, you
-import random
-
-
-def query_forefront(question: str, proxy: Optional[str] = None) -> str:
-    # create an account
-    token = forefront.Account.create(logging=False, proxy=proxy)
-
-    response = ""
-    # get a response
-    try:
-        return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4', proxy=proxy).text
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-def query_quora(question: str, proxy: Optional[str] = None) -> str:
-    token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
-    return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text
-
-
-def query_theb(question: str, proxy: Optional[str] = None) -> str:
-    # Set cloudflare clearance cookie and get answer from GPT-4 model
-    response = ""
-    try:
-        return ''.join(theb.Completion.create(prompt=question, proxy=proxy))
-
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-def query_you(question: str, proxy: Optional[str] = None) -> str:
-    # Set cloudflare clearance cookie and get answer from GPT-4 model
-    try:
-        result = you.Completion.create(prompt=question, proxy=proxy)
-        return result.text
-
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-# Define a dictionary containing all query methods
-avail_query_methods = {
-    "Forefront": query_forefront,
-    "Poe": query_quora,
-    "Theb": query_theb,
-    "You": query_you,
-    # "Writesonic": query_writesonic,
-    # "T3nsor": query_t3nsor,
-    # "Phind": query_phind,
-    # "Ora": query_ora,
-}
-
-
-def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
-    # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
-    if selected_method != "Random" and selected_method in avail_query_methods:
-        try:
-            return avail_query_methods[selected_method](user_input, proxy=proxy)
-        except Exception as e:
-            print(f"Error with {selected_method}: {e}")
-            return "😵 Sorry, some error occurred please try again."
-
-    # Initialize variables for determining success and storing the result
-    success = False
-    result = "😵 Sorry, some error occurred please try again."
-    # Create a list of available query methods
-    query_methods_list = list(avail_query_methods.values())
-
-    # Continue trying different methods until a successful result is obtained or all methods have been tried
-    while not success and query_methods_list:
-        # Choose a random method from the list
-        chosen_query = random.choice(query_methods_list)
-        # Find the name of the chosen method
-        chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
-        try:
-            # Try to call the chosen method with the user input
-            result = chosen_query(user_input, proxy=proxy)
-            success = True
-        except Exception as e:
-            print(f"Error with {chosen_query_name}: {e}")
-            # Remove the failed method from the list of available methods
-            query_methods_list.remove(chosen_query)
-
-    return result
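The part of this deleted file worth noting is the fallback loop in `query()`: providers are tried in a random order and removed from the pool as they fail. A self-contained sketch of the same pattern with stub providers (the provider functions below are invented for illustration and do not call any real backend):

```python
# Stand-alone sketch of the random-fallback pattern used by query() above.
import random
from typing import Callable, Dict


def flaky_provider(prompt: str) -> str:
    # Stub that always fails, standing in for an unreachable backend.
    raise RuntimeError("simulated provider outage")


def echo_provider(prompt: str) -> str:
    # Stub that always succeeds.
    return f"echo: {prompt}"


PROVIDERS: Dict[str, Callable[[str], str]] = {
    "Flaky": flaky_provider,
    "Echo": echo_provider,
}


def query_with_fallback(prompt: str) -> str:
    """Try providers in random order until one succeeds, dropping failures."""
    remaining = list(PROVIDERS.items())
    while remaining:
        name, fn = random.choice(remaining)
        try:
            return fn(prompt)
        except Exception as exc:
            print(f"Error with {name}: {exc}")
            remaining.remove((name, fn))
    return "all providers failed"


if __name__ == "__main__":
    print(query_with_fallback("hello"))
```

One quirk of the original implementation: when a specific method is selected and it fails, the function returns an error string instead of falling back, and the random path re-derives each method's name by identity lookup on every iteration; keeping (name, function) pairs in a single list, as above, avoids that lookup.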
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DS DELMIA V5-6R2015 GA.md
DELETED
@@ -1,52 +0,0 @@
-<br />
-<h1>What is DS DELMIA V5-6R2015 GA and Why You Need It</h1>
-<p>DS DELMIA V5-6R2015 GA is a software solution that enables you to design, simulate, and optimize your production processes and systems. It is part of the Dassault Systemes portfolio of 3D and Product Lifecycle Management (PLM) solutions that help you create innovative products and services.</p>
-<p>With DS DELMIA V5-6R2015 GA, you can:</p>
-<h2>DS DELMIA V5-6R2015 GA</h2><br /><p><b><b>Download Zip</b> 🗸 <a href="https://byltly.com/2uKxxM">https://byltly.com/2uKxxM</a></b></p><br /><br />
-<ul>
-<li>Model and analyze your production flows and layouts with DELMIA QUEST</li>
-<li>Perform ergonomic and human factors analysis with DELMIA Ergonomics</li>
-<li>Plan and program your industrial robots with DELMIA Robotics</li>
-<li>Define and optimize your machining operations with DELMIA V5 DPM POWERTRAIN</li>
-<li>Manage and access your process data with DELMIA Process Engineer and DELMIA PPR Navigator</li>
-<li>Design and validate your assembly, fastening, inspection, and planning processes with DPM Assembly Planning, DPM Fastener Planning, DPM Machining Planning, DPM Inspection Planning, and DPM Planning</li>
-</ul>
-<p>DS DELMIA V5-6R2015 GA is compatible with Windows 7 and Windows 8.1 operating systems. It supports both 32-bit and 64-bit architectures. It is available in multiple languages, including English, French, German, Japanese, Chinese, Russian, and more.</p>
-<p>DS DELMIA V5-6R2015 GA is a powerful tool that can help you improve your productivity, quality, and profitability. It can help you reduce costs, waste, and errors. It can help you enhance your collaboration, innovation, and customer satisfaction.</p>
-<p>If you want to learn more about DS DELMIA V5-6R2015 GA, you can visit the official website of Dassault Systemes or download a free trial version from their online store. You can also read some of the testimonials and reviews from other users who have benefited from this software solution.</p>
-<h2>Conclusion</h2>
-<p>DS DELMIA V5-6R2015 GA is a software solution that enables you to design, simulate, and optimize your production processes and systems. It is part of the Dassault Systemes portfolio of 3D and PLM solutions that help you create innovative products and services. With DS DELMIA V5-6R2015 GA, you can improve your productivity, quality, and profitability. You can reduce costs, waste, and errors. You can enhance your collaboration, innovation, and customer satisfaction.</p>
-<p>If you are interested in DS DELMIA V5-6R2015 GA, you can visit the official website of Dassault Systemes or download a free trial version from their online store. You can also read some of the testimonials and reviews from other users who have benefited from this software solution.</p>
-
-<h2>How to Use DS DELMIA V5-6R2015 GA</h2>
-<p>DS DELMIA V5-6R2015 GA is easy to use and install. You can download it from the Dassault Systemes online store or request a DVD from your local reseller. You can also get a free trial version for 30 days to test its features and benefits.</p>
-<p>Once you have installed DS DELMIA V5-6R2015 GA, you can launch it from your desktop or start menu. You will see a user-friendly interface that allows you to access different modules and functions. You can also customize your workspace and preferences according to your needs and preferences.</p>
-<p></p>
-<p>To use DS DELMIA V5-6R2015 GA, you need to create or open a project file that contains your process data and models. You can import data from other sources, such as CATIA, SolidWorks, or Excel. You can also create data from scratch using the built-in tools and wizards.</p>
-<p>Once you have your project file ready, you can start designing, simulating, and optimizing your production processes and systems. You can use the various modules and functions of DS DELMIA V5-6R2015 GA to perform different tasks, such as:</p>
-<ul>
-<li>Creating and editing production flows and layouts with DELMIA QUEST</li>
-<li>Adding and manipulating human models and performing ergonomic analysis with DELMIA Ergonomics</li>
-<li>Adding and programming industrial robots and performing robot simulation with DELMIA Robotics</li>
-<li>Defining and optimizing machining operations and performing machining simulation with DELMIA V5 DPM POWERTRAIN</li>
-<li>Managing and accessing your process data with DELMIA Process Engineer and DELMIA PPR Navigator</li>
-<li>Creating and validating assembly, fastening, inspection, and planning processes with DPM Assembly Planning, DPM Fastener Planning, DPM Machining Planning, DPM Inspection Planning, and DPM Planning</li>
-</ul>
-<p>You can also use DS DELMIA V5-6R2015 GA to perform various analyses and validations, such as cycle time analysis, resource utilization analysis, collision detection, reachability analysis, feasibility analysis, quality analysis, and more. You can also generate reports and documentation for your projects.</p>
-<h2>Who Can Benefit from DS DELMIA V5-6R2015 GA</h2>
-<p>DS DELMIA V5-6R2015 GA is a software solution that can benefit anyone who is involved in the design, simulation, and optimization of production processes and systems. It can be used by different industries, such as aerospace, automotive, consumer goods, energy, industrial equipment, life sciences, marine and offshore, transportation and mobility, and more.</p>
-<p>Some of the roles that can benefit from DS DELMIA V5-6R2015 GA are:</p>
-<ul>
-<li>Process engineers who need to design and optimize production processes</li>
-<li>Manufacturing engineers who need to simulate and validate production systems</li>
-<li>Industrial engineers who need to analyze and improve production performance</li>
-<li>Ergonomists who need to ensure human safety and comfort in production environments</li>
-<li>Robotics engineers who need to program and control industrial robots</li>
-<li>Machining engineers who need to define and optimize machining operations</li>
-<li>Quality engineers who need to ensure product quality and compliance</li>
-<li>Project managers who need to coordinate and monitor production projects</li>
-<li>Decision makers who need to evaluate production scenarios and alternatives</li>
-</ul>
-<p>DS DELMIA V5-6R2015 GA can help these roles achieve their goals faster, easier, and better. It can help them reduce costs, waste, and errors. It can help them enhance collaboration, innovation, and customer satisfaction.</p> 81aa517590<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes) - A masterpiece of Italian cinema based on a true story.md
DELETED
@@ -1,100 +0,0 @@
-
-<h1>HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes)</h1>
-<p>If you are looking for a way to watch high-quality movies online or offline, you might want to try HD online player. This is a powerful and versatile video player that lets you stream or download any video from any website in HD quality. One of the movies that you can enjoy with HD online player is Life Is Beautiful!, a classic comedy-drama film that will make you laugh and cry. In this article, we will show you how to download Life Is Beautiful! in 1080p for free using HD online player.</p>
-<h2>Introduction</h2>
-<p>HD online player is a software that allows you to watch videos on your device without any hassle. You can use it to stream videos from various websites, such as YouTube, Netflix, Hulu, Amazon Prime Video, and more. You can also use it to download videos from these websites and save them on your device for offline viewing. You can choose the video quality, format, and language that suit your preferences.</p>
-<h2>HD Online Player (Life Is Beautiful! 1080p movie free downloadgolkes)</h2><br /><p><b><b>Download</b> ••• <a href="https://byltly.com/2uKy0T">https://byltly.com/2uKy0T</a></b></p><br /><br />
-<p>One of the movies that you can watch with HD online player is Life Is Beautiful!, a 1997 Italian film directed by and starring Roberto Benigni. The film tells the story of a Jewish father who uses his imagination and humor to protect his son from the horrors of the Holocaust. The film won three Academy Awards, including Best Foreign Language Film, Best Actor, and Best Original Score. It is widely regarded as one of the best films of all time.</p>
-<p>If you want to watch Life Is Beautiful! in 1080p for free, you can do so with HD online player. All you need is a device with an internet connection and some storage space. Here are the steps that you need to follow.</p>
-<h2>Features of HD online player</h2>
-<h3>High-quality video streaming</h3>
-<p>One of the main features of HD online player is that it delivers high-quality video streaming without any buffering or lagging. You can watch videos in up to 4K resolution with crystal-clear sound and smooth playback. You can also adjust the brightness, contrast, saturation, and volume of the video according to your liking.</p>
-<p>Another feature of HD online player is that it supports various video formats and subtitles. You can play videos in MP4, MKV, AVI, WMV, FLV, MOV, and more. You can also add subtitles in SRT, ASS, SSA, SUB, IDX, and more. You can change the font size, color, position, and sync of the subtitles as well.</p>
-<h3>Easy and fast downloading</h3>
-<p>Another feature of HD online player is that it allows you to download any video from any website with one click. You can use the built-in browser or paste the URL of the video that you want to download. You can choose the video quality, format, and language that you want. You can also select multiple videos at once and download them in batches.</p>
-<p>Watch Life Is Beautiful online free HD<br />
-Life Is Beautiful full movie 1080p download<br />
-Life Is Beautiful comedy drama film streaming<br />
-Roberto Benigni Life Is Beautiful movie<br />
-Life Is Beautiful concentration camp game<br />
-Life Is Beautiful 1997 Italian movie<br />
-Life Is Beautiful Miramax production<br />
-Life Is Beautiful Oscar-winning film<br />
-Life Is Beautiful movie subtitles<br />
-Life Is Beautiful movie review<br />
-Life Is Beautiful movie trailer<br />
-Life Is Beautiful movie cast<br />
-Life Is Beautiful movie quotes<br />
-Life Is Beautiful movie soundtrack<br />
-Life Is Beautiful movie Netflix<br />
-Life Is Beautiful movie Amazon Prime<br />
-Life Is Beautiful movie Hulu<br />
-Life Is Beautiful movie Disney Plus<br />
-Life Is Beautiful movie HBO Max<br />
-Life Is Beautiful movie YouTube<br />
-How to watch Life Is Beautiful online<br />
-Where to watch Life Is Beautiful online<br />
-Best sites to watch Life Is Beautiful online<br />
-Watch Life Is Beautiful online free no sign up<br />
-Watch Life Is Beautiful online free 123movies<br />
-Watch Life Is Beautiful online free Putlocker<br />
-Watch Life Is Beautiful online free Fmovies<br />
-Watch Life Is Beautiful online free Gomovies<br />
-Watch Life Is Beautiful online free Solarmovie<br />
-Watch Life Is Beautiful online free Vumoo<br />
-Download Life Is Beautiful movie free HD<br />
-Download Life Is Beautiful movie torrent HD<br />
-Download Life Is Beautiful movie magnet link HD<br />
-Download Life Is Beautiful movie YTS HD<br />
-Download Life Is Beautiful movie RARBG HD<br />
-Download Life Is Beautiful movie 1337x HD<br />
-Download Life Is Beautiful movie EZTV HD<br />
-Download Life Is Beautiful movie Limetorrents HD<br />
-Download Life Is Beautiful movie Kickass Torrents HD<br />
-Download Life Is Beautiful movie The Pirate Bay HD<br />
-How to download Life Is Beautiful movie free HD<br />
-Where to download Life Is Beautiful movie free HD<br />
-Best sites to download Life Is Beautiful movie free HD<br />
-Download Life Is Beautiful movie free no sign up <br />
-Download Life Is Beautiful movie free 123movies <br />
-Download Life Is Beautiful movie free Putlocker <br />
-Download Life Is Beautiful movie free Fmovies <br />
-Download Life Is Beautiful movie free Gomovies <br />
-Download Life Is Beautiful movie free Solarmovie <br />
-Download Life Is Beautiful movie free Vumoo</p>
-<p>Another feature of HD online player is that it supports multiple downloads and resume function. You can pause and resume your downloads at any time. You can also check the progress and status of your downloads in the download manager. You can also delete or rename your downloaded files as you wish.</p>
-<h3>User-friendly interface and customization</h3>
-<p>Another feature of HD online player is that it has a simple and intuitive design that makes it easy to use. You can access all the functions and settings from the main menu or the toolbar. You can also swipe left or right on the screen to switch between different modes or tabs.</p>
-<p>Another feature of HD online player is that it lets you adjust the settings, preferences, and appearance of the player. You can change the theme color, background image, icon size, gesture control, playback speed, screen orientation, and more. You can also enable or disable notifications, auto-play, auto-update, hardware acceleration, etc.</p>
-<h2>How to watch Life Is Beautiful! in HD online player</h2>
-<h3>Step 1: Download and install HD online player on your device</h3>
-<p>The first step to watch Life Is Beautiful! in HD online player is to download and install the software on your device. You can find the official website and download link of HD online player here: <a href="https://hd-online-player.com/">https://hd-online-player.com/</a>. The software is compatible with Windows, Mac OS X, Android, and iOS devices.</p>
-<p>To install HD online player on your device, - For Windows users: - Download the .exe file from the website. - Run the file and follow the instructions on the screen. - Agree to the terms and conditions and click Next. - Choose a destination folder for installation and click Next. - Wait for the installation process to complete and click Finish. - For Mac users: - Download the .dmg file from the website. - Open the file and drag the icon into your Applications folder. - Double-click on the icon to launch the software. - For Android users: - Download the .apk file from the website. - Enable Unknown Sources in your device settings. - Tap on the file and install it on your device. - Open the app from your app drawer. - For iOS users: - Download the .ipa file from the website. - Connect your device to your computer via USB cable. - Open iTunes on your computer and select your device. - Drag and drop the file into your device's Apps section. - Sync your device with iTunes.</p>
-<h3>Step 2: Search for Life Is Beautiful! on HD online player</h3>
-<p>The second step to watch Life Is Beautiful! in HD online player is to search for the movie on the software. You can use two methods to do this: - Method 1: Use the built-in search engine. - On the main screen of HD online player, type "Life Is Beautiful!" in the search box and tap on the magnifying glass icon. - You will see a list of results that match your query. Tap on the one that says "Life Is Beautiful! (1997)" and has the poster of the movie. - You will see the details of the movie, such as the title, genre, rating, synopsis, cast, director, etc. You will also see two buttons: Play and Download. - Method 2: Browse the categories. - On the main screen of HD online player, swipe left or right to switch between different categories, such as Popular, Trending, Latest, Comedy, Drama, etc. - Tap on the category that you think might have Life Is Beautiful! in it. For example, you can tap on Comedy or Drama. - You will see a grid of movies that belong to that category. Scroll down or up to find Life Is Beautiful! among them. Tap on it when you see it. - You will see the same details and buttons as in Method 1. <h3>Step 3: Stream or download Life Is Beautiful! in 1080p</h3>
-<p>The third and final step to watch Life Is Beautiful! in HD online player is to stream or download the movie in 1080p. You can choose either option depending on your preference and internet connection.</p>
-<p>To stream Life Is Beautiful! online, - Tap on the Play button on the movie details screen. - You will see a pop-up window that asks you to choose the video quality and language. Tap on 1080p and English (or any other language that you want). - Wait for a few seconds for the video to load and start playing. You can use the controls on the bottom of the screen to pause, resume, rewind, fast-forward, adjust volume, etc. - Enjoy watching Life Is Beautiful! online with HD online player.</p>
-<p>To download Life Is Beautiful! offline, - Tap on the Download button on the movie details screen. - You will see a pop-up window that asks you to choose the video quality and language. Tap on 1080p and English (or any other language that you want). - Wait for a few seconds for the download to start. You can see the progress and status of your download in the download manager. You can also pause and resume your download at any time. - Once the download is complete, you can find your downloaded file in your device's storage or in HD online player's library. You can play it anytime without an internet connection. - Enjoy watching Life Is Beautiful! offline with HD online player.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, HD online player is a great software that lets you watch high-quality movies online or offline. You can use it to watch Life Is Beautiful!, a classic comedy-drama film that will make you laugh and cry. All you need to do is to download and install HD online player on your device, search for Life Is Beautiful! on HD online player, and stream or download it in 1080p for free.</p>
-<p>If you are interested in HD online player and Life Is Beautiful!, you can download HD online player from here: <a href="https://hd-online-player.com/">https://hd-online-player.com/</a>. You can also check out other movies that are available on HD online player. You will surely find something that suits your taste and mood.</p>
-<p>Thank you for reading this article. We hope you found it helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
-<h2>FAQs</h2>
-<table>
-<tr>
-<th>Question</th>
-<th>Answer</th>
-</tr>
-<tr>
-<td>Is HD online player safe and legal?</td>
-<td>Yes, HD online player is safe and legal. It does not contain any viruses or malware that can harm your device. It also does not host any pirated or illegal content on its servers. It only provides links to videos that are already available on other websites.</td>
-</tr>
-<tr>
-<td>How much does HD online player cost?</td>
-<td>HD online player is completely free to use. You do not need to pay any subscription fees or hidden charges to use it. However, you may see some ads on HD online player that help support its development and maintenance.</td>
-</tr>
-<tr>
-<td>Can I watch other movies besides Life Is Beautiful! with HD online player?</td>
-<td>Yes, you can watch other movies besides Life Is Beautiful! with HD online player. HD online player has a huge collection of movies from various genres and countries. You can find movies from Hollywood, Bollywood, Kollywood, Tollywood, etc. You can also find movies from different languages, such as English, Hindi, Tamil, Telugu, Malayalam, Kannada, Bengali, Marathi, Punjabi, Urdu, etc. - How can I contact the support team of HD online player? - If you have any issues or queries, you can contact the support team by sending an email to [email protected] or by filling out the contact form on the official website. - How can I share my feedback or suggestions for HD online player? - You can leave a comment on the official website or its social media pages, or rate and review HD online player on the app store or the play store. - How can I update HD online player to the latest version? - Open HD online player, go to the menu, tap Check for Updates, and if a new version is available tap Download and Install and follow the instructions on the screen; the steps are the same on Windows, Mac, Android, and iOS, only the location of the menu icon differs. - What are some alternatives to HD online player? - You can try VLC Media Player (a free, open-source media player that plays almost any video or audio format), MX Player (a powerful and user-friendly video player, free with ads or paid without ads), or KMPlayer (a lightweight and fast video player, free with ads or paid without ads); all three can also stream or download videos from various websites. <h2></h2></p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Azov Films Moviebizz Vladik S Fun Dvd Azov Films.md
DELETED
@@ -1,6 +0,0 @@
-<h2>azov films moviebizz vladik s fun dvd azov films</h2><br /><p><b><b>Download</b> ✵✵✵ <a href="https://imgfil.com/2uxYPj">https://imgfil.com/2uxYPj</a></b></p><br /><br />
-
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Among Us APK from Uptodown and Survive the Space Mission.md
DELETED
@@ -1,135 +0,0 @@
-
-<h1>How to Download Among Us APK from Uptodown</h1>
-<p>Among Us is one of the most popular online multiplayer games of 2020 and 2021. It is a fun and thrilling game where you have to work together with your crewmates to complete tasks and find the impostors before they kill you. But what if you want to play the game on your Android device and you can't find it on the Google Play Store? Don't worry, there is a way to download and install the game using an APK file from Uptodown, a trusted and safe platform for downloading apps and games. In this article, we will show you how to do that in a few simple steps.</p>
-<h2>What is Among Us?</h2>
-<p>Among Us is a game developed and published by Innersloth, an American game studio. It was released in 2018 for Android, iOS, and Windows devices, but it gained a massive surge of popularity in 2020 thanks to many Twitch streamers and YouTubers playing it. The game has also received favorable reviews from critics and players for its fun and entertaining gameplay.</p>
-<h2>download among us apk uptodown</h2><br /><p><b><b>Download File</b> · <a href="https://urlin.us/2uSWaI">https://urlin.us/2uSWaI</a></b></p><br /><br />
-<h3>Features of Among Us</h3>
-<p>Among Us has many features that make it an exciting and addictive game. Some of them are:</p>
-<ul>
-<li>Customization: You can pick your color, hat, visor, skin, and pet to personalize your character.</li>
-<li>Lots of game options: You can add more impostors, more tasks, different roles, and so much more to customize your game experience.</li>
-<li>Different modes to choose from: You can play in classic mode or hide n seek mode, depending on your preference.</li>
-<li>Four different maps to play in: You can choose between The Skeld, MIRA HQ, Polus, or The Airship, each with its own layout and challenges.</li>
-<li>Quickly find a game online from the host list: You can join a public game or create a private game with your friends using a code.</li>
-<li>In-game text chat: You can communicate with other players during meetings or as ghosts.</li>
-<li>Rich Discord integration: You can use voice chat with your friends using Discord while playing the game.</li>
-<li>Cross-platform play: You can play with other players on different devices, such as PC, console, Android, and iOS.</li>
-</ul>
-<h3>How to play Among Us</h3>
-<p>The gameplay of Among Us is simple but engaging. Each round, you are assigned one of two roles: crewmate or impostor. The number of impostors can vary from one to three depending on the game settings. The crewmates have to work together to complete tasks around the map while avoiding being killed by the impostors. The impostors have to kill the crewmates or sabotage critical systems without being caught. The game ends when either all crewmates are dead, all tasks are completed, or all impostors are ejected.</p>
-<p>The game has two phases: free roam and meeting. During free roam, you can move around the map and interact with objects. If you are a crewmate, you can do tasks that are assigned to you. If you are an impostor, you can fake tasks, vent to move quickly, or kill crewmates when no one is watching. You can also use sabotages to create chaos and distract the crewmates. Some sabotages require immediate attention, such as reactor meltdown or oxygen depletion. If they are not fixed in time, the impostors win.</p>
-<p>If a dead body is found or an emergency button is pressed, a meeting is called. During a meeting, all players can discuss and vote on who they think is the impostor. You can use the text chat or voice chat to communicate with other players. You can also use evidence, such as visual tasks, admin map, or vitals, to support your claims or accusations. The player with the most votes is ejected from the game. If there is a tie, no one is ejected. The game continues until the next meeting or the end condition is met.</p>
-<h2>What is Uptodown?</h2>
-<p>Uptodown is a website and app store that allows you to download and install apps and games for various platforms, such as Android, Windows, Mac, Linux, iOS, and more. It was founded in 2002 and has over 4 billion downloads and 130 million monthly users worldwide. Uptodown is a safe and reliable source for downloading apps and games that are not available on the official stores or are region-locked.</p>
-<h3>Benefits of using Uptodown</h3>
-<p>Some of the benefits of using Uptodown are:</p>
-<ul>
-<li>No registration required: You can download and install apps and games without creating an account or logging in.</li>
-<li>No geo-restrictions: You can access apps and games that are not available in your country or region.</li>
-<li>Multiple languages supported: You can browse the website and app store in over 15 languages, including English, Spanish, French, German, Arabic, Chinese, and more.</li>
-<li>Virus-free and verified: All the apps and games are scanned and checked by Uptodown's team of experts to ensure they are free of malware and viruses.</li>
-<li>Version history: You can download and install previous versions of apps and games if you prefer them or if the latest version is not compatible with your device.</li>
-<li>Automatic updates: You can enable automatic updates for your apps and games to keep them up to date.</li>
-</ul>
-<h3>How to use Uptodown</h3>
-<p>To use Uptodown, you need to follow these steps:</p>
-<ol>
-<li>Visit the Uptodown website or download the Uptodown app on your device.</li>
-<li>Search for the app or game you want to download using the search bar or browse by categories.</li>
-<li>Select the app or game you want to download and click on the download button.</li>
-<li>Wait for the download to finish and open the file to install it on your device.</li>
-<li>Enjoy your app or game!</li>
-</ol>
-<h2>How to download and install Among Us APK from Uptodown</h2>
-<p>Now that you know what Among Us and Uptodown are, let's see how you can download and install Among Us APK from Uptodown on your Android device. It's very easy and only takes a few minutes. Here are the steps you need to follow:</p>
-<p>How to download among us apk from uptodown<br />
-Download among us apk uptodown latest version<br />
-Download among us apk uptodown for android<br />
-Download among us apk uptodown mod menu<br />
-Download among us apk uptodown hack<br />
-Download among us apk uptodown free<br />
-Download among us apk uptodown online<br />
-Download among us apk uptodown pc<br />
-Download among us apk uptodown ios<br />
-Download among us apk uptodown 2023<br />
-Download among us apk uptodown update<br />
-Download among us apk uptodown unlocked<br />
-Download among us apk uptodown no ads<br />
-Download among us apk uptodown offline<br />
-Download among us apk uptodown safe<br />
-Download among us apk uptodown pro<br />
-Download among us apk uptodown premium<br />
-Download among us apk uptodown cracked<br />
-Download among us apk uptodown full<br />
-Download among us apk uptodown beta<br />
-Download among us apk uptodown new<br />
-Download among us apk uptodown old<br />
-Download among us apk uptodown original<br />
-Download among us apk uptodown review<br />
-Download among us apk uptodown tutorial<br />
-Download among us apk uptodown guide<br />
-Download among us apk uptodown tips<br />
-Download among us apk uptodown tricks<br />
-Download among us apk uptodown cheats<br />
-Download among us apk uptodown best<br />
-Download among us apk uptodown alternative<br />
-Download among us apk uptodown mirror<br />
-Download among us apk uptodown link<br />
-Download among us apk uptodown file<br />
-Download among us apk uptodown site<br />
-Download among us apk uptodown app<br />
-Download among us apk uptodown game<br />
-Download among us apk uptodown fun<br />
-Download among us apk uptodown action<br />
-Download among us apk uptodown intrigue<br />
-Download among us apk uptodown crewmate<br />
-Download among us apk uptodown imposter<br />
-Download among us apk uptodown spaceship<br />
-Download among us apk uptodown units<br />
-Download among us apk uptodown tasks<br />
-Download among us apk uptodown meetings<br />
-Download among us apk uptodown votes<br />
-Download among us apk uptodown chat<br />
-Download among us apk uptodown skins</p>
-<h3>Step 1: Enable unknown sources on your Android device</h3>
-<p>Before you can install an APK file from Uptodown, you need to enable unknown sources on your Android device. This will allow you to install apps and games from sources other than the Google Play Store. To do this, follow these steps:</p>
-<ol>
-<li>Go to your device's settings and tap on security or privacy.</li>
-<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li>
-<li>A warning message will pop up. Tap on OK to confirm.</li>
-</ol>
-<h3>Step 2: Download the APK file from Uptodown</h3>
-<p>The next step is to download the APK file of Among Us from Uptodown. To do this, follow these steps:</p>
-<ol>
-<li>Open your browser and go to <a href="">https://among-us.en.uptodown.com/android</a>.</li>
-<li>Tap on the green download button at the top of the page.</li>
-<li>A new page will open with a QR code. Scan it with your device's camera or tap on the link below it to start the download.</li>
-<li>The APK file will be downloaded to your device's storage. You can check its progress in your notification bar or in your downloads folder.</li>
-</ol>
-<h3>Step 3: Install the APK file on your device</h3>
-<p>The final step is to install the APK file of Among Us on your device. To do this, follow these steps:</p>
-<ol>
-<li>Locate the APK file in your downloads folder or notification bar and tap on it.</li>
-<li>A prompt will appear asking you if you want to install this application. Tap on install.</li>
-<li>The installation process will begin. Wait for it to finish. It may take a few seconds or minutes depending on your device and internet speed.</li>
-<li>Once the installation is complete, tap on open to launch the game or tap on done to exit the installer.</li>
-</ol>
-<h3>Step 4: Launch and enjoy the game</h3>
-<p>Congratulations! You have successfully downloaded and installed Among Us APK from Uptodown on your Android device. Now you can launch the game and enjoy playing it with your friends or strangers online. You can also customize your settings, join or create a game, and chat with other players. Have fun and be careful of the impostors!</p>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download and install Among Us APK from Uptodown on your Android device. We hope you found this guide helpful and easy to follow. Uptodown is a great platform for downloading apps and games that are not available on the Google Play Store or are region-locked. Among Us is a fun and thrilling game that you can play with your friends or strangers online. It is a game of deception, teamwork, and betrayal that will keep you hooked for hours. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about downloading and installing Among Us APK from Uptodown:</p>
-<h4>Is it safe to download and install Among Us APK from Uptodown?</h4>
-<p>Yes, it is safe to download and install Among Us APK from Uptodown. Uptodown is a trusted and reliable source for downloading apps and games that are free of malware and viruses. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain harmful or malicious code. Always scan the file with an antivirus app before opening it.</p>
-<h4>Is it legal to download and install Among Us APK from Uptodown?</h4>
-<p>Yes, it is legal to download and install Among Us APK from Uptodown. Uptodown does not host any pirated or cracked apps or games on its platform. All the apps and games are original and belong to their respective developers and publishers. However, you should always respect the intellectual property rights of the creators and follow their terms of service.</p>
-<h4>Will I get banned for playing Among Us with an APK file from Uptodown?</h4>
-<p>No, you will not get banned for playing Among Us with an APK file from Uptodown. The game does not have any anti-cheat system or mechanism that detects or prevents players from using APK files from other sources. However, you should always play fair and follow the rules of the game. Do not use any cheats, hacks, mods, or exploits that may give you an unfair advantage or ruin the game experience for others.</p>
-<h4>Can I play Among Us with players who have downloaded the game from the Google Play Store?</h4>
-<p>Yes, you can play Among Us with players who have downloaded the game from the Google Play Store. The game supports cross-platform play between different devices and platforms, such as PC, console, Android, and iOS. As long as you have the same version of the game as the other players, you can join or create a game with them using a code.</p>
-<h4>Can I update Among Us APK from Uptodown?</h4>
-<p>Yes, you can update Among Us APK from Uptodown. You can either enable automatic updates for your apps and games in the Uptodown app settings or manually check for updates on the website or app store. When a new version of the game is available, you can download and install it over the existing one without losing your data or progress.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download Cover Fire Offline Shooting Game for Free and Enjoy the Best Action Shooter on Mobile.md
DELETED
@@ -1,99 +0,0 @@
-
-<h1>Download Cover Fire: Offline Shooting Game - A Review</h1>
-<p>If you are looking for a fun and addictive shooting game that you can play on your mobile device, you might want to check out Cover Fire: Offline Shooting Game. This game is one of the best shooting games you’ll ever play on a mobile, now for free and offline. In this article, we will review Cover Fire and tell you what it is, what features it has, how to download it, and what are the pros and cons of playing it.</p>
-<h2>What is Cover Fire?</h2>
-<p>Cover Fire is a shooting game developed by Viva Games Studios and published by 1MB. It is available for Android and Windows devices. The game has a challenging story mode, where you join the resistance and command a squad of veterans through sieged cities, deserts, and fields taken by guerrillas. You have to defeat all kinds of enemies in the war with the biggest graphic, greatest arsenal, and the best offline gameplay.</p>
-<h2>download cover fire offline shooting game</h2><br /><p><b><b>Download</b> ⇔ <a href="https://jinyurl.com/2uNNID">https://jinyurl.com/2uNNID</a></b></p><br /><br />
-<p>Cover Fire also has an online mode, where you can compete and fight against other players or friends around the world with your best times ranked in online leaderboards. You can also participate in cool war events, such as Zombies Survival or Black Ops.</p>
-<h3>Features of Cover Fire</h3>
-<h4>Shooting online and offline on mobile</h4>
-<p>Cover Fire allows you to play offline in a single-player campaign, where you can enjoy 12 new chapters in a thrilling story mode. You can also play online in a competitive sniper shooting battle and don't stop shooting in cool war events.</p>
-<h4>New shooting game and best sniper 3d shooting game</h4>
-<p>Cover Fire has a realistic 3D graphics and a variety of weapons to choose from. You can unlock unique army weapons and shoot cool guns, such as pistols, shotguns, snipers, bazookas, and more. You can also customize and upgrade your best guns skills to increase arsenal damage in the war zone.</p>
-<h4>Easy controls and low mobile requirements</h4>
-<p>Cover Fire has easy controls that bring you a fun and addictive combat. You can shoot to kill and save victims with simple gestures. The game also has low mobile requirements, so you don't need a wifi to play or download the game.</p>
-<h4>Online sniper tournaments and events</h4>
-<p>Cover Fire has an online mode where you can compete and fight against other players or friends around the world with your best times ranked in online leaderboards. You can also join the online sniper tournaments and show your skills as a shooter. Moreover, you can try the free zombie event, where you have to survive with a gun against zombies and save the survivors.</p>
-<h2>How to download Cover Fire?</h2>
-<p>Cover Fire is available for Android and Windows devices. You can download it from different sources, depending on your device and preference.</p>
-<p>How to download cover fire offline shooting game for free<br />
-Cover fire offline shooting game apk download<br />
-Cover fire offline shooting game mod apk unlimited money<br />
-Cover fire offline shooting game for pc download<br />
-Best offline shooting game cover fire download<br />
-Download cover fire offline shooting game latest version<br />
-Cover fire offline shooting game hack download<br />
-Cover fire offline shooting game cheats and tips<br />
-Download cover fire offline shooting game for android<br />
-Cover fire offline shooting game review and rating<br />
-Download cover fire offline shooting game for ios<br />
-Cover fire offline shooting game gameplay and features<br />
-Download cover fire offline shooting game for windows 10<br />
-Cover fire offline shooting game online multiplayer mode<br />
-Download cover fire offline shooting game for mac<br />
-Cover fire offline shooting game weapons and upgrades<br />
-Download cover fire offline shooting game for laptop<br />
-Cover fire offline shooting game missions and challenges<br />
-Download cover fire offline shooting game for chromebook<br />
-Cover fire offline shooting game zombies survival mode<br />
-Download cover fire offline shooting game for linux<br />
-Cover fire offline shooting game graphics and sound effects<br />
-Download cover fire offline shooting game for kindle fire<br />
-Cover fire offline shooting game steam version download<br />
-Download cover fire offline shooting game for bluestacks<br />
-Cover fire offline shooting game best sniper rifle<br />
-Download cover fire offline shooting game for nox player<br />
-Cover fire offline shooting game black ops mode<br />
-Download cover fire offline shooting game for memu play<br />
-Cover fire offline shooting game hero shooter mode<br />
-Download cover fire offline shooting game for ldplayer<br />
-Cover fire offline shooting game on-rails shooter mode<br />
-Download cover fire offline shooting game for gameloop<br />
-Cover fire offline shooting game pve shooter mode<br />
-Download cover fire offline shooting game for smartgaga<br />
-Cover fire offline shooting game realistic 3d graphics download<br />
-Download cover fire offline shooting game for genymotion<br />
-Cover fire offline shooting game easy controls download<br />
-Download cover fire offline shooting game for koplayer<br />
-Cover fire offline shooting game fun and addictive gameplay download</p>
-<h3>Download from Google Play Store</h3>
-<p>If you have an Android device, you can download Cover Fire from the Google Play Store for free. Just search for "Cover Fire: Offline Shooting" on the store or click on this link. You will need about 400 MB of free space on your device to install the game.</p>
-<h3>Download from Steam</h3>
-<p>If you have a Windows device, you can download Cover Fire from Steam for free. Just search for "Cover Fire: Offline Shooting Game" on Steam or click on this link. You will need about 1 GB of free space on your device to install the game.</p>
-<h3>Download from APKCombo</h3>
-<p>If you want to download Cover Fire from an alternative source, you can use APKCombo. This is a website that provides APK files for Android apps. You can download Cover Fire APK from APKCombo for free. Just search for "Cover Fire: Offline Shooting Game" on APKCombo or click on this link. You will need to enable unknown sources on your device settings to install the APK file.</p>
-<h2>Pros and cons of Cover Fire</h2>
-<p>Cover Fire is a great shooting game that offers a lot of fun and action. However, like any other game, it also has some pros and cons that you should consider before playing it. Here are some of them:</p>
-<h3>Pros</h3>
-<ul>
-<li>Cover Fire has a thrilling and immersive story mode that will keep you hooked for hours.</li>
-<li>Cover Fire has a realistic and stunning 3D graphics that will make you feel like you are in a real war zone.</li>
-<li>Cover Fire has a variety of weapons and customization options that will let you create your own style and strategy.</li>
-<li>Cover Fire has an online mode where you can challenge and compete with other players around the world and join cool events.</li>
-<li>Cover Fire has easy controls and low mobile requirements that make it accessible and enjoyable for everyone.</li>
-</ul>
-<h3>Cons</h3>
-<ul>
-<li>Cover Fire can be repetitive and boring after a while, especially if you play the same missions over and over again.</li>
-<li>Cover Fire can be frustrating and difficult at times, especially if you face enemies with higher levels and better weapons.</li>
-<li>Cover Fire can be annoying and intrusive with its ads and pop-ups that can interrupt your gameplay.</li>
-<li>Cover Fire can be expensive and unfair with its in-app purchases and premium features that can give you an advantage over other players.</li>
-<li>Cover Fire can be buggy and glitchy at times, especially if you have a slow or unstable internet connection.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Cover Fire is a shooting game that you can play on your mobile device, either online or offline. It has a captivating story mode, a competitive online mode, a realistic 3D graphics, a variety of weapons, and easy controls. However, it also has some drawbacks, such as being repetitive, frustrating, annoying, expensive, and buggy. Overall, Cover Fire is a game that you should try if you love shooting games and want to have some fun and action on your mobile device. You can download it for free from different sources, depending on your device and preference.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about Cover Fire:</p>
-<ol>
-<li>How do I play Cover Fire offline?</li>
-<p>To play Cover Fire offline, you need to download the game first from your preferred source. Then, you need to open the game and select the offline mode. You can choose from different missions and chapters in the story mode. You don't need an internet connection to play offline, but you won't be able to access the online features or update the game.</p>
-<li>How do I get more coins and gold in Cover Fire?</li>
-<p>To get more coins and gold in Cover Fire, you need to complete missions and challenges in the game. You can also watch ads or videos to earn some extra rewards. Alternatively, you can buy coins and gold with real money through in-app purchases. However, this is not recommended as it can be costly and unfair.</p>
-<li>How do I upgrade my weapons in Cover Fire?</li>
-<p>To upgrade your weapons in Cover Fire, you need to go to the arsenal menu and select the weapon you want to upgrade. You can upgrade different aspects of your weapon, such as damage, accuracy, reload speed, magazine size, etc. You will need coins or gold to upgrade your weapons, depending on the level of upgrade.</p>
-<li>How do I change my character in Cover Fire?</li>
-<p>To change your character in Cover Fire, you need to go to the squad menu and select the character you want to use. You can choose from different characters with different skills and abilities. You can also customize your character's appearance with different outfits and accessories. You will need coins or gold to unlock new characters or items.</p>
-<li>How do I join the online mode in Cover Fire?</li>
-<p>To join the online mode in Cover Fire, you need to have an internet connection and an account in the game. You can create an account with your email or Facebook login. Then, you need to go to the online mode menu and select the option you want to play. You can choose from sniper tournaments or events. You will be matched with other players based on your rank and skill level.</p> 401be4b1e0<br />
-<br />
-<br />

spaces/2023Liu2023/bingo/src/lib/hooks/use-enter-submit.tsx
DELETED
@@ -1,23 +0,0 @@
-import { useRef, type RefObject } from 'react'
-
-export function useEnterSubmit(): {
-  formRef: RefObject<HTMLFormElement>
-  onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void
-} {
-  const formRef = useRef<HTMLFormElement>(null)
-
-  const handleKeyDown = (
-    event: React.KeyboardEvent<HTMLTextAreaElement>
-  ): void => {
-    if (
-      event.key === 'Enter' &&
-      !event.shiftKey &&
-      !event.nativeEvent.isComposing
-    ) {
-      formRef.current?.requestSubmit()
-      event.preventDefault()
-    }
-  }
-
-  return { formRef, onKeyDown: handleKeyDown }
-}

spaces/232labs/VToonify/vtoonify/model/encoder/criteria/id_loss.py
DELETED
@@ -1,33 +0,0 @@
-import torch
-from torch import nn
-from model.encoder.encoders.model_irse import Backbone
-
-
-class IDLoss(nn.Module):
-    def __init__(self, model_paths):
-        super(IDLoss, self).__init__()
-        print('Loading ResNet ArcFace')
-        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
-        self.facenet.load_state_dict(torch.load(model_paths))
-        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
-        self.facenet.eval()
-
-    def extract_feats(self, x):
-        x = x[:, :, 35:223, 32:220]  # Crop interesting region
-        x = self.face_pool(x)
-        x_feats = self.facenet(x)
-        return x_feats
-
-    def forward(self, y_hat, y):
-        n_samples = y_hat.shape[0]
-        y_feats = self.extract_feats(y)  # Otherwise use the feature from there
-        y_hat_feats = self.extract_feats(y_hat)
-        y_feats = y_feats.detach()
-        loss = 0
-        count = 0
-        for i in range(n_samples):
-            diff_target = y_hat_feats[i].dot(y_feats[i])
-            loss += 1 - diff_target
-            count += 1
-
-        return loss / count

spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: 06 SL AI Image Music Video UI UX URL
-emoji: 📊
-colorFrom: pink
-colorTo: red
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/AIConsultant/MusicGen/audiocraft/data/sound_dataset.py
DELETED
@@ -1,330 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dataset of audio with a simple description.
-"""
-
-from dataclasses import dataclass, fields, replace
-import json
-from pathlib import Path
-import random
-import typing as tp
-
-import numpy as np
-import torch
-
-from .info_audio_dataset import (
-    InfoAudioDataset,
-    get_keyword_or_keyword_list
-)
-from ..modules.conditioners import (
-    ConditioningAttributes,
-    SegmentWithAttributes,
-    WavCondition,
-)
-
-
-EPS = torch.finfo(torch.float32).eps
-TARGET_LEVEL_LOWER = -35
-TARGET_LEVEL_UPPER = -15
-
-
-@dataclass
-class SoundInfo(SegmentWithAttributes):
-    """Segment info augmented with Sound metadata.
-    """
-    description: tp.Optional[str] = None
-    self_wav: tp.Optional[torch.Tensor] = None
-
-    @property
-    def has_sound_meta(self) -> bool:
-        return self.description is not None
-
-    def to_condition_attributes(self) -> ConditioningAttributes:
-        out = ConditioningAttributes()
-
-        for _field in fields(self):
-            key, value = _field.name, getattr(self, _field.name)
-            if key == 'self_wav':
-                out.wav[key] = value
-            else:
-                out.text[key] = value
-        return out
-
-    @staticmethod
-    def attribute_getter(attribute):
-        if attribute == 'description':
-            preprocess_func = get_keyword_or_keyword_list
-        else:
-            preprocess_func = None
-        return preprocess_func
-
-    @classmethod
-    def from_dict(cls, dictionary: dict, fields_required: bool = False):
-        _dictionary: tp.Dict[str, tp.Any] = {}
-
-        # allow a subset of attributes to not be loaded from the dictionary
-        # these attributes may be populated later
-        post_init_attributes = ['self_wav']
-
-        for _field in fields(cls):
-            if _field.name in post_init_attributes:
-                continue
-            elif _field.name not in dictionary:
-                if fields_required:
-                    raise KeyError(f"Unexpected missing key: {_field.name}")
-            else:
-                preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
-                value = dictionary[_field.name]
-                if preprocess_func:
-                    value = preprocess_func(value)
-                _dictionary[_field.name] = value
-        return cls(**_dictionary)
-
-
-class SoundDataset(InfoAudioDataset):
-    """Sound audio dataset: Audio dataset with environmental sound-specific metadata.
-
-    Args:
-        info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata.
-        external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset.
-            The metadata files contained in this folder are expected to match the stem of the audio file with
-            a json extension.
-        aug_p (float): Probability of performing audio mixing augmentation on the batch.
-        mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation.
-        mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation.
-        mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation.
-        mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation.
-        kwargs: Additional arguments for AudioDataset.
-
-    See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
-    """
-    def __init__(
-            self,
-            *args,
-            info_fields_required: bool = True,
-            external_metadata_source: tp.Optional[str] = None,
-            aug_p: float = 0.,
-            mix_p: float = 0.,
-            mix_snr_low: int = -5,
-            mix_snr_high: int = 5,
-            mix_min_overlap: float = 0.5,
-            **kwargs
-    ):
-        kwargs['return_info'] = True  # We require the info for each song of the dataset.
-        super().__init__(*args, **kwargs)
-        self.info_fields_required = info_fields_required
-        self.external_metadata_source = external_metadata_source
-        self.aug_p = aug_p
-        self.mix_p = mix_p
-        if self.aug_p > 0:
-            assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0"
-            assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio"
-        self.mix_snr_low = mix_snr_low
-        self.mix_snr_high = mix_snr_high
-        self.mix_min_overlap = mix_min_overlap
-
-    def _get_info_path(self, path: tp.Union[str, Path]) -> Path:
-        """Get path of JSON with metadata (description, etc.).
-        If there exists a JSON with the same name as 'path.name', then it will be used.
-        Else, such JSON will be searched for in an external json source folder if it exists.
-        """
-        info_path = Path(path).with_suffix('.json')
-        if Path(info_path).exists():
-            return info_path
-        elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists():
-            return Path(self.external_metadata_source) / info_path.name
-        else:
-            raise Exception(f"Unable to find a metadata JSON for path: {path}")
-
-    def __getitem__(self, index):
-        wav, info = super().__getitem__(index)
-        info_data = info.to_dict()
-        info_path = self._get_info_path(info.meta.path)
-        if Path(info_path).exists():
-            with open(info_path, 'r') as json_file:
-                sound_data = json.load(json_file)
-                sound_data.update(info_data)
-                sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required)
-                # if there are multiple descriptions, sample one randomly
-                if isinstance(sound_info.description, list):
-                    sound_info.description = random.choice(sound_info.description)
-        else:
-            sound_info = SoundInfo.from_dict(info_data, fields_required=False)
-
-        sound_info.self_wav = WavCondition(
-            wav=wav[None], length=torch.tensor([info.n_frames]),
-            sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
-
-        return wav, sound_info
-
-    def collater(self, samples):
-        # when training, audio mixing is performed in the collate function
-        wav, sound_info = super().collater(samples)  # SoundDataset always returns infos
-        if self.aug_p > 0:
-            wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p,
-                                          snr_low=self.mix_snr_low, snr_high=self.mix_snr_high,
-                                          min_overlap=self.mix_min_overlap)
-        return wav, sound_info
-
-
-def rms_f(x: torch.Tensor) -> torch.Tensor:
-    return (x ** 2).mean(1).pow(0.5)
-
-
-def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
-    """Normalize the signal to the target level."""
-    rms = rms_f(audio)
-    scalar = 10 ** (target_level / 20) / (rms + EPS)
-    audio = audio * scalar.unsqueeze(1)
-    return audio
-
-
-def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor:
-    return (abs(audio) > clipping_threshold).any(1)
-
-
-def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor:
-    start = random.randint(0, int(src.shape[1] * (1 - min_overlap)))
-    remainder = src.shape[1] - start
-    if dst.shape[1] > remainder:
-        src[:, start:] = src[:, start:] + dst[:, :remainder]
-    else:
-        src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst
-    return src
-
-
-def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float,
-              target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor:
-    """Function to mix clean speech and noise at various SNR levels.
-
-    Args:
-        clean (torch.Tensor): Clean audio source to mix, of shape [B, T].
-        noise (torch.Tensor): Noise audio source to mix, of shape [B, T].
-        snr (int): SNR level when mixing.
-        min_overlap (float): Minimum overlap between the two mixed sources.
-        target_level (int): Gain level in dB.
-        clipping_threshold (float): Threshold for clipping the audio.
-    Returns:
-        torch.Tensor: The mixed audio, of shape [B, T].
-    """
-    if clean.shape[1] > noise.shape[1]:
-        noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1]))
-    else:
-        noise = noise[:, :clean.shape[1]]
-
-    # normalizing to -25 dB FS
-    clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS)
-    clean = normalize(clean, target_level)
-    rmsclean = rms_f(clean)
-
-    noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS)
-    noise = normalize(noise, target_level)
-    rmsnoise = rms_f(noise)
-
-    # set the noise level for a given SNR
-    noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1)
-    noisenewlevel = noise * noisescalar
-
-    # mix noise and clean speech
-    noisyspeech = mix_pair(clean, noisenewlevel, min_overlap)
-
-    # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
-    # there is a chance of clipping that might happen with very less probability, which is not a major issue.
-    noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER)
-    rmsnoisy = rms_f(noisyspeech)
-    scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1)
-    noisyspeech = noisyspeech * scalarnoisy
-    clean = clean * scalarnoisy
-    noisenewlevel = noisenewlevel * scalarnoisy
-
-    # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
-    clipped = is_clipped(noisyspeech)
-    if clipped.any():
-        noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS)
-        noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel
-
-    return noisyspeech
-
-
-def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float):
-    if snr_low == snr_high:
-        snr = snr_low
-    else:
-        snr = np.random.randint(snr_low, snr_high)
-    mix = snr_mixer(src, dst, snr, min_overlap)
-    return mix
-
-
-def mix_text(src_text: str, dst_text: str):
-    """Mix text from different sources by concatenating them."""
-    if src_text == dst_text:
-        return src_text
-    return src_text + " " + dst_text
-
-
-def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float,
-                snr_low: int, snr_high: int, min_overlap: float):
-    """Mix samples within a batch, summing the waveforms and concatenating the text infos.
-
-    Args:
-        wavs (torch.Tensor): Audio tensors of shape [B, C, T].
-        infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio.
-        aug_p (float): Augmentation probability.
-        mix_p (float): Proportion of items in the batch to mix (and merge) together.
-        snr_low (int): Lowerbound for sampling SNR.
-        snr_high (int): Upperbound for sampling SNR.
-        min_overlap (float): Minimum overlap between mixed samples.
-    Returns:
-        tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs
-            and mixed SoundInfo for the given batch.
-    """
-    # no mixing to perform within the batch
-    if mix_p == 0:
-        return wavs, infos
-
-    if random.uniform(0, 1) < aug_p:
-        # perform all augmentations on waveforms as [B, T]
-        # randomly picking pairs of audio to mix
-        assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}"
-        wavs = wavs.mean(dim=1, keepdim=False)
-        B, T = wavs.shape
-        k = int(mix_p * B)
-        mixed_sources_idx = torch.randperm(B)[:k]
-        mixed_targets_idx = torch.randperm(B)[:k]
-        aug_wavs = snr_mix(
-            wavs[mixed_sources_idx],
-            wavs[mixed_targets_idx],
-            snr_low,
-            snr_high,
-            min_overlap,
-        )
-        # mixing textual descriptions in metadata
-        descriptions = [info.description for info in infos]
-        aug_infos = []
-        for i, j in zip(mixed_sources_idx, mixed_targets_idx):
-            text = mix_text(descriptions[i], descriptions[j])
-            m = replace(infos[i])
-            m.description = text
-            aug_infos.append(m)
-
-        # back to [B, C, T]
-        aug_wavs = aug_wavs.unsqueeze(1)
-        assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch."
-        assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}"
-        assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch"
-
-        return aug_wavs, aug_infos  # [B, C, T]
-    else:
-        # randomly pick samples in the batch to match
-        # the batch size when performing audio mixing
-        B, C, T = wavs.shape
-        k = int(mix_p * B)
-        wav_idx = torch.randperm(B)[:k]
-        wavs = wavs[wav_idx]
-        infos = [infos[i] for i in wav_idx]
-        assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch"
-
-        return wavs, infos  # [B, C, T]

spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/runs/train_mfa_align.py
DELETED
@@ -1,46 +0,0 @@
-import utils.commons.single_thread_env  # NOQA
-import glob
-import subprocess
-from textgrid import TextGrid
-import os
-from text_to_speech.utils.commons.hparams import hparams, set_hparams
-
-
-def train_mfa_align(mfa_outputs="mfa_outputs",
-                    mfa_inputs="mfa_inputs",
-                    model_name=None, pretrain_model_name=None,
-                    mfa_cmd='train'):
-    CORPUS = hparams['processed_data_dir'].split("/")[-1]
-    NUM_JOB = int(os.getenv('N_PROC', os.cpu_count()))
-    env_vars = [f'CORPUS={CORPUS}', f'NUM_JOB={NUM_JOB}']
-    if mfa_outputs is not None:
-        env_vars.append(f'MFA_OUTPUTS={mfa_outputs}')
-    if mfa_inputs is not None:
-        env_vars.append(f'MFA_INPUTS={mfa_inputs}')
-    if model_name is not None:
-        env_vars.append(f'MODEL_NAME={model_name}')
-    if pretrain_model_name is not None:
-        env_vars.append(f'PRETRAIN_MODEL_NAME={pretrain_model_name}')
-    if mfa_cmd is not None:
-        env_vars.append(f'MFA_CMD={mfa_cmd}')
-    env_str = ' '.join(env_vars)
-    print(f"| Run MFA for {CORPUS}. Env vars: {env_str}")
-    subprocess.check_call(f'{env_str} bash mfa_usr/run_mfa_train_align.sh', shell=True)
-    mfa_offset = hparams['preprocess_args']['mfa_offset']
-    if mfa_offset > 0:
-        for tg_fn in glob.glob(f'{hparams["processed_data_dir"]}/{mfa_outputs}/*.TextGrid'):
-            tg = TextGrid.fromFile(tg_fn)
-            max_time = tg.maxTime
-            for tier in tg.tiers:
-                for interval in tier.intervals:
-                    interval.maxTime = min(interval.maxTime + mfa_offset, max_time)
-                    interval.minTime = min(interval.minTime + mfa_offset, max_time)
-                tier.intervals[0].minTime = 0
-                tier.maxTime = min(tier.maxTime + mfa_offset, max_time)
-            tg.write(tg_fn)
-            TextGrid.fromFile(tg_fn)
-
-
-if __name__ == '__main__':
-    set_hparams(print_hparams=False)
-    train_mfa_align()

spaces/AIGText/GlyphControl/ldm/modules/midas/midas/transforms.py
DELETED
@@ -1,234 +0,0 @@
-import numpy as np
-import cv2
-import math
-
-
-def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
-    """Rezise the sample to ensure the given size. Keeps aspect ratio.
-
-    Args:
-        sample (dict): sample
-        size (tuple): image size
-
-    Returns:
-        tuple: new size
-    """
-    shape = list(sample["disparity"].shape)
-
-    if shape[0] >= size[0] and shape[1] >= size[1]:
-        return sample
-
-    scale = [0, 0]
-    scale[0] = size[0] / shape[0]
-    scale[1] = size[1] / shape[1]
-
-    scale = max(scale)
-
-    shape[0] = math.ceil(scale * shape[0])
-    shape[1] = math.ceil(scale * shape[1])
-
-    # resize
-    sample["image"] = cv2.resize(
-        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
-    )
-
-    sample["disparity"] = cv2.resize(
-        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
-    )
-    sample["mask"] = cv2.resize(
-        sample["mask"].astype(np.float32),
-        tuple(shape[::-1]),
-        interpolation=cv2.INTER_NEAREST,
-    )
-    sample["mask"] = sample["mask"].astype(bool)
-
-    return tuple(shape)
-
-
-class Resize(object):
-    """Resize sample to given size (width, height).
-    """
-
-    def __init__(
-        self,
-        width,
-        height,
-        resize_target=True,
-        keep_aspect_ratio=False,
-        ensure_multiple_of=1,
-        resize_method="lower_bound",
-        image_interpolation_method=cv2.INTER_AREA,
-    ):
-        """Init.
-
-        Args:
-            width (int): desired output width
-            height (int): desired output height
-            resize_target (bool, optional):
-                True: Resize the full sample (image, mask, target).
-                False: Resize image only.
-                Defaults to True.
-            keep_aspect_ratio (bool, optional):
-                True: Keep the aspect ratio of the input sample.
-                Output sample might not have the given width and height, and
-                resize behaviour depends on the parameter 'resize_method'.
-                Defaults to False.
-            ensure_multiple_of (int, optional):
-                Output width and height is constrained to be multiple of this parameter.
-                Defaults to 1.
-            resize_method (str, optional):
-                "lower_bound": Output will be at least as large as the given size.
-                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
-                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
-                Defaults to "lower_bound".
-        """
-        self.__width = width
-        self.__height = height
-
-        self.__resize_target = resize_target
-        self.__keep_aspect_ratio = keep_aspect_ratio
-        self.__multiple_of = ensure_multiple_of
-        self.__resize_method = resize_method
-        self.__image_interpolation_method = image_interpolation_method
-
-    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
-        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        if max_val is not None and y > max_val:
-            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        if y < min_val:
-            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        return y
-
-    def get_size(self, width, height):
-        # determine new height and width
-        scale_height = self.__height / height
-        scale_width = self.__width / width
-
-        if self.__keep_aspect_ratio:
-            if self.__resize_method == "lower_bound":
-                # scale such that output size is lower bound
-                if scale_width > scale_height:
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            elif self.__resize_method == "upper_bound":
-                # scale such that output size is upper bound
-                if scale_width < scale_height:
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            elif self.__resize_method == "minimal":
-                # scale as least as possbile
-                if abs(1 - scale_width) < abs(1 - scale_height):
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            else:
-                raise ValueError(
-                    f"resize_method {self.__resize_method} not implemented"
-                )
-
-        if self.__resize_method == "lower_bound":
-            new_height = self.constrain_to_multiple_of(
-                scale_height * height, min_val=self.__height
-            )
-            new_width = self.constrain_to_multiple_of(
-                scale_width * width, min_val=self.__width
-            )
-        elif self.__resize_method == "upper_bound":
-            new_height = self.constrain_to_multiple_of(
-                scale_height * height, max_val=self.__height
-            )
-            new_width = self.constrain_to_multiple_of(
-                scale_width * width, max_val=self.__width
-            )
-        elif self.__resize_method == "minimal":
-            new_height = self.constrain_to_multiple_of(scale_height * height)
-            new_width = self.constrain_to_multiple_of(scale_width * width)
-        else:
-            raise ValueError(f"resize_method {self.__resize_method} not implemented")
-
-        return (new_width, new_height)
-
-    def __call__(self, sample):
-        width, height = self.get_size(
-            sample["image"].shape[1], sample["image"].shape[0]
-        )
-
-        # resize sample
-        sample["image"] = cv2.resize(
-            sample["image"],
-            (width, height),
-            interpolation=self.__image_interpolation_method,
-        )
-
-        if self.__resize_target:
-            if "disparity" in sample:
-                sample["disparity"] = cv2.resize(
-                    sample["disparity"],
-                    (width, height),
-                    interpolation=cv2.INTER_NEAREST,
-                )
-
-            if "depth" in sample:
-                sample["depth"] = cv2.resize(
-                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
-                )
-
-            sample["mask"] = cv2.resize(
-                sample["mask"].astype(np.float32),
-                (width, height),
-                interpolation=cv2.INTER_NEAREST,
-            )
-            sample["mask"] = sample["mask"].astype(bool)
-
-        return sample
-
-
-class NormalizeImage(object):
-    """Normlize image by given mean and std.
-    """
-
-    def __init__(self, mean, std):
-        self.__mean = mean
-        self.__std = std
-
-    def __call__(self, sample):
-        sample["image"] = (sample["image"] - self.__mean) / self.__std
-
-        return sample
-
-
-class PrepareForNet(object):
-    """Prepare sample for usage as network input.
-    """
-
-    def __init__(self):
-        pass
-
-    def __call__(self, sample):
-        image = np.transpose(sample["image"], (2, 0, 1))
-        sample["image"] = np.ascontiguousarray(image).astype(np.float32)
-
-        if "mask" in sample:
-            sample["mask"] = sample["mask"].astype(np.float32)
-            sample["mask"] = np.ascontiguousarray(sample["mask"])
-
-        if "disparity" in sample:
-            disparity = sample["disparity"].astype(np.float32)
-            sample["disparity"] = np.ascontiguousarray(disparity)
-
-        if "depth" in sample:
-            depth = sample["depth"].astype(np.float32)
-            sample["depth"] = np.ascontiguousarray(depth)
-
-        return sample

spaces/AIKey/TestStatic/index.html
DELETED
@@ -1,32 +0,0 @@
-<!DOCTYPE html>
-<html>
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width" />
-    <title>My static Space</title>
-    <style>
-      .space {
-        max-width: 100%;
-        max-height: 100%;
-        width: 100vw;
-        height: 100vh;
-        overflow: hidden;
-      }
-      .iframe {
-        min-width: 100%;
-        min-height: 100%;
-        background: black;
-      }
-    </style>
-  </head>
-  <body>
-    <div class="space">
-      <iframe
-        class="iframe"
-        allowfullscreen="true"
-        frameborder="0"
-        src="https://chat.d-id.com/">
-      </iframe>
-    </div>
-  </body>
-</html>

spaces/ALR03/gradiolangchainChatbotOpenAI/app.py
DELETED
@@ -1,34 +0,0 @@
-import os
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
-
-template = """You are a helpful assistant to answer all user queries.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
-    input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
-    llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
-    prompt=prompt,
-    verbose=True,
-    memory=memory,
-)
-
-def get_text_response(user_message,history):
-    response = llm_chain.predict(user_message = user_message)
-    return response
-
-demo = gr.ChatInterface(get_text_response)
-
-if __name__ == "__main__":
-    demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.

spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/webSearchParameters.ts
DELETED
@@ -1,9 +0,0 @@
-import { writable } from "svelte/store";
-export interface WebSearchParameters {
-  useSearch: boolean;
-  nItems: number;
-}
-export const webSearchParameters = writable<WebSearchParameters>({
-  useSearch: false,
-  nItems: 5,
-});

spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/__init__.py
DELETED
@@ -1,10 +0,0 @@
-from agentverse.registry import Registry
-
-selector_registry = Registry(name="SelectorRegistry")
-
-from .base import BaseSelector
-from .basic import BasicSelector
-from .classroom import ClassroomSelector
-from .sde_team import SdeTeamSelector
-from .sde_team_given_tests import SdeTeamGivenTestsSelector
-from .pokemon import PokemonSelector

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.js
DELETED
@@ -1,91 +0,0 @@
-import ColorInputBase from '../colorinputbase/ColorInputBase.js';
-import Methods from './methods/Methods.js';
-import CreateBackground from '../../utils/build/CreateBackground.js';
-
-const GetValue = Phaser.Utils.Objects.GetValue;
-
-class ColorInput extends ColorInputBase {
-    constructor(scene, config) {
-        if (config === undefined) {
-            config = {};
-        }
-
-        super(scene, config);
-        this.type = 'rexColorInput';
-
-        if (!config.hasOwnProperty('colorPicker')) {
-            config.colorPicker = {
-                background: { color: 0x0 }
-            }
-        }
-
-        var colorPickerConfig = config.colorPicker;
-        var hasColorPicker = (colorPickerConfig !== false) && (colorPickerConfig !== null);
-
-        if (hasColorPicker) {
-            this.setColorPickerSize(
-                GetValue(colorPickerConfig, 'width', 160),
-                GetValue(colorPickerConfig, 'height', 170)
-            );
-
-            var createBackgroundCallback;
-            var background = GetValue(colorPickerConfig, 'background');
-            if (background) {
-                createBackgroundCallback = function (scene) {
-                    return CreateBackground(scene, background);
-                }
-            } else {
-                createBackgroundCallback = GetValue(colorPickerConfig, 'createBackgroundCallback');
-            }
-            this.setCreateColorPickerBackgroundCallback(createBackgroundCallback);
-
-            this.setColorPickerHPalettePosition(GetValue(colorPickerConfig, 'hPalettePosition', 0));
-            this.setColorPickerExpandDirection(GetValue(colorPickerConfig, 'expandDirection'));
-            this.setColorPickerEaseInDuration(GetValue(colorPickerConfig, 'easeIn', 200));
-            this.setColorPickerEaseOutDuration(GetValue(colorPickerConfig, 'easeOut', 200));
-            this.setColorPickerTransitInCallback(GetValue(colorPickerConfig, 'transitIn'));
-            this.setColorPickerTransitOutCallback(GetValue(colorPickerConfig, 'transitOut'));
-            this.setColorPickerBounds(GetValue(colorPickerConfig, 'bounds'));
-
-            var colorPickerSpaceConfig = GetValue(colorPickerConfig, 'space');
-            if (colorPickerSpaceConfig === undefined) {
-                colorPickerSpaceConfig = { left: 10, right: 10, top: 10, bottom: 10, item: 8 }
-            }
-            this.setColorPickerSpace(colorPickerSpaceConfig);
-        }
-
-        var colorComponentsConfig = config.colorComponents;
-        var hasColorComponents = (colorComponentsConfig !== false) && (colorComponentsConfig !== null);
-        if (hasColorPicker && hasColorComponents) {
-            this.setColorComponentsHeight(GetValue(colorComponentsConfig, 'height', 30));
-
-            this.setColorComponentsFormatLabelConfig(GetValue(colorComponentsConfig, 'formatLabel'));
-
-            var colorComponentsInputTextConfig = GetValue(colorComponentsConfig, 'inputText');
-            if (!colorComponentsInputTextConfig) {
-                colorComponentsInputTextConfig = GetValue(config, 'inputText');
-            }
-            this.setColorComponentsInputTextConfig(colorComponentsInputTextConfig);
-
-            var colorComponentsSpace = GetValue(colorComponentsConfig, 'space');
-            if (colorComponentsSpace === undefined) {
-                colorComponentsSpace = { item: 8 }
-            }
-            this.setColorComponentsSpace(colorComponentsSpace);
-        }
-
-
-
-        var swatch = this.childrenMap.swatch;
-        if (swatch && hasColorPicker) {
-            this.onClick(swatch, this.openColorPicker, this);
-        }
-    }
-}
-
-Object.assign(
-    ColorInput.prototype,
-    Methods,
-)
-
-export default ColorInput;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateRoundRectangle.js
DELETED
@@ -1,12 +0,0 @@
-import MergeStyle from './utils/MergeStyle.js';
-import RoundRectangle from '../../roundrectangle/RoundRectangle.js';
-
-var CreateRoundRectangle = function (scene, data, view, styles, customBuilders) {
-    data = MergeStyle(data, styles);
-
-    var gameObject = new RoundRectangle(scene, data);
-    scene.add.existing(gameObject);
-    return gameObject;
-}
-
-export default CreateRoundRectangle;

spaces/AlanMars/QYL-AI-Space/modules/webui_locale.py
DELETED
@@ -1,27 +0,0 @@
-import os
-import locale
-import commentjson as json
-
-
-class I18nAuto:
-    def __init__(self):
-        if os.path.exists("config.json"):
-            with open("config.json", "r", encoding='utf-8') as f:
-                config = json.load(f)
-        else:
-            config = {}
-        lang_config = config.get("language", "auto")
-        language = os.environ.get("LANGUAGE", lang_config)
-        if language == "auto":
-            language = locale.getdefaultlocale()[0]  # get the language code of the system (ex. zh_CN)
-        self.language_map = {}
-        self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
-        if self.file_is_exists:
-            with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
-                self.language_map.update(json.load(f))
-
-    def __call__(self, key):
-        if self.file_is_exists and key in self.language_map:
-            return self.language_map[key]
-        else:
-            return key

spaces/Ali36Ahmad/magic-diffusion/share_btn.py
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
|
2 |
-
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
|
3 |
-
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
|
4 |
-
</svg>"""
|
5 |
-
|
6 |
-
loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
|
7 |
-
style="color: #ffffff;
|
8 |
-
"
|
9 |
-
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
|
10 |
-
|
11 |
-
share_js = """async () => {
|
12 |
-
async function uploadFile(file){
|
13 |
-
const UPLOAD_URL = 'https://huggingface.co/uploads';
|
14 |
-
const response = await fetch(UPLOAD_URL, {
|
15 |
-
method: 'POST',
|
16 |
-
headers: {
|
17 |
-
'Content-Type': file.type,
|
18 |
-
'X-Requested-With': 'XMLHttpRequest',
|
19 |
-
},
|
20 |
-
body: file, /// <- File inherits from Blob
|
21 |
-
});
|
22 |
-
const url = await response.text();
|
23 |
-
return url;
|
24 |
-
}
|
25 |
-
async function getInputImgFile(imgEl){
|
26 |
-
const res = await fetch(imgEl.src);
|
27 |
-
const blob = await res.blob();
|
28 |
-
const imgId = Date.now() % 200;
|
29 |
-
const isPng = imgEl.src.startsWith(`data:image/png`);
|
30 |
-
if(isPng){
|
31 |
-
const fileName = `magic-prompt-${{imgId}}.png`;
|
32 |
-
return new File([blob], fileName, { type: 'image/png' });
|
33 |
-
}else{
|
34 |
-
const fileName = `magic-prompt-${{imgId}}.jpg`;
|
35 |
-
return new File([blob], fileName, { type: 'image/jpeg' });
|
36 |
-
}
|
37 |
-
}
|
38 |
-
const gradioEl = document.querySelector('body > gradio-app');
|
39 |
-
// const gradioEl = document.querySelector("gradio-app").shadowRoot;
|
40 |
-
const inputImgEl = gradioEl.querySelector('#input-img img');
|
41 |
-
const imgEls = gradioEl.querySelectorAll('#generated-gallery img');
|
42 |
-
const promptTxt = gradioEl.querySelector('#translated textarea').value;
|
43 |
-
let titleTxt = promptTxt;
|
44 |
-
if(titleTxt.length > 100){
|
45 |
-
titleTxt = titleTxt.slice(0, 100) + ' ...';
|
46 |
-
}
|
47 |
-
const shareBtnEl = gradioEl.querySelector('#share-btn');
|
48 |
-
const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
|
49 |
-
const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
|
50 |
-
if(!imgEls.length){
|
51 |
-
return;
|
52 |
-
};
|
53 |
-
shareBtnEl.style.pointerEvents = 'none';
|
54 |
-
shareIconEl.style.display = 'none';
|
55 |
-
loadingIconEl.style.removeProperty('display');
|
56 |
-
const files = await Promise.all(
|
57 |
-
[...imgEls].map(async (imgEl) => {
|
58 |
-
const res = await fetch(imgEl.src);
|
59 |
-
const blob = await res.blob();
|
60 |
-
const imgId = Date.now() % 200;
|
61 |
-
const fileName = `sd-perception-${{imgId}}.jpg`;
|
62 |
-
return new File([blob], fileName, { type: 'image/jpeg' });
|
63 |
-
})
|
64 |
-
);
|
65 |
-
const inputFile = await getInputImgFile(inputImgEl);
|
66 |
-
files.push(inputFile);
|
67 |
-
const urls = await Promise.all(files.map((f) => uploadFile(f)));
|
68 |
-
const urlInputImg = urls.pop();
|
69 |
-
const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
|
70 |
-
const htmlImgsMd = htmlImgs.join(`\n`);
|
71 |
-
const descriptionMd = `#### Input img:
|
72 |
-
<img src='${urlInputImg}' style='max-height: 350px;'>
|
73 |
-
#### Caption:
|
74 |
-
${promptTxt}
|
75 |
-
#### Generations:
|
76 |
-
<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
|
77 |
-
${htmlImgsMd}
|
78 |
-
</div>`;
|
79 |
-
const params = new URLSearchParams({
|
80 |
-
title: titleTxt,
|
81 |
-
description: descriptionMd,
|
82 |
-
});
|
83 |
-
const paramsStr = params.toString();
|
84 |
-
window.open(`https://huggingface.co/spaces/huggingface-projects/magic-diffusion/new?${paramsStr}`, '_blank');
|
85 |
-
shareBtnEl.style.removeProperty('pointer-events');
|
86 |
-
shareIconEl.style.removeProperty('display');
|
87 |
-
loadingIconEl.style.display = 'none';
|
88 |
-
}"""
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py
DELETED
@@ -1,23 +0,0 @@
-from easydict import EasyDict as edict
-
-# configs for test speed
-
-config = edict()
-config.loss = "arcface"
-config.network = "r50"
-config.resume = False
-config.output = None
-config.embedding_size = 512
-config.sample_rate = 1.0
-config.fp16 = True
-config.momentum = 0.9
-config.weight_decay = 5e-4
-config.batch_size = 128
-config.lr = 0.1  # batch size is 512
-
-config.rec = "synthetic"
-config.num_classes = 300 * 10000
-config.num_epoch = 30
-config.warmup_epoch = -1
-config.decay_epoch = [10, 16, 22]
-config.val_targets = []

spaces/Alpaca233/SadTalker/src/face3d/models/facerecon_model.py
DELETED
@@ -1,220 +0,0 @@
|
|
1 |
-
"""This script defines the face reconstruction model for Deep3DFaceRecon_pytorch
|
2 |
-
"""
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
from src.face3d.models.base_model import BaseModel
|
7 |
-
from src.face3d.models import networks
|
8 |
-
from src.face3d.models.bfm import ParametricFaceModel
|
9 |
-
from src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss
|
10 |
-
from src.face3d.util import util
|
11 |
-
from src.face3d.util.nvdiffrast import MeshRenderer
|
12 |
-
# from src.face3d.util.preprocess import estimate_norm_torch
|
13 |
-
|
14 |
-
import trimesh
|
15 |
-
from scipy.io import savemat
|
16 |
-
|
17 |
-
class FaceReconModel(BaseModel):
|
18 |
-
|
19 |
-
@staticmethod
|
20 |
-
def modify_commandline_options(parser, is_train=False):
|
21 |
-
""" Configures options specific for CUT model
|
22 |
-
"""
|
23 |
-
# net structure and parameters
|
24 |
-
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
|
25 |
-
parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth')
|
26 |
-
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
|
27 |
-
parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
|
28 |
-
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
|
29 |
-
|
30 |
-
# renderer parameters
|
31 |
-
parser.add_argument('--focal', type=float, default=1015.)
|
32 |
-
parser.add_argument('--center', type=float, default=112.)
|
33 |
-
parser.add_argument('--camera_d', type=float, default=10.)
|
34 |
-
parser.add_argument('--z_near', type=float, default=5.)
|
35 |
-
parser.add_argument('--z_far', type=float, default=15.)
|
36 |
-
|
37 |
-
if is_train:
|
38 |
-
# training parameters
|
39 |
-
parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure')
|
40 |
-
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
|
41 |
-
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
|
42 |
-
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
|
43 |
-
|
44 |
-
|
45 |
-
# augmentation parameters
|
46 |
-
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
|
47 |
-
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
|
48 |
-
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
|
49 |
-
|
50 |
-
# loss weights
|
51 |
-
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
|
52 |
-
parser.add_argument('--w_color', type=float, default=1.92, help='weight for loss loss')
|
53 |
-
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
|
54 |
-
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
|
55 |
-
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
|
56 |
-
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
|
57 |
-
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
|
58 |
-
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
|
59 |
-
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
|
60 |
-
|
61 |
-
opt, _ = parser.parse_known_args()
|
62 |
-
parser.set_defaults(
|
63 |
-
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
|
64 |
-
)
|
65 |
-
if is_train:
|
66 |
-
parser.set_defaults(
|
67 |
-
use_crop_face=True, use_predef_M=False
|
68 |
-
)
|
69 |
-
return parser
|
70 |
-
|
71 |
-
def __init__(self, opt):
|
72 |
-
"""Initialize this model class.
|
73 |
-
|
74 |
-
Parameters:
|
75 |
-
opt -- training/test options
|
76 |
-
|
77 |
-
A few things can be done here.
|
78 |
-
- (required) call the initialization function of BaseModel
|
79 |
-
- define loss function, visualization images, model names, and optimizers
|
80 |
-
"""
|
81 |
-
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
|
82 |
-
|
83 |
-
self.visual_names = ['output_vis']
|
84 |
-
self.model_names = ['net_recon']
|
85 |
-
self.parallel_names = self.model_names + ['renderer']
|
86 |
-
|
87 |
-
self.facemodel = ParametricFaceModel(
|
88 |
-
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
|
89 |
-
is_train=self.isTrain, default_name=opt.bfm_model
|
90 |
-
)
|
91 |
-
|
92 |
-
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
|
93 |
-
self.renderer = MeshRenderer(
|
94 |
-
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
|
95 |
-
)
|
96 |
-
|
97 |
-
if self.isTrain:
|
98 |
-
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
|
99 |
-
|
100 |
-
self.net_recog = networks.define_net_recog(
|
101 |
-
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
|
102 |
-
)
|
103 |
-
# loss func name: (compute_%s_loss) % loss_name
|
104 |
-
self.compute_feat_loss = perceptual_loss
|
105 |
-
self.comupte_color_loss = photo_loss
|
106 |
-
self.compute_lm_loss = landmark_loss
|
107 |
-
self.compute_reg_loss = reg_loss
|
108 |
-
self.compute_reflc_loss = reflectance_loss
|
109 |
-
|
110 |
-
self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
|
111 |
-
self.optimizers = [self.optimizer]
|
112 |
-
self.parallel_names += ['net_recog']
|
113 |
-
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
|
114 |
-
|
115 |
-
def set_input(self, input):
|
116 |
-
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
|
117 |
-
|
118 |
-
Parameters:
|
119 |
-
input: a dictionary that contains the data itself and its metadata information.
|
120 |
-
"""
|
121 |
-
self.input_img = input['imgs'].to(self.device)
|
122 |
-
self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
|
123 |
-
self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
|
124 |
-
self.trans_m = input['M'].to(self.device) if 'M' in input else None
|
125 |
-
self.image_paths = input['im_paths'] if 'im_paths' in input else None
|
126 |
-
|
127 |
-
def forward(self, output_coeff, device):
|
128 |
-
self.facemodel.to(device)
|
129 |
-
self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \
|
130 |
-
self.facemodel.compute_for_render(output_coeff)
|
131 |
-
self.pred_mask, _, self.pred_face = self.renderer(
|
132 |
-
self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color)
|
133 |
-
|
134 |
-
self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff)
|
135 |
-
|
136 |
-
|
137 |
-
def compute_losses(self):
|
138 |
-
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
|
139 |
-
|
140 |
-
assert self.net_recog.training == False
|
141 |
-
trans_m = self.trans_m
|
142 |
-
if not self.opt.use_predef_M:
|
143 |
-
trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])
|
144 |
-
|
145 |
-
pred_feat = self.net_recog(self.pred_face, trans_m)
|
146 |
-
gt_feat = self.net_recog(self.input_img, self.trans_m)
|
147 |
-
self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)
|
148 |
-
|
149 |
-
face_mask = self.pred_mask
|
150 |
-
if self.opt.use_crop_face:
|
151 |
-
face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)
|
152 |
-
|
153 |
-
face_mask = face_mask.detach()
|
154 |
-
self.loss_color = self.opt.w_color * self.comupte_color_loss(
|
155 |
-
self.pred_face, self.input_img, self.atten_mask * face_mask)
|
156 |
-
|
157 |
-
loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
|
158 |
-
self.loss_reg = self.opt.w_reg * loss_reg
|
159 |
-
self.loss_gamma = self.opt.w_gamma * loss_gamma
|
160 |
-
|
161 |
-
self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)
|
162 |
-
|
163 |
-
self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)
|
164 |
-
|
165 |
-
self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
|
166 |
-
+ self.loss_lm + self.loss_reflc
|
167 |
-
|
168 |
-
|
169 |
-
def optimize_parameters(self, isTrain=True):
|
170 |
-
self.forward()
|
171 |
-
self.compute_losses()
|
172 |
-
"""Update network weights; it will be called in every training iteration."""
|
173 |
-
if isTrain:
|
174 |
-
self.optimizer.zero_grad()
|
175 |
-
self.loss_all.backward()
|
176 |
-
self.optimizer.step()
|
177 |
-
|
178 |
-
def compute_visuals(self):
|
179 |
-
with torch.no_grad():
|
180 |
-
input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy()
|
181 |
-
output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img
|
182 |
-
output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy()
|
183 |
-
|
184 |
-
if self.gt_lm is not None:
|
185 |
-
gt_lm_numpy = self.gt_lm.cpu().numpy()
|
186 |
-
pred_lm_numpy = self.pred_lm.detach().cpu().numpy()
|
187 |
-
output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b')
|
188 |
-
output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r')
|
189 |
-
|
190 |
-
output_vis_numpy = np.concatenate((input_img_numpy,
|
191 |
-
output_vis_numpy_raw, output_vis_numpy), axis=-2)
|
192 |
-
else:
|
193 |
-
output_vis_numpy = np.concatenate((input_img_numpy,
|
194 |
-
output_vis_numpy_raw), axis=-2)
|
195 |
-
|
196 |
-
self.output_vis = torch.tensor(
|
197 |
-
output_vis_numpy / 255., dtype=torch.float32
|
198 |
-
).permute(0, 3, 1, 2).to(self.device)
|
199 |
-
|
200 |
-
def save_mesh(self, name):
|
201 |
-
|
202 |
-
recon_shape = self.pred_vertex # get reconstructed shape
|
203 |
-
recon_shape[..., -1] = 10 - recon_shape[..., -1] # from camera space to world space
|
204 |
-
recon_shape = recon_shape.cpu().numpy()[0]
|
205 |
-
recon_color = self.pred_color
|
206 |
-
recon_color = recon_color.cpu().numpy()[0]
|
207 |
-
tri = self.facemodel.face_buf.cpu().numpy()
|
208 |
-
mesh = trimesh.Trimesh(vertices=recon_shape, faces=tri, vertex_colors=np.clip(255. * recon_color, 0, 255).astype(np.uint8))
|
209 |
-
mesh.export(name)
|
210 |
-
|
211 |
-
def save_coeff(self,name):
|
212 |
-
|
213 |
-
pred_coeffs = {key:self.pred_coeffs_dict[key].cpu().numpy() for key in self.pred_coeffs_dict}
|
214 |
-
pred_lm = self.pred_lm.cpu().numpy()
|
215 |
-
pred_lm = np.stack([pred_lm[:,:,0],self.input_img.shape[2]-1-pred_lm[:,:,1]],axis=2) # transfer to image coordinate
|
216 |
-
pred_coeffs['lm68'] = pred_lm
|
217 |
-
savemat(name,pred_coeffs)
|
218 |
-
|
219 |
-
|
220 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/grid_sample_gradfix.py
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Custom replacement for `torch.nn.functional.grid_sample` that
|
10 |
-
supports arbitrarily high order gradients between the input and output.
|
11 |
-
Only works on 2D images and assumes
|
12 |
-
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
|
13 |
-
|
14 |
-
import warnings
|
15 |
-
import torch
|
16 |
-
|
17 |
-
# pylint: disable=redefined-builtin
|
18 |
-
# pylint: disable=arguments-differ
|
19 |
-
# pylint: disable=protected-access
|
20 |
-
|
21 |
-
#----------------------------------------------------------------------------
|
22 |
-
|
23 |
-
enabled = False # Enable the custom op by setting this to true.
|
24 |
-
|
25 |
-
#----------------------------------------------------------------------------
|
26 |
-
|
27 |
-
def grid_sample(input, grid):
|
28 |
-
if _should_use_custom_op():
|
29 |
-
return _GridSample2dForward.apply(input, grid)
|
30 |
-
return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
|
31 |
-
|
32 |
-
#----------------------------------------------------------------------------
|
33 |
-
|
34 |
-
def _should_use_custom_op():
|
35 |
-
if not enabled:
|
36 |
-
return False
|
37 |
-
if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
|
38 |
-
return True
|
39 |
-
warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
|
40 |
-
return False
|
41 |
-
|
42 |
-
#----------------------------------------------------------------------------
|
43 |
-
|
44 |
-
class _GridSample2dForward(torch.autograd.Function):
|
45 |
-
@staticmethod
|
46 |
-
def forward(ctx, input, grid):
|
47 |
-
assert input.ndim == 4
|
48 |
-
assert grid.ndim == 4
|
49 |
-
output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
|
50 |
-
ctx.save_for_backward(input, grid)
|
51 |
-
return output
|
52 |
-
|
53 |
-
@staticmethod
|
54 |
-
def backward(ctx, grad_output):
|
55 |
-
input, grid = ctx.saved_tensors
|
56 |
-
grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
|
57 |
-
return grad_input, grad_grid
|
58 |
-
|
59 |
-
#----------------------------------------------------------------------------
|
60 |
-
|
61 |
-
class _GridSample2dBackward(torch.autograd.Function):
|
62 |
-
@staticmethod
|
63 |
-
def forward(ctx, grad_output, input, grid):
|
64 |
-
op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
|
65 |
-
grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
|
66 |
-
ctx.save_for_backward(grid)
|
67 |
-
return grad_input, grad_grid
|
68 |
-
|
69 |
-
@staticmethod
|
70 |
-
def backward(ctx, grad2_grad_input, grad2_grad_grid):
|
71 |
-
_ = grad2_grad_grid # unused
|
72 |
-
grid, = ctx.saved_tensors
|
73 |
-
grad2_grad_output = None
|
74 |
-
grad2_input = None
|
75 |
-
grad2_grid = None
|
76 |
-
|
77 |
-
if ctx.needs_input_grad[0]:
|
78 |
-
grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
|
79 |
-
|
80 |
-
assert not ctx.needs_input_grad[2]
|
81 |
-
return grad2_grad_output, grad2_input, grad2_grid
|
82 |
-
|
83 |
-
#----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/dreambooth.md
DELETED
@@ -1,475 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# DreamBooth
|
14 |
-
|
15 |
-
[DreamBooth](https://arxiv.org/abs/2208.12242)는 한 주제에 대한 적은 이미지(3~5개)만으로도 stable diffusion과 같이 text-to-image 모델을 개인화할 수 있는 방법입니다. 이를 통해 모델은 다양한 장면, 포즈 및 장면(뷰)에서 피사체에 대해 맥락화(contextualized)된 이미지를 생성할 수 있습니다.
|
16 |
-
|
17 |
-

|
18 |
-
<a href="https://dreambooth.github.io">project's blog.</a></small>
|
19 |
-
<small><a href="https://dreambooth.github.io">프로젝트 블로그</a>에서의 Dreambooth 예시</small>
|
20 |
-
|
21 |
-
|
22 |
-
이 가이드는 다양한 GPU, Flax 사양에 대해 [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) 모델로 DreamBooth를 파인튜닝하는 방법을 보여줍니다. 더 깊이 파고들어 작동 방식을 확인하는 데 관심이 있는 경우, 이 가이드에 사용된 DreamBooth의 모든 학습 스크립트를 [여기](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)에서 찾을 수 있습니다.
|
23 |
-
|
24 |
-
스크립트를 실행하기 전에 라이브러리의 학습에 필요한 dependencies를 설치해야 합니다. 또한 `main` GitHub 브랜치에서 🧨 Diffusers를 설치하는 것이 좋습니다.
|
25 |
-
|
26 |
-
```bash
|
27 |
-
pip install git+https://github.com/huggingface/diffusers
|
28 |
-
pip install -U -r diffusers/examples/dreambooth/requirements.txt
|
29 |
-
```
|
30 |
-
|
31 |
-
xFormers는 학습에 필요한 요구 사항은 아니지만, 가능하면 [설치](../optimization/xformers)하는 것이 좋습니다. 학습 속도를 높이고 메모리 사용량을 줄일 수 있기 때문입니다.
|
32 |
-
|
33 |
-
모든 dependencies을 설정한 후 다음을 사용하여 [🤗 Accelerate](https://github.com/huggingface/accelerate/) 환경을 다음과 같이 초기화합니다:
|
34 |
-
|
35 |
-
```bash
|
36 |
-
accelerate config
|
37 |
-
```
|
38 |
-
|
39 |
-
별도 설정 없이 기본 🤗 Accelerate 환경을 설치하려면 다음을 실행합니다:
|
40 |
-
|
41 |
-
```bash
|
42 |
-
accelerate config default
|
43 |
-
```
|
44 |
-
|
45 |
-
또는 현재 환경이 노트북과 같은 대화형 셸을 지원하지 않는 경우 다음을 사용할 수 있습니다:
|
46 |
-
|
47 |
-
```py
|
48 |
-
from accelerate.utils import write_basic_config
|
49 |
-
|
50 |
-
write_basic_config()
|
51 |
-
```
|
52 |
-
|
53 |
-
## 파인튜닝
|
54 |
-
|
55 |
-
<Tip warning={true}>
|
56 |
-
|
57 |
-
DreamBooth 파인튜닝은 하이퍼파라미터에 매우 민감하고 과적합되기 쉽습니다. 적절한 하이퍼파라미터를 선택하는 데 도움이 되도록 다양한 권장 설정이 포함된 [심층 분석](https://huggingface.co/blog/dreambooth)을 살펴보는 것이 좋습니다.
|
58 |
-
|
59 |
-
</Tip>
|
60 |
-
|
61 |
-
<frameworkcontent>
|
62 |
-
<pt>
|
63 |
-
[몇 장의 강아지 이미지들](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ)로 DreamBooth를 시도해봅시다.
|
64 |
-
이를 다운로드해 디렉터리에 저장한 다음 `INSTANCE_DIR` 환경 변수를 해당 경로로 설정합니다:
|
65 |
-
|
66 |
-
|
67 |
-
```bash
|
68 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
69 |
-
export INSTANCE_DIR="path_to_training_images"
|
70 |
-
export OUTPUT_DIR="path_to_saved_model"
|
71 |
-
```
|
72 |
-
|
73 |
-
그런 다음, 다음 명령을 사용하여 학습 스크립트를 실행할 수 있습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)에서 찾을 수 있습니다):
|
74 |
-
|
75 |
-
```bash
|
76 |
-
accelerate launch train_dreambooth.py \
|
77 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
78 |
-
--instance_data_dir=$INSTANCE_DIR \
|
79 |
-
--output_dir=$OUTPUT_DIR \
|
80 |
-
--instance_prompt="a photo of sks dog" \
|
81 |
-
--resolution=512 \
|
82 |
-
--train_batch_size=1 \
|
83 |
-
--gradient_accumulation_steps=1 \
|
84 |
-
--learning_rate=5e-6 \
|
85 |
-
--lr_scheduler="constant" \
|
86 |
-
--lr_warmup_steps=0 \
|
87 |
-
--max_train_steps=400
|
88 |
-
```
|
89 |
-
</pt>
|
90 |
-
<jax>
|
91 |
-
|
92 |
-
TPU에 액세스할 수 있거나 더 빠르게 훈련하고 싶다면 [Flax 학습 스크립트](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_flax.py)를 사용해 볼 수 있습니다. Flax 학습 스크립트는 gradient checkpointing 또는 gradient accumulation을 지원하지 않으므로, 메모리가 30GB 이상인 GPU가 필요합니다.
|
93 |
-
|
94 |
-
스크립트를 실행하기 전에 요구 사항이 설치되어 있는지 확인하십시오.
|
95 |
-
|
96 |
-
```bash
|
97 |
-
pip install -U -r requirements.txt
|
98 |
-
```
|
99 |
-
|
100 |
-
그러면 다음 명령어로 학습 스크립트를 실행시킬 수 있습니다:
|
101 |
-
|
102 |
-
```bash
|
103 |
-
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
104 |
-
export INSTANCE_DIR="path-to-instance-images"
|
105 |
-
export OUTPUT_DIR="path-to-save-model"
|
106 |
-
|
107 |
-
python train_dreambooth_flax.py \
|
108 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
109 |
-
--instance_data_dir=$INSTANCE_DIR \
|
110 |
-
--output_dir=$OUTPUT_DIR \
|
111 |
-
--instance_prompt="a photo of sks dog" \
|
112 |
-
--resolution=512 \
|
113 |
-
--train_batch_size=1 \
|
114 |
-
--learning_rate=5e-6 \
|
115 |
-
--max_train_steps=400
|
116 |
-
```
|
117 |
-
</jax>
|
118 |
-
</frameworkcontent>
|
119 |
-
|
120 |
-
### Prior-preserving(사전 보존) loss를 사용한 파인튜닝
|
121 |
-
|
122 |
-
과적합과 language drift를 방지하기 위해 사전 보존이 사용됩니다(관심이 있는 경우 [논문](https://arxiv.org/abs/2208.12242)을 참조하세요). 사전 보존을 위해 동일한 클래스의 다른 이미지를 학습 프로세스의 일부로 사용합니다. 좋은 점은 Stable Diffusion 모델 자체를 사용하여 이러한 이미지를 생성할 수 있다는 것입니다! 학습 스크립트는 생성된 이미지를 우리가 지정한 로컬 경로에 저장합니다.
|
123 |
-
|
124 |
-
저자들에 따르면 사전 보존을 위해 `num_epochs * num_samples`개의 이미지를 생성하는 것이 좋습니다. 200-300개에서 대부분 잘 작동합니다.
|
125 |
-
|
126 |
-
<frameworkcontent>
|
127 |
-
<pt>
|
128 |
-
```bash
|
129 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
130 |
-
export INSTANCE_DIR="path_to_training_images"
|
131 |
-
export CLASS_DIR="path_to_class_images"
|
132 |
-
export OUTPUT_DIR="path_to_saved_model"
|
133 |
-
|
134 |
-
accelerate launch train_dreambooth.py \
|
135 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
136 |
-
--instance_data_dir=$INSTANCE_DIR \
|
137 |
-
--class_data_dir=$CLASS_DIR \
|
138 |
-
--output_dir=$OUTPUT_DIR \
|
139 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
140 |
-
--instance_prompt="a photo of sks dog" \
|
141 |
-
--class_prompt="a photo of dog" \
|
142 |
-
--resolution=512 \
|
143 |
-
--train_batch_size=1 \
|
144 |
-
--gradient_accumulation_steps=1 \
|
145 |
-
--learning_rate=5e-6 \
|
146 |
-
--lr_scheduler="constant" \
|
147 |
-
--lr_warmup_steps=0 \
|
148 |
-
--num_class_images=200 \
|
149 |
-
--max_train_steps=800
|
150 |
-
```
|
151 |
-
</pt>
|
152 |
-
<jax>
|
153 |
-
```bash
|
154 |
-
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
155 |
-
export INSTANCE_DIR="path-to-instance-images"
|
156 |
-
export CLASS_DIR="path-to-class-images"
|
157 |
-
export OUTPUT_DIR="path-to-save-model"
|
158 |
-
|
159 |
-
python train_dreambooth_flax.py \
|
160 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
161 |
-
--instance_data_dir=$INSTANCE_DIR \
|
162 |
-
--class_data_dir=$CLASS_DIR \
|
163 |
-
--output_dir=$OUTPUT_DIR \
|
164 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
165 |
-
--instance_prompt="a photo of sks dog" \
|
166 |
-
--class_prompt="a photo of dog" \
|
167 |
-
--resolution=512 \
|
168 |
-
--train_batch_size=1 \
|
169 |
-
--learning_rate=5e-6 \
|
170 |
-
--num_class_images=200 \
|
171 |
-
--max_train_steps=800
|
172 |
-
```
|
173 |
-
</jax>
|
174 |
-
</frameworkcontent>
|
175 |
-
|
176 |
-
## 텍스트 인코더와 and UNet로 파인튜닝하기
|
177 |
-
|
178 |
-
해당 스크립트를 사용하면 `unet`과 함께 `text_encoder`를 파인튜닝할 수 있습니다. 실험에서(자세한 내용은 [🧨 Diffusers를 사용해 DreamBooth로 Stable Diffusion 학습하기](https://huggingface.co/blog/dreambooth) 게시물을 확인하세요), 특히 얼굴 이미지를 생성할 때 훨씬 더 나은 결과를 얻을 수 있습니다.
|
179 |
-
|
180 |
-
<Tip warning={true}>
|
181 |
-
|
182 |
-
텍스트 인코더를 학습시키려면 추가 메모리가 필요해 16GB GPU로는 동작하지 않습니다. 이 옵션을 사용하려면 최소 24GB VRAM이 필요합니다.
|
183 |
-
|
184 |
-
</Tip>
|
185 |
-
|
186 |
-
`--train_text_encoder` 인수를 학습 스크립트에 전달하여 `text_encoder` 및 `unet`을 파인튜닝할 수 있습니다:
|
187 |
-
|
188 |
-
<frameworkcontent>
|
189 |
-
<pt>
|
190 |
-
```bash
|
191 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
192 |
-
export INSTANCE_DIR="path_to_training_images"
|
193 |
-
export CLASS_DIR="path_to_class_images"
|
194 |
-
export OUTPUT_DIR="path_to_saved_model"
|
195 |
-
|
196 |
-
accelerate launch train_dreambooth.py \
|
197 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
198 |
-
--train_text_encoder \
|
199 |
-
--instance_data_dir=$INSTANCE_DIR \
|
200 |
-
--class_data_dir=$CLASS_DIR \
|
201 |
-
--output_dir=$OUTPUT_DIR \
|
202 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
203 |
-
--instance_prompt="a photo of sks dog" \
|
204 |
-
--class_prompt="a photo of dog" \
|
205 |
-
--resolution=512 \
|
206 |
-
--train_batch_size=1 \
|
207 |
-
--use_8bit_adam
|
208 |
-
--gradient_checkpointing \
|
209 |
-
--learning_rate=2e-6 \
|
210 |
-
--lr_scheduler="constant" \
|
211 |
-
--lr_warmup_steps=0 \
|
212 |
-
--num_class_images=200 \
|
213 |
-
--max_train_steps=800
|
214 |
-
```
|
215 |
-
</pt>
|
216 |
-
<jax>
|
217 |
-
```bash
|
218 |
-
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
|
219 |
-
export INSTANCE_DIR="path-to-instance-images"
|
220 |
-
export CLASS_DIR="path-to-class-images"
|
221 |
-
export OUTPUT_DIR="path-to-save-model"
|
222 |
-
|
223 |
-
python train_dreambooth_flax.py \
|
224 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
225 |
-
--train_text_encoder \
|
226 |
-
--instance_data_dir=$INSTANCE_DIR \
|
227 |
-
--class_data_dir=$CLASS_DIR \
|
228 |
-
--output_dir=$OUTPUT_DIR \
|
229 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
230 |
-
--instance_prompt="a photo of sks dog" \
|
231 |
-
--class_prompt="a photo of dog" \
|
232 |
-
--resolution=512 \
|
233 |
-
--train_batch_size=1 \
|
234 |
-
--learning_rate=2e-6 \
|
235 |
-
--num_class_images=200 \
|
236 |
-
--max_train_steps=800
|
237 |
-
```
|
238 |
-
</jax>
|
239 |
-
</frameworkcontent>
|
240 |
-
|
241 |
-
## LoRA로 파인튜닝하기
|
242 |
-
|
243 |
-
DreamBooth에서 대규모 모델의 학습을 가속화하기 위한 파인튜닝 기술인 LoRA(Low-Rank Adaptation of Large Language Models)를 사용할 수 있습니다. 자세한 내용은 [LoRA 학습](training/lora#dreambooth) 가이드를 참조하세요.
|
244 |
-
|
245 |
-
### 학습 중 체크포인트 저장하기
|
246 |
-
|
247 |
-
Dreambooth로 훈련하는 동안 과적합하기 쉬우므로, 때때로 학습 중에 정기적인 체크포인트를 저장하는 것이 유용합니다. 중간 체크포인트 중 하나가 최종 모델보다 더 잘 작동할 수 있습니다! 체크포인트 저장 기능을 활성화하려면 학습 스크립트에 다음 인수를 전달해야 합니다:
|
248 |
-
|
249 |
-
```bash
|
250 |
-
--checkpointing_steps=500
|
251 |
-
```
|
252 |
-
|
253 |
-
이렇게 하면 `output_dir`의 하위 폴더에 전체 학습 상태가 저장됩니다. 하위 폴더 이름은 접두사 `checkpoint-`로 시작하고 지금까지 수행된 step 수입니다. 예시로 `checkpoint-1500`은 1500 학습 step 후에 저장된 체크포인트입니다.
|
254 |
-
|
255 |
-
#### 저장된 체크포인트에서 훈련 재개하기
|
256 |
-
|
257 |
-
저장된 체크포인트에서 훈련을 재개하려면, `--resume_from_checkpoint` 인수를 전달한 다음 사용할 체크포인트의 이름을 지정하면 됩니다. 특수 문자열 `"latest"`를 사용하여 저장된 마지막 체크포인트(즉, step 수가 가장 많은 체크포인트)에서 재개할 수도 있습니다. 예를 들어 다음은 1500 step 후에 저장된 체크포인트에서부터 학습을 재개합니다:
|
258 |
-
|
259 |
-
```bash
|
260 |
-
--resume_from_checkpoint="checkpoint-1500"
|
261 |
-
```
|
262 |
-
|
263 |
-
원하는 경우 일부 하이퍼파라미터를 조정할 수 있습니다.
|
264 |
-
|
265 |
-
#### 저장된 체크포인트를 사용하여 추론 수행하기
|
266 |
-
|
267 |
-
저장된 체크포인트는 훈련 재개에 적합한 형식으로 저장됩니다. 여기에는 모델 가중치뿐만 아니라 옵티마이저, 데이터 로더 및 학습률의 상태도 포함됩니다.
|
268 |
-
|
269 |
-
**`"accelerate>=0.16.0"`**이 설치된 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행합니다.
|
270 |
-
|
271 |
-
```python
|
272 |
-
from diffusers import DiffusionPipeline, UNet2DConditionModel
|
273 |
-
from transformers import CLIPTextModel
|
274 |
-
import torch
|
275 |
-
|
276 |
-
# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다.
|
277 |
-
model_id = "CompVis/stable-diffusion-v1-4"
|
278 |
-
|
279 |
-
unet = UNet2DConditionModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/unet")
|
280 |
-
|
281 |
-
# `args.train_text_encoder`로 학습한 경우면 텍스트 인코더를 꼭 불러오세요
|
282 |
-
text_encoder = CLIPTextModel.from_pretrained("/sddata/dreambooth/daruma-v2-1/checkpoint-100/text_encoder")
|
283 |
-
|
284 |
-
pipeline = DiffusionPipeline.from_pretrained(model_id, unet=unet, text_encoder=text_encoder, dtype=torch.float16)
|
285 |
-
pipeline.to("cuda")
|
286 |
-
|
287 |
-
# 추론을 수행하거나 저장하거나, 허브에 푸시합니다.
|
288 |
-
pipeline.save_pretrained("dreambooth-pipeline")
|
289 |
-
```
|
290 |
-
|
291 |
-
If you have **`"accelerate<0.16.0"`** installed, you need to convert it to an inference pipeline first:
|
292 |
-
|
293 |
-
```python
|
294 |
-
from accelerate import Accelerator
|
295 |
-
from diffusers import DiffusionPipeline
|
296 |
-
|
297 |
-
# 학습에 사용된 것과 동일한 인수(model, revision)로 파이프라인을 불러옵니다.
|
298 |
-
model_id = "CompVis/stable-diffusion-v1-4"
|
299 |
-
pipeline = DiffusionPipeline.from_pretrained(model_id)
|
300 |
-
|
301 |
-
accelerator = Accelerator()
|
302 |
-
|
303 |
-
# 초기 학습에 `--train_text_encoder`가 사용된 경우 text_encoder를 사용합니다.
|
304 |
-
unet, text_encoder = accelerator.prepare(pipeline.unet, pipeline.text_encoder)
|
305 |
-
|
306 |
-
# 체크포인트 경로로부터 상태를 복원합니다. 여기서는 절대 경로를 사용해야 합니다.
|
307 |
-
accelerator.load_state("/sddata/dreambooth/daruma-v2-1/checkpoint-100")
|
308 |
-
|
309 |
-
# unwrapped 모델로 파이프라인을 다시 빌드합니다.(.unet and .text_encoder로의 할당도 작동해야 합니다)
|
310 |
-
pipeline = DiffusionPipeline.from_pretrained(
|
311 |
-
model_id,
|
312 |
-
unet=accelerator.unwrap_model(unet),
|
313 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
314 |
-
)
|
315 |
-
|
316 |
-
# 추론을 수행하거나 저장하거나, 허브에 푸시합니다.
|
317 |
-
pipeline.save_pretrained("dreambooth-pipeline")
|
318 |
-
```
|
319 |
-
|
320 |
-
## 각 GPU 용량에서의 최적화
|
321 |
-
|
322 |
-
하드웨어에 따라 16GB에서 8GB까지 GPU에서 DreamBooth를 최적화하는 몇 가지 방법이 있습니다!
|
323 |
-
|
324 |
-
### xFormers
|
325 |
-
|
326 |
-
[xFormers](https://github.com/facebookresearch/xformers)는 Transformers를 최적화하기 위한 toolbox이며, 🧨 Diffusers에서 사용되는[memory-efficient attention](https://facebookresearch.github.io/xformers/components/ops.html#module-xformers.ops) 메커니즘을 포함하고 있습니다. [xFormers를 설치](./optimization/xformers)한 다음 학습 스크립트에 다음 인수를 추가합니다:
|
327 |
-
|
328 |
-
```bash
|
329 |
-
--enable_xformers_memory_efficient_attention
|
330 |
-
```
|
331 |
-
|
332 |
-
xFormers는 Flax에서 사용할 수 없습니다.
|
333 |
-
|
334 |
-
### 그래디언트 없음으로 설정
|
335 |
-
|
336 |
-
메모리 사용량을 줄일 수 있는 또 다른 방법은 [기울기 설정](https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html)을 0 대신 `None`으로 하는 것입니다. 그러나 이로 인해 특정 동작이 변경될 수 있으므로 문제가 발생하면 �� 인수를 제거해 보십시오. 학습 스크립트에 다음 인수를 추가하여 그래디언트를 `None`으로 설정합니다.
|
337 |
-
|
338 |
-
```bash
|
339 |
-
--set_grads_to_none
|
340 |
-
```
|
341 |
-
|
342 |
-
### 16GB GPU
|
343 |
-
|
344 |
-
Gradient checkpointing과 [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)의 8비트 옵티마이저의 도움으로, 16GB GPU에서 dreambooth를 훈련할 수 있습니다. bitsandbytes가 설치되어 있는지 확인하세요:
|
345 |
-
|
346 |
-
```bash
|
347 |
-
pip install bitsandbytes
|
348 |
-
```
|
349 |
-
|
350 |
-
그 다음, 학습 스크립트에 `--use_8bit_adam` 옵션을 명시합니다:
|
351 |
-
|
352 |
-
```bash
|
353 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
354 |
-
export INSTANCE_DIR="path_to_training_images"
|
355 |
-
export CLASS_DIR="path_to_class_images"
|
356 |
-
export OUTPUT_DIR="path_to_saved_model"
|
357 |
-
|
358 |
-
accelerate launch train_dreambooth.py \
|
359 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
360 |
-
--instance_data_dir=$INSTANCE_DIR \
|
361 |
-
--class_data_dir=$CLASS_DIR \
|
362 |
-
--output_dir=$OUTPUT_DIR \
|
363 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
364 |
-
--instance_prompt="a photo of sks dog" \
|
365 |
-
--class_prompt="a photo of dog" \
|
366 |
-
--resolution=512 \
|
367 |
-
--train_batch_size=1 \
|
368 |
-
--gradient_accumulation_steps=2 --gradient_checkpointing \
|
369 |
-
--use_8bit_adam \
|
370 |
-
--learning_rate=5e-6 \
|
371 |
-
--lr_scheduler="constant" \
|
372 |
-
--lr_warmup_steps=0 \
|
373 |
-
--num_class_images=200 \
|
374 |
-
--max_train_steps=800
|
375 |
-
```
|
376 |
-
|
377 |
-
### 12GB GPU
|
378 |
-
|
379 |
-
12GB GPU에서 DreamBooth를 실행하려면 gradient checkpointing, 8비트 옵티마이저, xFormers를 활성화하고 그래디언트를 `None`으로 설정해야 합니다.
|
380 |
-
|
381 |
-
```bash
|
382 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
383 |
-
export INSTANCE_DIR="path-to-instance-images"
|
384 |
-
export CLASS_DIR="path-to-class-images"
|
385 |
-
export OUTPUT_DIR="path-to-save-model"
|
386 |
-
|
387 |
-
accelerate launch train_dreambooth.py \
|
388 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
389 |
-
--instance_data_dir=$INSTANCE_DIR \
|
390 |
-
--class_data_dir=$CLASS_DIR \
|
391 |
-
--output_dir=$OUTPUT_DIR \
|
392 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
393 |
-
--instance_prompt="a photo of sks dog" \
|
394 |
-
--class_prompt="a photo of dog" \
|
395 |
-
--resolution=512 \
|
396 |
-
--train_batch_size=1 \
|
397 |
-
--gradient_accumulation_steps=1 --gradient_checkpointing \
|
398 |
-
--use_8bit_adam \
|
399 |
-
--enable_xformers_memory_efficient_attention \
|
400 |
-
--set_grads_to_none \
|
401 |
-
--learning_rate=2e-6 \
|
402 |
-
--lr_scheduler="constant" \
|
403 |
-
--lr_warmup_steps=0 \
|
404 |
-
--num_class_images=200 \
|
405 |
-
--max_train_steps=800
|
406 |
-
```
|
407 |
-
|
408 |
-
### 8GB GPU에서 학습하기
|
409 |
-
|
410 |
-
8GB GPU에 대해서는 [DeepSpeed](https://www.deepspeed.ai/)를 사용해 일부 텐서를 VRAM에서 CPU 또는 NVME로 오프로드하여 더 적은 GPU 메모리로 학습할 수도 있습니다.
|
411 |
-
|
412 |
-
🤗 Accelerate 환경을 구성하려면 다음 명령을 실행하세요:
|
413 |
-
|
414 |
-
```bash
|
415 |
-
accelerate config
|
416 |
-
```
|
417 |
-
|
418 |
-
환경 구성 중에 DeepSpeed를 사용할 것을 확인하세요.
|
419 |
-
그러면 DeepSpeed stage 2, fp16 혼합 정밀도를 결합하고 모델 매개변수와 옵티마이저 상태를 모두 CPU로 오프로드하면 8GB VRAM 미만에서 학습할 수 있습니다.
|
420 |
-
단점은 더 많은 시스템 RAM(약 25GB)이 필요하다는 것입니다. 추가 구성 옵션은 [DeepSpeed 문서](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)를 참조하세요.
|
421 |
-
|
422 |
-
또한 기본 Adam 옵티마이저를 DeepSpeed의 최적화된 Adam 버전으로 변경해야 합니다.
|
423 |
-
이는 상당한 속도 향상을 위한 Adam인 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu)입니다.
|
424 |
-
`DeepSpeedCPUAdam`을 활성화하려면 시스템의 CUDA toolchain 버전이 PyTorch와 함께 설치된 것과 동일해야 합니다.
|
425 |
-
|
426 |
-
8비트 옵티마이저는 현재 DeepSpeed와 호환되지 않는 것 같습니다.
|
427 |
-
|
428 |
-
다음 명령으로 학습을 시작합니다:
|
429 |
-
|
430 |
-
```bash
|
431 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
432 |
-
export INSTANCE_DIR="path_to_training_images"
|
433 |
-
export CLASS_DIR="path_to_class_images"
|
434 |
-
export OUTPUT_DIR="path_to_saved_model"
|
435 |
-
|
436 |
-
accelerate launch train_dreambooth.py \
|
437 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
438 |
-
--instance_data_dir=$INSTANCE_DIR \
|
439 |
-
--class_data_dir=$CLASS_DIR \
|
440 |
-
--output_dir=$OUTPUT_DIR \
|
441 |
-
--with_prior_preservation --prior_loss_weight=1.0 \
|
442 |
-
--instance_prompt="a photo of sks dog" \
|
443 |
-
--class_prompt="a photo of dog" \
|
444 |
-
--resolution=512 \
|
445 |
-
--train_batch_size=1 \
|
446 |
-
--sample_batch_size=1 \
|
447 |
-
--gradient_accumulation_steps=1 --gradient_checkpointing \
|
448 |
-
--learning_rate=5e-6 \
|
449 |
-
--lr_scheduler="constant" \
|
450 |
-
--lr_warmup_steps=0 \
|
451 |
-
--num_class_images=200 \
|
452 |
-
--max_train_steps=800 \
|
453 |
-
--mixed_precision=fp16
|
454 |
-
```
|
455 |
-
|
456 |
-
## 추론
|
457 |
-
|
458 |
-
모델을 학습한 후에는, 모델이 저장된 경로를 지정해 [`StableDiffusionPipeline`]로 추론을 수행할 수 있습니다. 프롬프트에 학습에 사용된 특수 `식별자`(이전 예시의 `sks`)가 포함되어 있는지 확인하세요.
|
459 |
-
|
460 |
-
**`"accelerate>=0.16.0"`**이 설치되어 있는 경우 다음 코드를 사용하여 중간 체크포인트에서 추론을 실행할 수 있습니다:
|
461 |
-
|
462 |
-
```python
|
463 |
-
from diffusers import StableDiffusionPipeline
|
464 |
-
import torch
|
465 |
-
|
466 |
-
model_id = "path_to_saved_model"
|
467 |
-
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
|
468 |
-
|
469 |
-
prompt = "A photo of sks dog in a bucket"
|
470 |
-
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
|
471 |
-
|
472 |
-
image.save("dog-bucket.png")
|
473 |
-
```
|
474 |
-
|
475 |
-
[저장된 학습 체크포인트](#inference-from-a-saved-checkpoint)에서도 추론을 실행할 수도 있습니다.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/README.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
# An Empirical Study of Spatial Attention Mechanisms in Deep Networks
|
2 |
-
|
3 |
-
## Introduction
|
4 |
-
|
5 |
-
[ALGORITHM]
|
6 |
-
|
7 |
-
```latex
|
8 |
-
@article{zhu2019empirical,
|
9 |
-
title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks},
|
10 |
-
author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng},
|
11 |
-
journal={arXiv preprint arXiv:1904.05873},
|
12 |
-
year={2019}
|
13 |
-
}
|
14 |
-
```
|
15 |
-
|
16 |
-
## Results and Models
|
17 |
-
|
18 |
-
| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|
19 |
-
|:---------:|:-------------------:|:----:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
|
20 |
-
| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) |
|
21 |
-
| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) |
|
22 |
-
| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) |
|
23 |
-
| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://detectron2/resnet101_caffe',
|
4 |
-
backbone=dict(depth=101))
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/base.py
DELETED
@@ -1,355 +0,0 @@
|
|
1 |
-
from abc import ABCMeta, abstractmethod
|
2 |
-
from collections import OrderedDict
|
3 |
-
|
4 |
-
import mmcv
|
5 |
-
import numpy as np
|
6 |
-
import torch
|
7 |
-
import torch.distributed as dist
|
8 |
-
import torch.nn as nn
|
9 |
-
from mmcv.runner import auto_fp16
|
10 |
-
from mmcv.utils import print_log
|
11 |
-
|
12 |
-
from mmdet.core.visualization import imshow_det_bboxes
|
13 |
-
from mmdet.utils import get_root_logger
|
14 |
-
|
15 |
-
|
16 |
-
class BaseDetector(nn.Module, metaclass=ABCMeta):
|
17 |
-
"""Base class for detectors."""
|
18 |
-
|
19 |
-
def __init__(self):
|
20 |
-
super(BaseDetector, self).__init__()
|
21 |
-
self.fp16_enabled = False
|
22 |
-
|
23 |
-
@property
|
24 |
-
def with_neck(self):
|
25 |
-
"""bool: whether the detector has a neck"""
|
26 |
-
return hasattr(self, 'neck') and self.neck is not None
|
27 |
-
|
28 |
-
# TODO: these properties need to be carefully handled
|
29 |
-
# for both single stage & two stage detectors
|
30 |
-
@property
|
31 |
-
def with_shared_head(self):
|
32 |
-
"""bool: whether the detector has a shared head in the RoI Head"""
|
33 |
-
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
|
34 |
-
|
35 |
-
@property
|
36 |
-
def with_bbox(self):
|
37 |
-
"""bool: whether the detector has a bbox head"""
|
38 |
-
return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
|
39 |
-
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
|
40 |
-
|
41 |
-
@property
|
42 |
-
def with_mask(self):
|
43 |
-
"""bool: whether the detector has a mask head"""
|
44 |
-
return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
|
45 |
-
or (hasattr(self, 'mask_head') and self.mask_head is not None))
|
46 |
-
|
47 |
-
@abstractmethod
|
48 |
-
def extract_feat(self, imgs):
|
49 |
-
"""Extract features from images."""
|
50 |
-
pass
|
51 |
-
|
52 |
-
def extract_feats(self, imgs):
|
53 |
-
"""Extract features from multiple images.
|
54 |
-
|
55 |
-
Args:
|
56 |
-
imgs (list[torch.Tensor]): A list of images. The images are
|
57 |
-
augmented from the same image but in different ways.
|
58 |
-
|
59 |
-
Returns:
|
60 |
-
list[torch.Tensor]: Features of different images
|
61 |
-
"""
|
62 |
-
assert isinstance(imgs, list)
|
63 |
-
return [self.extract_feat(img) for img in imgs]
|
64 |
-
|
65 |
-
def forward_train(self, imgs, img_metas, **kwargs):
|
66 |
-
"""
|
67 |
-
Args:
|
68 |
-
img (list[Tensor]): List of tensors of shape (1, C, H, W).
|
69 |
-
Typically these should be mean centered and std scaled.
|
70 |
-
img_metas (list[dict]): List of image info dict where each dict
|
71 |
-
has: 'img_shape', 'scale_factor', 'flip', and may also contain
|
72 |
-
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
|
73 |
-
For details on the values of these keys, see
|
74 |
-
:class:`mmdet.datasets.pipelines.Collect`.
|
75 |
-
kwargs (keyword arguments): Specific to concrete implementation.
|
76 |
-
"""
|
77 |
-
# NOTE the batched image size information may be useful, e.g.
|
78 |
-
# in DETR, this is needed for the construction of masks, which is
|
79 |
-
# then used for the transformer_head.
|
80 |
-
batch_input_shape = tuple(imgs[0].size()[-2:])
|
81 |
-
for img_meta in img_metas:
|
82 |
-
img_meta['batch_input_shape'] = batch_input_shape
|
83 |
-
|
84 |
-
async def async_simple_test(self, img, img_metas, **kwargs):
|
85 |
-
raise NotImplementedError
|
86 |
-
|
87 |
-
@abstractmethod
|
88 |
-
def simple_test(self, img, img_metas, **kwargs):
|
89 |
-
pass
|
90 |
-
|
91 |
-
@abstractmethod
|
92 |
-
def aug_test(self, imgs, img_metas, **kwargs):
|
93 |
-
"""Test function with test time augmentation."""
|
94 |
-
pass
|
95 |
-
|
96 |
-
def init_weights(self, pretrained=None):
|
97 |
-
"""Initialize the weights in detector.
|
98 |
-
|
99 |
-
Args:
|
100 |
-
pretrained (str, optional): Path to pre-trained weights.
|
101 |
-
Defaults to None.
|
102 |
-
"""
|
103 |
-
if pretrained is not None:
|
104 |
-
logger = get_root_logger()
|
105 |
-
print_log(f'load model from: {pretrained}', logger=logger)
|
106 |
-
|
107 |
-
async def aforward_test(self, *, img, img_metas, **kwargs):
|
108 |
-
for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
|
109 |
-
if not isinstance(var, list):
|
110 |
-
raise TypeError(f'{name} must be a list, but got {type(var)}')
|
111 |
-
|
112 |
-
num_augs = len(img)
|
113 |
-
if num_augs != len(img_metas):
|
114 |
-
raise ValueError(f'num of augmentations ({len(img)}) '
|
115 |
-
f'!= num of image metas ({len(img_metas)})')
|
116 |
-
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
|
117 |
-
samples_per_gpu = img[0].size(0)
|
118 |
-
assert samples_per_gpu == 1
|
119 |
-
|
120 |
-
if num_augs == 1:
|
121 |
-
return await self.async_simple_test(img[0], img_metas[0], **kwargs)
|
122 |
-
else:
|
123 |
-
raise NotImplementedError
|
124 |
-
|
125 |
-
def forward_test(self, imgs, img_metas, **kwargs):
|
126 |
-
"""
|
127 |
-
Args:
|
128 |
-
imgs (List[Tensor]): the outer list indicates test-time
|
129 |
-
augmentations and inner Tensor should have a shape NxCxHxW,
|
130 |
-
which contains all images in the batch.
|
131 |
-
img_metas (List[List[dict]]): the outer list indicates test-time
|
132 |
-
augs (multiscale, flip, etc.) and the inner list indicates
|
133 |
-
images in a batch.
|
134 |
-
"""
|
135 |
-
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
|
136 |
-
if not isinstance(var, list):
|
137 |
-
raise TypeError(f'{name} must be a list, but got {type(var)}')
|
138 |
-
|
139 |
-
num_augs = len(imgs)
|
140 |
-
if num_augs != len(img_metas):
|
141 |
-
raise ValueError(f'num of augmentations ({len(imgs)}) '
|
142 |
-
f'!= num of image meta ({len(img_metas)})')
|
143 |
-
|
144 |
-
# NOTE the batched image size information may be useful, e.g.
|
145 |
-
# in DETR, this is needed for the construction of masks, which is
|
146 |
-
# then used for the transformer_head.
|
147 |
-
for img, img_meta in zip(imgs, img_metas):
|
148 |
-
batch_size = len(img_meta)
|
149 |
-
for img_id in range(batch_size):
|
150 |
-
img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
|
151 |
-
|
152 |
-
if num_augs == 1:
|
153 |
-
# proposals (List[List[Tensor]]): the outer list indicates
|
154 |
-
# test-time augs (multiscale, flip, etc.) and the inner list
|
155 |
-
# indicates images in a batch.
|
156 |
-
# The Tensor should have a shape Px4, where P is the number of
|
157 |
-
# proposals.
|
158 |
-
if 'proposals' in kwargs:
|
159 |
-
kwargs['proposals'] = kwargs['proposals'][0]
|
160 |
-
return self.simple_test(imgs[0], img_metas[0], **kwargs)
|
161 |
-
else:
|
162 |
-
assert imgs[0].size(0) == 1, 'aug test does not support ' \
|
163 |
-
'inference with batch size ' \
|
164 |
-
f'{imgs[0].size(0)}'
|
165 |
-
# TODO: support test augmentation for predefined proposals
|
166 |
-
assert 'proposals' not in kwargs
|
167 |
-
return self.aug_test(imgs, img_metas, **kwargs)
|
168 |
-
|
169 |
-
@auto_fp16(apply_to=('img', ))
|
170 |
-
def forward(self, img, img_metas, return_loss=True, **kwargs):
|
171 |
-
"""Calls either :func:`forward_train` or :func:`forward_test` depending
|
172 |
-
on whether ``return_loss`` is ``True``.
|
173 |
-
|
174 |
-
Note this setting will change the expected inputs. When
|
175 |
-
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
|
176 |
-
and List[dict]), and when ``resturn_loss=False``, img and img_meta
|
177 |
-
should be double nested (i.e. List[Tensor], List[List[dict]]), with
|
178 |
-
the outer list indicating test time augmentations.
|
179 |
-
"""
|
180 |
-
if return_loss:
|
181 |
-
return self.forward_train(img, img_metas, **kwargs)
|
182 |
-
else:
|
183 |
-
return self.forward_test(img, img_metas, **kwargs)
|
184 |
-
|
185 |
-
def _parse_losses(self, losses):
|
186 |
-
"""Parse the raw outputs (losses) of the network.
|
187 |
-
|
188 |
-
Args:
|
189 |
-
losses (dict): Raw output of the network, which usually contain
|
190 |
-
losses and other necessary infomation.
|
191 |
-
|
192 |
-
Returns:
|
193 |
-
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
|
194 |
-
which may be a weighted sum of all losses, log_vars contains \
|
195 |
-
all the variables to be sent to the logger.
|
196 |
-
"""
|
197 |
-
log_vars = OrderedDict()
|
198 |
-
for loss_name, loss_value in losses.items():
|
199 |
-
if isinstance(loss_value, torch.Tensor):
|
200 |
-
log_vars[loss_name] = loss_value.mean()
|
201 |
-
elif isinstance(loss_value, list):
|
202 |
-
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
|
203 |
-
else:
|
204 |
-
raise TypeError(
|
205 |
-
f'{loss_name} is not a tensor or list of tensors')
|
206 |
-
|
207 |
-
loss = sum(_value for _key, _value in log_vars.items()
|
208 |
-
if 'loss' in _key)
|
209 |
-
|
210 |
-
log_vars['loss'] = loss
|
211 |
-
for loss_name, loss_value in log_vars.items():
|
212 |
-
# reduce loss when distributed training
|
213 |
-
if dist.is_available() and dist.is_initialized():
|
214 |
-
loss_value = loss_value.data.clone()
|
215 |
-
dist.all_reduce(loss_value.div_(dist.get_world_size()))
|
216 |
-
log_vars[loss_name] = loss_value.item()
|
217 |
-
|
218 |
-
return loss, log_vars
|
219 |
-
|
220 |
-
def train_step(self, data, optimizer):
|
221 |
-
"""The iteration step during training.
|
222 |
-
|
223 |
-
This method defines an iteration step during training, except for the
|
224 |
-
back propagation and optimizer updating, which are done in an optimizer
|
225 |
-
hook. Note that in some complicated cases or models, the whole process
|
226 |
-
including back propagation and optimizer updating is also defined in
|
227 |
-
this method, such as GAN.
|
228 |
-
|
229 |
-
Args:
|
230 |
-
data (dict): The output of dataloader.
|
231 |
-
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
|
232 |
-
runner is passed to ``train_step()``. This argument is unused
|
233 |
-
and reserved.
|
234 |
-
|
235 |
-
Returns:
|
236 |
-
dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
|
237 |
-
``num_samples``.
|
238 |
-
|
239 |
-
- ``loss`` is a tensor for back propagation, which can be a \
|
240 |
-
weighted sum of multiple losses.
|
241 |
-
- ``log_vars`` contains all the variables to be sent to the
|
242 |
-
logger.
|
243 |
-
- ``num_samples`` indicates the batch size (when the model is \
|
244 |
-
DDP, it means the batch size on each GPU), which is used for \
|
245 |
-
averaging the logs.
|
246 |
-
"""
|
247 |
-
losses = self(**data)
|
248 |
-
loss, log_vars = self._parse_losses(losses)
|
249 |
-
|
250 |
-
outputs = dict(
|
251 |
-
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
|
252 |
-
|
253 |
-
return outputs
|
254 |
-
|
255 |
-
def val_step(self, data, optimizer):
|
256 |
-
"""The iteration step during validation.
|
257 |
-
|
258 |
-
This method shares the same signature as :func:`train_step`, but used
|
259 |
-
during val epochs. Note that the evaluation after training epochs is
|
260 |
-
not implemented with this method, but an evaluation hook.
|
261 |
-
"""
|
262 |
-
losses = self(**data)
|
263 |
-
loss, log_vars = self._parse_losses(losses)
|
264 |
-
|
265 |
-
outputs = dict(
|
266 |
-
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
|
267 |
-
|
268 |
-
return outputs
|
269 |
-
|
270 |
-
def show_result(self,
|
271 |
-
img,
|
272 |
-
result,
|
273 |
-
score_thr=0.3,
|
274 |
-
bbox_color=(72, 101, 241),
|
275 |
-
text_color=(72, 101, 241),
|
276 |
-
mask_color=None,
|
277 |
-
thickness=2,
|
278 |
-
font_size=13,
|
279 |
-
win_name='',
|
280 |
-
show=False,
|
281 |
-
wait_time=0,
|
282 |
-
out_file=None):
|
283 |
-
"""Draw `result` over `img`.
|
284 |
-
|
285 |
-
Args:
|
286 |
-
img (str or Tensor): The image to be displayed.
|
287 |
-
result (Tensor or tuple): The results to draw over `img`
|
288 |
-
bbox_result or (bbox_result, segm_result).
|
289 |
-
score_thr (float, optional): Minimum score of bboxes to be shown.
|
290 |
-
Default: 0.3.
|
291 |
-
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
|
292 |
-
The tuple of color should be in BGR order. Default: 'green'
|
293 |
-
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
|
294 |
-
The tuple of color should be in BGR order. Default: 'green'
|
295 |
-
mask_color (None or str or tuple(int) or :obj:`Color`):
|
296 |
-
Color of masks. The tuple of color should be in BGR order.
|
297 |
-
Default: None
|
298 |
-
thickness (int): Thickness of lines. Default: 2
|
299 |
-
font_size (int): Font size of texts. Default: 13
|
300 |
-
win_name (str): The window name. Default: ''
|
301 |
-
wait_time (float): Value of waitKey param.
|
302 |
-
Default: 0.
|
303 |
-
show (bool): Whether to show the image.
|
304 |
-
Default: False.
|
305 |
-
out_file (str or None): The filename to write the image.
|
306 |
-
Default: None.
|
307 |
-
|
308 |
-
Returns:
|
309 |
-
img (Tensor): Only if not `show` or `out_file`
|
310 |
-
"""
|
311 |
-
img = mmcv.imread(img)
|
312 |
-
img = img.copy()
|
313 |
-
if isinstance(result, tuple):
|
314 |
-
bbox_result, segm_result = result
|
315 |
-
if isinstance(segm_result, tuple):
|
316 |
-
segm_result = segm_result[0] # ms rcnn
|
317 |
-
else:
|
318 |
-
bbox_result, segm_result = result, None
|
319 |
-
bboxes = np.vstack(bbox_result)
|
320 |
-
labels = [
|
321 |
-
np.full(bbox.shape[0], i, dtype=np.int32)
|
322 |
-
for i, bbox in enumerate(bbox_result)
|
323 |
-
]
|
324 |
-
labels = np.concatenate(labels)
|
325 |
-
# draw segmentation masks
|
326 |
-
segms = None
|
327 |
-
if segm_result is not None and len(labels) > 0: # non empty
|
328 |
-
segms = mmcv.concat_list(segm_result)
|
329 |
-
if isinstance(segms[0], torch.Tensor):
|
330 |
-
segms = torch.stack(segms, dim=0).detach().cpu().numpy()
|
331 |
-
else:
|
332 |
-
segms = np.stack(segms, axis=0)
|
333 |
-
# if out_file specified, do not show image in window
|
334 |
-
if out_file is not None:
|
335 |
-
show = False
|
336 |
-
# draw bounding boxes
|
337 |
-
img = imshow_det_bboxes(
|
338 |
-
img,
|
339 |
-
bboxes,
|
340 |
-
labels,
|
341 |
-
segms,
|
342 |
-
class_names=self.CLASSES,
|
343 |
-
score_thr=score_thr,
|
344 |
-
bbox_color=bbox_color,
|
345 |
-
text_color=text_color,
|
346 |
-
mask_color=mask_color,
|
347 |
-
thickness=thickness,
|
348 |
-
font_size=font_size,
|
349 |
-
win_name=win_name,
|
350 |
-
show=show,
|
351 |
-
wait_time=wait_time,
|
352 |
-
out_file=out_file)
|
353 |
-
|
354 |
-
# if not (show or out_file):
|
355 |
-
return img
spaces/Andy1621/uniformer_image_detection/tools/deployment/onnx2tensorrt.py
DELETED
@@ -1,179 +0,0 @@
-import argparse
-import os
-import os.path as osp
-
-import numpy as np
-import onnx
-import onnxruntime as ort
-import torch
-from mmcv.ops import get_onnxruntime_op_path
-from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt,
-                           save_trt_engine)
-from mmcv.visualization.image import imshow_det_bboxes
-
-from mmdet.core import get_classes, preprocess_example_input
-
-
-def get_GiB(x: int):
-    """return x GiB."""
-    return x * (1 << 30)
-
-
-def onnx2tensorrt(onnx_file,
-                  trt_file,
-                  input_config,
-                  verify=False,
-                  show=False,
-                  dataset='coco',
-                  workspace_size=1):
-    onnx_model = onnx.load(onnx_file)
-    input_shape = input_config['input_shape']
-    # create trt engine and wraper
-    opt_shape_dict = {'input': [input_shape, input_shape, input_shape]}
-    max_workspace_size = get_GiB(workspace_size)
-    trt_engine = onnx2trt(
-        onnx_model,
-        opt_shape_dict,
-        fp16_mode=False,
-        max_workspace_size=max_workspace_size)
-    save_dir, _ = osp.split(trt_file)
-    if save_dir:
-        os.makedirs(save_dir, exist_ok=True)
-    save_trt_engine(trt_engine, trt_file)
-    print(f'Successfully created TensorRT engine: {trt_file}')
-
-    if verify:
-        one_img, one_meta = preprocess_example_input(input_config)
-        input_img_cpu = one_img.detach().cpu().numpy()
-        input_img_cuda = one_img.cuda()
-
-        img = one_meta['show_img']
-
-        # Get results from TensorRT
-        trt_model = TRTWraper(trt_file, ['input'], ['boxes', 'labels'])
-        with torch.no_grad():
-            trt_outputs = trt_model({'input': input_img_cuda})
-        trt_boxes = trt_outputs['boxes'].detach().cpu().numpy()
-        trt_labels = trt_outputs['labels'].detach().cpu().numpy()
-
-        # Get results from ONNXRuntime
-        ort_custom_op_path = get_onnxruntime_op_path()
-        session_options = ort.SessionOptions()
-        if osp.exists(ort_custom_op_path):
-            session_options.register_custom_ops_library(ort_custom_op_path)
-        sess = ort.InferenceSession(onnx_file, session_options)
-        onnx_outputs = sess.run(None, {
-            'input': input_img_cpu,
-        })
-        ort_boxes, ort_labels = onnx_outputs
-
-        # Show detection outputs
-        if show:
-            CLASSES = get_classes(dataset)
-            score_thr = 0.35
-            imshow_det_bboxes(
-                img.copy(),
-                trt_boxes,
-                trt_labels,
-                CLASSES,
-                score_thr=score_thr,
-                win_name='TensorRT')
-            imshow_det_bboxes(
-                img.copy(),
-                ort_boxes,
-                ort_labels,
-                CLASSES,
-                score_thr=score_thr,
-                win_name='ONNXRuntime')
-        # Compare results
-        np.testing.assert_allclose(
-            ort_boxes, trt_boxes, rtol=1e-03, atol=1e-05)
-        np.testing.assert_allclose(ort_labels, trt_labels)
-        print('The numerical values are the same ' +
-              'between ONNXRuntime and TensorRT')
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(
-        description='Convert MMDetection models from ONNX to TensorRT')
-    parser.add_argument('model', help='Filename of input ONNX model')
-    parser.add_argument(
-        '--trt-file',
-        type=str,
-        default='tmp.trt',
-        help='Filename of output TensorRT engine')
-    parser.add_argument(
-        '--input-img', type=str, default='', help='Image for test')
-    parser.add_argument(
-        '--show', action='store_true', help='Whether to show output results')
-    parser.add_argument(
-        '--dataset', type=str, default='coco', help='Dataset name')
-    parser.add_argument(
-        '--verify',
-        action='store_true',
-        help='Verify the outputs of ONNXRuntime and TensorRT')
-    parser.add_argument(
-        '--to-rgb',
-        action='store_false',
-        help='Feed model with RGB or BGR image. Default is RGB.')
-    parser.add_argument(
-        '--shape',
-        type=int,
-        nargs='+',
-        default=[400, 600],
-        help='Input size of the model')
-    parser.add_argument(
-        '--mean',
-        type=float,
-        nargs='+',
-        default=[123.675, 116.28, 103.53],
-        help='Mean value used for preprocess input data')
-    parser.add_argument(
-        '--std',
-        type=float,
-        nargs='+',
-        default=[58.395, 57.12, 57.375],
-        help='Variance value used for preprocess input data')
-    parser.add_argument(
-        '--workspace-size',
-        type=int,
-        default=1,
-        help='Max workspace size in GiB')
-    args = parser.parse_args()
-    return args
-
-
-if __name__ == '__main__':
-
-    assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
-    args = parse_args()
-
-    if not args.input_img:
-        args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
-
-    if len(args.shape) == 1:
-        input_shape = (1, 3, args.shape[0], args.shape[0])
-    elif len(args.shape) == 2:
-        input_shape = (1, 3) + tuple(args.shape)
-    else:
-        raise ValueError('invalid input shape')
-
-    assert len(args.mean) == 3
-    assert len(args.std) == 3
-
-    normalize_cfg = {'mean': args.mean, 'std': args.std, 'to_rgb': args.to_rgb}
-    input_config = {
-        'input_shape': input_shape,
-        'input_path': args.input_img,
-        'normalize_cfg': normalize_cfg
-    }
-
-    # Create TensorRT engine
-    onnx2tensorrt(
-        args.model,
-        args.trt_file,
-        input_config,
-        verify=args.verify,
-        show=args.show,
-        dataset=args.dataset,
-        workspace_size=args.workspace_size)
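Aside (not part of the diff): a minimal sketch of driving the deleted script's `onnx2tensorrt` helper programmatically rather than via the CLI. It assumes an mmcv build compiled with TensorRT support (otherwise `onnx2trt`/`TRTWraper` are not importable) and an already-exported ONNX detector; the file names are hypothetical.

# Illustrative sketch only; file names are placeholders, not the project's defaults.
input_config = {
    'input_shape': (1, 3, 400, 600),
    'input_path': 'demo/demo.jpg',
    'normalize_cfg': {
        'mean': [123.675, 116.28, 103.53],
        'std': [58.395, 57.12, 57.375],
        'to_rgb': True,
    },
}
# Builds the engine, saves it to disk, then cross-checks ONNXRuntime vs. TensorRT outputs.
onnx2tensorrt('retinanet.onnx', 'retinanet.trt', input_config,
              verify=True, show=False, dataset='coco', workspace_size=1)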
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/cityscapes_769x769.py
DELETED
@@ -1,35 +0,0 @@
-_base_ = './cityscapes.py'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (769, 769)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations'),
-    dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
-    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
-    dict(type='RandomFlip', prob=0.5),
-    dict(type='PhotoMetricDistortion'),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(2049, 1025),
-        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    train=dict(pipeline=train_pipeline),
-    val=dict(pipeline=test_pipeline),
-    test=dict(pipeline=test_pipeline))
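Aside (not part of the diff): configs like the one above are consumed through mmcv's config loader, which also resolves the `_base_` inheritance. A minimal sketch, assuming mmcv (and the referenced base config) is available locally; the path is hypothetical.

# Illustrative sketch only: load the dataset config and inspect the pipelines.
from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/cityscapes_769x769.py')  # hypothetical path
print(cfg.crop_size)                                  # (769, 769)
print([t['type'] for t in cfg.train_pipeline])        # LoadImageFromFile, LoadAnnotations, ...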
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,11 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='torchvision://resnet18',
-    backbone=dict(type='ResNet', depth=18),
-    decode_head=dict(
-        c1_in_channels=64,
-        c1_channels=12,
-        in_channels=512,
-        channels=128,
-    ),
-    auxiliary_head=dict(in_channels=256, channels=64))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/send_pictures/script.py
DELETED
@@ -1,58 +0,0 @@
-import base64
-from io import BytesIO
-
-import gradio as gr
-import torch
-from transformers import BlipForConditionalGeneration, BlipProcessor
-
-from modules import chat, shared, ui_chat
-from modules.ui import gather_interface_values
-from modules.utils import gradio
-
-input_hijack = {
-    'state': False,
-    'value': ["", ""]
-}
-
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float32).to("cpu")
-
-
-def chat_input_modifier(text, visible_text, state):
-    global input_hijack
-    if input_hijack['state']:
-        input_hijack['state'] = False
-        return input_hijack['value']
-    else:
-        return text, visible_text
-
-
-def caption_image(raw_image):
-    inputs = processor(raw_image.convert('RGB'), return_tensors="pt").to("cpu", torch.float32)
-    out = model.generate(**inputs, max_new_tokens=100)
-    return processor.decode(out[0], skip_special_tokens=True)
-
-
-def generate_chat_picture(picture, name1, name2):
-    text = f'*{name1} sends {name2} a picture that contains the following: “{caption_image(picture)}”*'
-    # lower the resolution of sent images for the chat, otherwise the log size gets out of control quickly with all the base64 values in visible history
-    picture.thumbnail((300, 300))
-    buffer = BytesIO()
-    picture.save(buffer, format="JPEG")
-    img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
-    visible_text = f'<img src="data:image/jpeg;base64,{img_str}" alt="{text}">'
-    return text, visible_text
-
-
-def ui():
-    picture_select = gr.Image(label='Send a picture', type='pil')
-
-    # Prepare the input hijack, update the interface values, call the generation function, and clear the picture
-    picture_select.upload(
-        lambda picture, name1, name2: input_hijack.update({
-            "state": True,
-            "value": generate_chat_picture(picture, name1, name2)
-        }), [picture_select, shared.gradio['name1'], shared.gradio['name2']], None).then(
-        gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        chat.generate_chat_reply_wrapper, gradio(ui_chat.inputs), gradio('display', 'history'), show_progress=False).then(
-        lambda: None, None, picture_select, show_progress=False)
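Aside (not part of the diff): a minimal sketch of exercising the BLIP captioning path from the deleted extension on its own, assuming the `Salesforce/blip-image-captioning-base` weights can be downloaded and that `some_photo.jpg` is a hypothetical local image.

# Illustrative sketch only; reuses processor/model/caption_image defined above.
from PIL import Image

raw_image = Image.open('some_photo.jpg')            # hypothetical input image
print(caption_image(raw_image))                     # e.g. a one-line caption of the photo
text, visible_text = generate_chat_picture(raw_image, 'You', 'Assistant')
# `text` is the caption wrapped for the model; `visible_text` embeds the image as base64 HTML.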
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py
DELETED
@@ -1,645 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
pygments.formatters.img
|
3 |
-
~~~~~~~~~~~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
Formatter for Pixmap output.
|
6 |
-
|
7 |
-
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
|
8 |
-
:license: BSD, see LICENSE for details.
|
9 |
-
"""
|
10 |
-
|
11 |
-
import os
|
12 |
-
import sys
|
13 |
-
|
14 |
-
from pip._vendor.pygments.formatter import Formatter
|
15 |
-
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
|
16 |
-
get_choice_opt
|
17 |
-
|
18 |
-
import subprocess
|
19 |
-
|
20 |
-
# Import this carefully
|
21 |
-
try:
|
22 |
-
from PIL import Image, ImageDraw, ImageFont
|
23 |
-
pil_available = True
|
24 |
-
except ImportError:
|
25 |
-
pil_available = False
|
26 |
-
|
27 |
-
try:
|
28 |
-
import _winreg
|
29 |
-
except ImportError:
|
30 |
-
try:
|
31 |
-
import winreg as _winreg
|
32 |
-
except ImportError:
|
33 |
-
_winreg = None
|
34 |
-
|
35 |
-
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
|
36 |
-
'BmpImageFormatter']
|
37 |
-
|
38 |
-
|
39 |
-
# For some unknown reason every font calls it something different
|
40 |
-
STYLES = {
|
41 |
-
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
|
42 |
-
'ITALIC': ['Oblique', 'Italic'],
|
43 |
-
'BOLD': ['Bold'],
|
44 |
-
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
|
45 |
-
}
|
46 |
-
|
47 |
-
# A sane default for modern systems
|
48 |
-
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
|
49 |
-
DEFAULT_FONT_NAME_WIN = 'Courier New'
|
50 |
-
DEFAULT_FONT_NAME_MAC = 'Menlo'
|
51 |
-
|
52 |
-
|
53 |
-
class PilNotAvailable(ImportError):
|
54 |
-
"""When Python imaging library is not available"""
|
55 |
-
|
56 |
-
|
57 |
-
class FontNotFound(Exception):
|
58 |
-
"""When there are no usable fonts specified"""
|
59 |
-
|
60 |
-
|
61 |
-
class FontManager:
|
62 |
-
"""
|
63 |
-
Manages a set of fonts: normal, italic, bold, etc...
|
64 |
-
"""
|
65 |
-
|
66 |
-
def __init__(self, font_name, font_size=14):
|
67 |
-
self.font_name = font_name
|
68 |
-
self.font_size = font_size
|
69 |
-
self.fonts = {}
|
70 |
-
self.encoding = None
|
71 |
-
if sys.platform.startswith('win'):
|
72 |
-
if not font_name:
|
73 |
-
self.font_name = DEFAULT_FONT_NAME_WIN
|
74 |
-
self._create_win()
|
75 |
-
elif sys.platform.startswith('darwin'):
|
76 |
-
if not font_name:
|
77 |
-
self.font_name = DEFAULT_FONT_NAME_MAC
|
78 |
-
self._create_mac()
|
79 |
-
else:
|
80 |
-
if not font_name:
|
81 |
-
self.font_name = DEFAULT_FONT_NAME_NIX
|
82 |
-
self._create_nix()
|
83 |
-
|
84 |
-
def _get_nix_font_path(self, name, style):
|
85 |
-
proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
|
86 |
-
stdout=subprocess.PIPE, stderr=None)
|
87 |
-
stdout, _ = proc.communicate()
|
88 |
-
if proc.returncode == 0:
|
89 |
-
lines = stdout.splitlines()
|
90 |
-
for line in lines:
|
91 |
-
if line.startswith(b'Fontconfig warning:'):
|
92 |
-
continue
|
93 |
-
path = line.decode().strip().strip(':')
|
94 |
-
if path:
|
95 |
-
return path
|
96 |
-
return None
|
97 |
-
|
98 |
-
def _create_nix(self):
|
99 |
-
for name in STYLES['NORMAL']:
|
100 |
-
path = self._get_nix_font_path(self.font_name, name)
|
101 |
-
if path is not None:
|
102 |
-
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
|
103 |
-
break
|
104 |
-
else:
|
105 |
-
raise FontNotFound('No usable fonts named: "%s"' %
|
106 |
-
self.font_name)
|
107 |
-
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
|
108 |
-
for stylename in STYLES[style]:
|
109 |
-
path = self._get_nix_font_path(self.font_name, stylename)
|
110 |
-
if path is not None:
|
111 |
-
self.fonts[style] = ImageFont.truetype(path, self.font_size)
|
112 |
-
break
|
113 |
-
else:
|
114 |
-
if style == 'BOLDITALIC':
|
115 |
-
self.fonts[style] = self.fonts['BOLD']
|
116 |
-
else:
|
117 |
-
self.fonts[style] = self.fonts['NORMAL']
|
118 |
-
|
119 |
-
def _get_mac_font_path(self, font_map, name, style):
|
120 |
-
return font_map.get((name + ' ' + style).strip().lower())
|
121 |
-
|
122 |
-
def _create_mac(self):
|
123 |
-
font_map = {}
|
124 |
-
for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
|
125 |
-
'/Library/Fonts/', '/System/Library/Fonts/'):
|
126 |
-
font_map.update(
|
127 |
-
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
|
128 |
-
for f in os.listdir(font_dir)
|
129 |
-
if f.lower().endswith(('ttf', 'ttc')))
|
130 |
-
|
131 |
-
for name in STYLES['NORMAL']:
|
132 |
-
path = self._get_mac_font_path(font_map, self.font_name, name)
|
133 |
-
if path is not None:
|
134 |
-
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
|
135 |
-
break
|
136 |
-
else:
|
137 |
-
raise FontNotFound('No usable fonts named: "%s"' %
|
138 |
-
self.font_name)
|
139 |
-
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
|
140 |
-
for stylename in STYLES[style]:
|
141 |
-
path = self._get_mac_font_path(font_map, self.font_name, stylename)
|
142 |
-
if path is not None:
|
143 |
-
self.fonts[style] = ImageFont.truetype(path, self.font_size)
|
144 |
-
break
|
145 |
-
else:
|
146 |
-
if style == 'BOLDITALIC':
|
147 |
-
self.fonts[style] = self.fonts['BOLD']
|
148 |
-
else:
|
149 |
-
self.fonts[style] = self.fonts['NORMAL']
|
150 |
-
|
151 |
-
def _lookup_win(self, key, basename, styles, fail=False):
|
152 |
-
for suffix in ('', ' (TrueType)'):
|
153 |
-
for style in styles:
|
154 |
-
try:
|
155 |
-
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
|
156 |
-
val, _ = _winreg.QueryValueEx(key, valname)
|
157 |
-
return val
|
158 |
-
except OSError:
|
159 |
-
continue
|
160 |
-
else:
|
161 |
-
if fail:
|
162 |
-
raise FontNotFound('Font %s (%s) not found in registry' %
|
163 |
-
(basename, styles[0]))
|
164 |
-
return None
|
165 |
-
|
166 |
-
def _create_win(self):
|
167 |
-
lookuperror = None
|
168 |
-
keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
|
169 |
-
(_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
|
170 |
-
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
|
171 |
-
(_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
|
172 |
-
for keyname in keynames:
|
173 |
-
try:
|
174 |
-
key = _winreg.OpenKey(*keyname)
|
175 |
-
try:
|
176 |
-
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
|
177 |
-
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
|
178 |
-
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
|
179 |
-
path = self._lookup_win(key, self.font_name, STYLES[style])
|
180 |
-
if path:
|
181 |
-
self.fonts[style] = ImageFont.truetype(path, self.font_size)
|
182 |
-
else:
|
183 |
-
if style == 'BOLDITALIC':
|
184 |
-
self.fonts[style] = self.fonts['BOLD']
|
185 |
-
else:
|
186 |
-
self.fonts[style] = self.fonts['NORMAL']
|
187 |
-
return
|
188 |
-
except FontNotFound as err:
|
189 |
-
lookuperror = err
|
190 |
-
finally:
|
191 |
-
_winreg.CloseKey(key)
|
192 |
-
except OSError:
|
193 |
-
pass
|
194 |
-
else:
|
195 |
-
# If we get here, we checked all registry keys and had no luck
|
196 |
-
# We can be in one of two situations now:
|
197 |
-
# * All key lookups failed. In this case lookuperror is None and we
|
198 |
-
# will raise a generic error
|
199 |
-
# * At least one lookup failed with a FontNotFound error. In this
|
200 |
-
# case, we will raise that as a more specific error
|
201 |
-
if lookuperror:
|
202 |
-
raise lookuperror
|
203 |
-
raise FontNotFound('Can\'t open Windows font registry key')
|
204 |
-
|
205 |
-
def get_char_size(self):
|
206 |
-
"""
|
207 |
-
Get the character size.
|
208 |
-
"""
|
209 |
-
return self.get_text_size('M')
|
210 |
-
|
211 |
-
def get_text_size(self, text):
|
212 |
-
"""
|
213 |
-
Get the text size (width, height).
|
214 |
-
"""
|
215 |
-
font = self.fonts['NORMAL']
|
216 |
-
if hasattr(font, 'getbbox'): # Pillow >= 9.2.0
|
217 |
-
return font.getbbox(text)[2:4]
|
218 |
-
else:
|
219 |
-
return font.getsize(text)
|
220 |
-
|
221 |
-
def get_font(self, bold, oblique):
|
222 |
-
"""
|
223 |
-
Get the font based on bold and italic flags.
|
224 |
-
"""
|
225 |
-
if bold and oblique:
|
226 |
-
return self.fonts['BOLDITALIC']
|
227 |
-
elif bold:
|
228 |
-
return self.fonts['BOLD']
|
229 |
-
elif oblique:
|
230 |
-
return self.fonts['ITALIC']
|
231 |
-
else:
|
232 |
-
return self.fonts['NORMAL']
|
233 |
-
|
234 |
-
|
235 |
-
class ImageFormatter(Formatter):
|
236 |
-
"""
|
237 |
-
Create a PNG image from source code. This uses the Python Imaging Library to
|
238 |
-
generate a pixmap from the source code.
|
239 |
-
|
240 |
-
.. versionadded:: 0.10
|
241 |
-
|
242 |
-
Additional options accepted:
|
243 |
-
|
244 |
-
`image_format`
|
245 |
-
An image format to output to that is recognised by PIL, these include:
|
246 |
-
|
247 |
-
* "PNG" (default)
|
248 |
-
* "JPEG"
|
249 |
-
* "BMP"
|
250 |
-
* "GIF"
|
251 |
-
|
252 |
-
`line_pad`
|
253 |
-
The extra spacing (in pixels) between each line of text.
|
254 |
-
|
255 |
-
Default: 2
|
256 |
-
|
257 |
-
`font_name`
|
258 |
-
The font name to be used as the base font from which others, such as
|
259 |
-
bold and italic fonts will be generated. This really should be a
|
260 |
-
monospace font to look sane.
|
261 |
-
|
262 |
-
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
|
263 |
-
"DejaVu Sans Mono" on \\*nix
|
264 |
-
|
265 |
-
`font_size`
|
266 |
-
The font size in points to be used.
|
267 |
-
|
268 |
-
Default: 14
|
269 |
-
|
270 |
-
`image_pad`
|
271 |
-
The padding, in pixels to be used at each edge of the resulting image.
|
272 |
-
|
273 |
-
Default: 10
|
274 |
-
|
275 |
-
`line_numbers`
|
276 |
-
Whether line numbers should be shown: True/False
|
277 |
-
|
278 |
-
Default: True
|
279 |
-
|
280 |
-
`line_number_start`
|
281 |
-
The line number of the first line.
|
282 |
-
|
283 |
-
Default: 1
|
284 |
-
|
285 |
-
`line_number_step`
|
286 |
-
The step used when printing line numbers.
|
287 |
-
|
288 |
-
Default: 1
|
289 |
-
|
290 |
-
`line_number_bg`
|
291 |
-
The background colour (in "#123456" format) of the line number bar, or
|
292 |
-
None to use the style background color.
|
293 |
-
|
294 |
-
Default: "#eed"
|
295 |
-
|
296 |
-
`line_number_fg`
|
297 |
-
The text color of the line numbers (in "#123456"-like format).
|
298 |
-
|
299 |
-
Default: "#886"
|
300 |
-
|
301 |
-
`line_number_chars`
|
302 |
-
The number of columns of line numbers allowable in the line number
|
303 |
-
margin.
|
304 |
-
|
305 |
-
Default: 2
|
306 |
-
|
307 |
-
`line_number_bold`
|
308 |
-
Whether line numbers will be bold: True/False
|
309 |
-
|
310 |
-
Default: False
|
311 |
-
|
312 |
-
`line_number_italic`
|
313 |
-
Whether line numbers will be italicized: True/False
|
314 |
-
|
315 |
-
Default: False
|
316 |
-
|
317 |
-
`line_number_separator`
|
318 |
-
Whether a line will be drawn between the line number area and the
|
319 |
-
source code area: True/False
|
320 |
-
|
321 |
-
Default: True
|
322 |
-
|
323 |
-
`line_number_pad`
|
324 |
-
The horizontal padding (in pixels) between the line number margin, and
|
325 |
-
the source code area.
|
326 |
-
|
327 |
-
Default: 6
|
328 |
-
|
329 |
-
`hl_lines`
|
330 |
-
Specify a list of lines to be highlighted.
|
331 |
-
|
332 |
-
.. versionadded:: 1.2
|
333 |
-
|
334 |
-
Default: empty list
|
335 |
-
|
336 |
-
`hl_color`
|
337 |
-
Specify the color for highlighting lines.
|
338 |
-
|
339 |
-
.. versionadded:: 1.2
|
340 |
-
|
341 |
-
Default: highlight color of the selected style
|
342 |
-
"""
|
343 |
-
|
344 |
-
# Required by the pygments mapper
|
345 |
-
name = 'img'
|
346 |
-
aliases = ['img', 'IMG', 'png']
|
347 |
-
filenames = ['*.png']
|
348 |
-
|
349 |
-
unicodeoutput = False
|
350 |
-
|
351 |
-
default_image_format = 'png'
|
352 |
-
|
353 |
-
def __init__(self, **options):
|
354 |
-
"""
|
355 |
-
See the class docstring for explanation of options.
|
356 |
-
"""
|
357 |
-
if not pil_available:
|
358 |
-
raise PilNotAvailable(
|
359 |
-
'Python Imaging Library is required for this formatter')
|
360 |
-
Formatter.__init__(self, **options)
|
361 |
-
self.encoding = 'latin1' # let pygments.format() do the right thing
|
362 |
-
# Read the style
|
363 |
-
self.styles = dict(self.style)
|
364 |
-
if self.style.background_color is None:
|
365 |
-
self.background_color = '#fff'
|
366 |
-
else:
|
367 |
-
self.background_color = self.style.background_color
|
368 |
-
# Image options
|
369 |
-
self.image_format = get_choice_opt(
|
370 |
-
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
|
371 |
-
self.default_image_format, normcase=True)
|
372 |
-
self.image_pad = get_int_opt(options, 'image_pad', 10)
|
373 |
-
self.line_pad = get_int_opt(options, 'line_pad', 2)
|
374 |
-
# The fonts
|
375 |
-
fontsize = get_int_opt(options, 'font_size', 14)
|
376 |
-
self.fonts = FontManager(options.get('font_name', ''), fontsize)
|
377 |
-
self.fontw, self.fonth = self.fonts.get_char_size()
|
378 |
-
# Line number options
|
379 |
-
self.line_number_fg = options.get('line_number_fg', '#886')
|
380 |
-
self.line_number_bg = options.get('line_number_bg', '#eed')
|
381 |
-
self.line_number_chars = get_int_opt(options,
|
382 |
-
'line_number_chars', 2)
|
383 |
-
self.line_number_bold = get_bool_opt(options,
|
384 |
-
'line_number_bold', False)
|
385 |
-
self.line_number_italic = get_bool_opt(options,
|
386 |
-
'line_number_italic', False)
|
387 |
-
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
|
388 |
-
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
|
389 |
-
self.line_number_separator = get_bool_opt(options,
|
390 |
-
'line_number_separator', True)
|
391 |
-
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
|
392 |
-
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
|
393 |
-
if self.line_numbers:
|
394 |
-
self.line_number_width = (self.fontw * self.line_number_chars +
|
395 |
-
self.line_number_pad * 2)
|
396 |
-
else:
|
397 |
-
self.line_number_width = 0
|
398 |
-
self.hl_lines = []
|
399 |
-
hl_lines_str = get_list_opt(options, 'hl_lines', [])
|
400 |
-
for line in hl_lines_str:
|
401 |
-
try:
|
402 |
-
self.hl_lines.append(int(line))
|
403 |
-
except ValueError:
|
404 |
-
pass
|
405 |
-
self.hl_color = options.get('hl_color',
|
406 |
-
self.style.highlight_color) or '#f90'
|
407 |
-
self.drawables = []
|
408 |
-
|
409 |
-
def get_style_defs(self, arg=''):
|
410 |
-
raise NotImplementedError('The -S option is meaningless for the image '
|
411 |
-
'formatter. Use -O style=<stylename> instead.')
|
412 |
-
|
413 |
-
def _get_line_height(self):
|
414 |
-
"""
|
415 |
-
Get the height of a line.
|
416 |
-
"""
|
417 |
-
return self.fonth + self.line_pad
|
418 |
-
|
419 |
-
def _get_line_y(self, lineno):
|
420 |
-
"""
|
421 |
-
Get the Y coordinate of a line number.
|
422 |
-
"""
|
423 |
-
return lineno * self._get_line_height() + self.image_pad
|
424 |
-
|
425 |
-
def _get_char_width(self):
|
426 |
-
"""
|
427 |
-
Get the width of a character.
|
428 |
-
"""
|
429 |
-
return self.fontw
|
430 |
-
|
431 |
-
def _get_char_x(self, linelength):
|
432 |
-
"""
|
433 |
-
Get the X coordinate of a character position.
|
434 |
-
"""
|
435 |
-
return linelength + self.image_pad + self.line_number_width
|
436 |
-
|
437 |
-
def _get_text_pos(self, linelength, lineno):
|
438 |
-
"""
|
439 |
-
Get the actual position for a character and line position.
|
440 |
-
"""
|
441 |
-
return self._get_char_x(linelength), self._get_line_y(lineno)
|
442 |
-
|
443 |
-
def _get_linenumber_pos(self, lineno):
|
444 |
-
"""
|
445 |
-
Get the actual position for the start of a line number.
|
446 |
-
"""
|
447 |
-
return (self.image_pad, self._get_line_y(lineno))
|
448 |
-
|
449 |
-
def _get_text_color(self, style):
|
450 |
-
"""
|
451 |
-
Get the correct color for the token from the style.
|
452 |
-
"""
|
453 |
-
if style['color'] is not None:
|
454 |
-
fill = '#' + style['color']
|
455 |
-
else:
|
456 |
-
fill = '#000'
|
457 |
-
return fill
|
458 |
-
|
459 |
-
def _get_text_bg_color(self, style):
|
460 |
-
"""
|
461 |
-
Get the correct background color for the token from the style.
|
462 |
-
"""
|
463 |
-
if style['bgcolor'] is not None:
|
464 |
-
bg_color = '#' + style['bgcolor']
|
465 |
-
else:
|
466 |
-
bg_color = None
|
467 |
-
return bg_color
|
468 |
-
|
469 |
-
def _get_style_font(self, style):
|
470 |
-
"""
|
471 |
-
Get the correct font for the style.
|
472 |
-
"""
|
473 |
-
return self.fonts.get_font(style['bold'], style['italic'])
|
474 |
-
|
475 |
-
def _get_image_size(self, maxlinelength, maxlineno):
|
476 |
-
"""
|
477 |
-
Get the required image size.
|
478 |
-
"""
|
479 |
-
return (self._get_char_x(maxlinelength) + self.image_pad,
|
480 |
-
self._get_line_y(maxlineno + 0) + self.image_pad)
|
481 |
-
|
482 |
-
def _draw_linenumber(self, posno, lineno):
|
483 |
-
"""
|
484 |
-
Remember a line number drawable to paint later.
|
485 |
-
"""
|
486 |
-
self._draw_text(
|
487 |
-
self._get_linenumber_pos(posno),
|
488 |
-
str(lineno).rjust(self.line_number_chars),
|
489 |
-
font=self.fonts.get_font(self.line_number_bold,
|
490 |
-
self.line_number_italic),
|
491 |
-
text_fg=self.line_number_fg,
|
492 |
-
text_bg=None,
|
493 |
-
)
|
494 |
-
|
495 |
-
def _draw_text(self, pos, text, font, text_fg, text_bg):
|
496 |
-
"""
|
497 |
-
Remember a single drawable tuple to paint later.
|
498 |
-
"""
|
499 |
-
self.drawables.append((pos, text, font, text_fg, text_bg))
|
500 |
-
|
501 |
-
def _create_drawables(self, tokensource):
|
502 |
-
"""
|
503 |
-
Create drawables for the token content.
|
504 |
-
"""
|
505 |
-
lineno = charno = maxcharno = 0
|
506 |
-
maxlinelength = linelength = 0
|
507 |
-
for ttype, value in tokensource:
|
508 |
-
while ttype not in self.styles:
|
509 |
-
ttype = ttype.parent
|
510 |
-
style = self.styles[ttype]
|
511 |
-
# TODO: make sure tab expansion happens earlier in the chain. It
|
512 |
-
# really ought to be done on the input, as to do it right here is
|
513 |
-
# quite complex.
|
514 |
-
value = value.expandtabs(4)
|
515 |
-
lines = value.splitlines(True)
|
516 |
-
# print lines
|
517 |
-
for i, line in enumerate(lines):
|
518 |
-
temp = line.rstrip('\n')
|
519 |
-
if temp:
|
520 |
-
self._draw_text(
|
521 |
-
self._get_text_pos(linelength, lineno),
|
522 |
-
temp,
|
523 |
-
font = self._get_style_font(style),
|
524 |
-
text_fg = self._get_text_color(style),
|
525 |
-
text_bg = self._get_text_bg_color(style),
|
526 |
-
)
|
527 |
-
temp_width, _ = self.fonts.get_text_size(temp)
|
528 |
-
linelength += temp_width
|
529 |
-
maxlinelength = max(maxlinelength, linelength)
|
530 |
-
charno += len(temp)
|
531 |
-
maxcharno = max(maxcharno, charno)
|
532 |
-
if line.endswith('\n'):
|
533 |
-
# add a line for each extra line in the value
|
534 |
-
linelength = 0
|
535 |
-
charno = 0
|
536 |
-
lineno += 1
|
537 |
-
self.maxlinelength = maxlinelength
|
538 |
-
self.maxcharno = maxcharno
|
539 |
-
self.maxlineno = lineno
|
540 |
-
|
541 |
-
def _draw_line_numbers(self):
|
542 |
-
"""
|
543 |
-
Create drawables for the line numbers.
|
544 |
-
"""
|
545 |
-
if not self.line_numbers:
|
546 |
-
return
|
547 |
-
for p in range(self.maxlineno):
|
548 |
-
n = p + self.line_number_start
|
549 |
-
if (n % self.line_number_step) == 0:
|
550 |
-
self._draw_linenumber(p, n)
|
551 |
-
|
552 |
-
def _paint_line_number_bg(self, im):
|
553 |
-
"""
|
554 |
-
Paint the line number background on the image.
|
555 |
-
"""
|
556 |
-
if not self.line_numbers:
|
557 |
-
return
|
558 |
-
if self.line_number_fg is None:
|
559 |
-
return
|
560 |
-
draw = ImageDraw.Draw(im)
|
561 |
-
recth = im.size[-1]
|
562 |
-
rectw = self.image_pad + self.line_number_width - self.line_number_pad
|
563 |
-
draw.rectangle([(0, 0), (rectw, recth)],
|
564 |
-
fill=self.line_number_bg)
|
565 |
-
if self.line_number_separator:
|
566 |
-
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
|
567 |
-
del draw
|
568 |
-
|
569 |
-
def format(self, tokensource, outfile):
|
570 |
-
"""
|
571 |
-
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
|
572 |
-
tuples and write it into ``outfile``.
|
573 |
-
|
574 |
-
This implementation calculates where it should draw each token on the
|
575 |
-
pixmap, then calculates the required pixmap size and draws the items.
|
576 |
-
"""
|
577 |
-
self._create_drawables(tokensource)
|
578 |
-
self._draw_line_numbers()
|
579 |
-
im = Image.new(
|
580 |
-
'RGB',
|
581 |
-
self._get_image_size(self.maxlinelength, self.maxlineno),
|
582 |
-
self.background_color
|
583 |
-
)
|
584 |
-
self._paint_line_number_bg(im)
|
585 |
-
draw = ImageDraw.Draw(im)
|
586 |
-
# Highlight
|
587 |
-
if self.hl_lines:
|
588 |
-
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
|
589 |
-
recth = self._get_line_height()
|
590 |
-
rectw = im.size[0] - x
|
591 |
-
for linenumber in self.hl_lines:
|
592 |
-
y = self._get_line_y(linenumber - 1)
|
593 |
-
draw.rectangle([(x, y), (x + rectw, y + recth)],
|
594 |
-
fill=self.hl_color)
|
595 |
-
for pos, value, font, text_fg, text_bg in self.drawables:
|
596 |
-
if text_bg:
|
597 |
-
text_size = draw.textsize(text=value, font=font)
|
598 |
-
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
|
599 |
-
draw.text(pos, value, font=font, fill=text_fg)
|
600 |
-
im.save(outfile, self.image_format.upper())
|
601 |
-
|
602 |
-
|
603 |
-
# Add one formatter per format, so that the "-f gif" option gives the correct result
|
604 |
-
# when used in pygmentize.
|
605 |
-
|
606 |
-
class GifImageFormatter(ImageFormatter):
|
607 |
-
"""
|
608 |
-
Create a GIF image from source code. This uses the Python Imaging Library to
|
609 |
-
generate a pixmap from the source code.
|
610 |
-
|
611 |
-
.. versionadded:: 1.0
|
612 |
-
"""
|
613 |
-
|
614 |
-
name = 'img_gif'
|
615 |
-
aliases = ['gif']
|
616 |
-
filenames = ['*.gif']
|
617 |
-
default_image_format = 'gif'
|
618 |
-
|
619 |
-
|
620 |
-
class JpgImageFormatter(ImageFormatter):
|
621 |
-
"""
|
622 |
-
Create a JPEG image from source code. This uses the Python Imaging Library to
|
623 |
-
generate a pixmap from the source code.
|
624 |
-
|
625 |
-
.. versionadded:: 1.0
|
626 |
-
"""
|
627 |
-
|
628 |
-
name = 'img_jpg'
|
629 |
-
aliases = ['jpg', 'jpeg']
|
630 |
-
filenames = ['*.jpg']
|
631 |
-
default_image_format = 'jpeg'
|
632 |
-
|
633 |
-
|
634 |
-
class BmpImageFormatter(ImageFormatter):
|
635 |
-
"""
|
636 |
-
Create a bitmap image from source code. This uses the Python Imaging Library to
|
637 |
-
generate a pixmap from the source code.
|
638 |
-
|
639 |
-
.. versionadded:: 1.0
|
640 |
-
"""
|
641 |
-
|
642 |
-
name = 'img_bmp'
|
643 |
-
aliases = ['bmp', 'bitmap']
|
644 |
-
filenames = ['*.bmp']
|
645 |
-
default_image_format = 'bmp'
|
spaces/AutoGeneralAI/voice-assistant/README_zh.md
DELETED
@@ -1,14 +0,0 @@
-# voice-assistant
-
-What it does: a voice assistant, implemented by calling the official OpenAI API.
-
-Online demo: https://huggingface.co/spaces/AutoGeneralAI/voice-assistant
-
-## Usage
-Put your own OpenAI API KEY from https://platform.openai.com/ into the key input box, and you can then chat by voice.
-
-> On first use, the browser (e.g. Chrome) will ask whether to allow microphone access; choose Allow.
-
-> Conversation history can be saved. Only chat is implemented for now: the human side can speak, while the AI side still replies as text. Reading the reply aloud is left for a future version, or PRs are welcome.
-
-
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md
DELETED
@@ -1,5 +0,0 @@
-
-Please select an issue template from
-https://github.com/facebookresearch/detectron2/issues/new/choose .
-
-Otherwise your issue will be closed.
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py
DELETED
@@ -1,31 +0,0 @@
-import torch
-import json
-import numpy as np
-from torch.nn import functional as F
-
-def load_class_freq(
-    path='datasets/lvis/lvis_v1_train_cat_info.json',
-    freq_weight=0.5):
-    cat_info = json.load(open(path, 'r'))
-    cat_info = torch.tensor(
-        [c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])])
-    freq_weight = cat_info.float() ** freq_weight
-    return freq_weight
-
-def get_fed_loss_inds(
-    gt_classes, num_sample_cats=50, C=1203, \
-    weight=None, fed_cls_inds=-1):
-    appeared = torch.unique(gt_classes) # C'
-    prob = appeared.new_ones(C + 1).float()
-    prob[-1] = 0
-    if len(appeared) < num_sample_cats:
-        if weight is not None:
-            prob[:C] = weight.float().clone()
-        prob[appeared] = 0
-        if fed_cls_inds > 0:
-            prob[fed_cls_inds:] = 0
-        more_appeared = torch.multinomial(
-            prob, num_sample_cats - len(appeared),
-            replacement=False)
-        appeared = torch.cat([appeared, more_appeared])
-    return appeared
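Aside (not part of the diff): a minimal sketch of what the sampling in `get_fed_loss_inds` returns for a toy batch. It assumes only PyTorch is installed; the class ids below are made up.

# Illustrative sketch only: federated-loss class sampling on a toy batch.
import torch

gt_classes = torch.tensor([3, 3, 17, 88])   # hypothetical ground-truth class ids (C = 1203 for LVIS v1)
inds = get_fed_loss_inds(gt_classes, num_sample_cats=50, C=1203, weight=None)
print(inds.numel())   # 50: the 3 classes present plus 47 freshly sampled negative classes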
spaces/B2gan/LLM_Can_See/ai_functions.py
DELETED
@@ -1,45 +0,0 @@
-import openai
-
-def is_string(input):
-    return isinstance(input, str)
-
-def ai_function(Question, Scene, model = "gpt-4"):
-    system_message = {
-        "role": "system",
-        "content": (
-            "You are an AI that assists the blind"
-        )
-    }
-    user_message = {
-        "role": "user",
-        "content": (
-            "Model, I need your assistance. I am developing a tool that can help blind people "
-            "navigate their surroundings safely by offering detailed and relevant descriptions. "
-            "These descriptions will be ranked using the CLIP model. "
-            "The initial scene description from the blind person's perspective is: " + Scene +
-            ". "
-            "And the blind person asked: " + Question +
-            ". "
-            "I need you to generate five sets of short, specific and actionable phrases or sentences that address the blind person's question first, "
-            "and also accurately reflect the Scene. "
-            "These descriptions must prioritize safety and accessibility, offering information about potential obstacles or hazards. "
-            "They should serve as a practical guide for the blind, so they should be as detailed and vivid as possible. "
-            "All descriptions should adhere to the context provided by the initial Scene and the blind person's question. "
-            "Please provide these five sets of descriptions directly, all in English, "
-            "and without any redundant information. Thank you for your assistance."
-        )
-    }
-
-    if is_string(model) == False:
-        return model(user_message["content"])
-
-    messages = [system_message, user_message]
-
-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=0.2,
-        max_tokens=200,
-    )
-
-    return response.choices[0].message["content"]
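Aside (not part of the diff): a minimal sketch of calling the deleted helper directly. It assumes a valid `OPENAI_API_KEY` in the environment and the legacy `openai<1.0` ChatCompletion interface that the file itself relies on; the scene and question strings are hypothetical.

# Illustrative sketch only; key, scene, and question are placeholders.
import os
import openai

openai.api_key = os.environ['OPENAI_API_KEY']
answer = ai_function(
    Question="Is there anything in front of me?",
    Scene="A sidewalk with a bicycle parked near a lamp post",
    model="gpt-3.5-turbo",
)
print(answer)   # five short candidate descriptions to be re-ranked with CLIP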
spaces/Benson/text-generation/Examples/Descargar Canal De Youtube Apk.md
DELETED
@@ -1,72 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar canal de YouTube APK para Android</h1>
|
3 |
-
<p>YouTube es una de las plataformas para compartir videos más populares del mundo, con miles de millones de usuarios viendo y creando contenido cada día. Sin embargo, si desea disfrutar de más características y funciones en YouTube, es posible que desee descargar YouTube Channel APK para su dispositivo Android. </p>
|
4 |
-
<h2>descargar canal de youtube apk</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://bltlly.com/2v6Ktf">https://bltlly.com/2v6Ktf</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es el canal de YouTube APK? </h2>
|
6 |
-
<p>Canal de YouTube APK es una versión modificada de la aplicación oficial de YouTube que le permite acceder a más opciones y ajustes en su cuenta de YouTube. Con YouTube Channel APK, puedes:</p>
|
7 |
-
<ul>
|
8 |
-
<li>Descargar vídeos y listas de reproducción para ver sin conexión</li>
|
9 |
-
<li>Reproducir vídeos en segundo plano o en el modo imagen en imagen</li>
|
10 |
-
<li>Cambiar la velocidad de reproducción y la resolución de los vídeos</li>
|
11 |
-
<li>Habilitar el modo oscuro y el modo de zoom</li>
|
12 |
-
<li>Bloquear anuncios y saltar intros</li>
|
13 |
-
<li>Suscribirse a canales sin iniciar sesión</li>
|
14 |
-
<li> ¡Y mucho más! </li>
|
15 |
-
</ul>
|
16 |
-
<p>Canal de YouTube APK no está disponible en Google Play Store, por lo que necesita descargarlo de una fuente de terceros. Sin embargo, antes de hacer eso, usted necesita saber por qué debe descargar YouTube Channel APK en primer lugar. </p>
|
17 |
-
<h2>¿Por qué descargar canal de YouTube APK? </h2>
|
18 |
-
<p>Hay muchas razones por las que es posible que desee descargar YouTube Channel APK para su dispositivo Android. Estos son algunos de ellos:</p>
|
19 |
-
<ul <li>Guarde sus datos móviles y el espacio de almacenamiento mediante la descarga de vídeos y listas de reproducción para su visualización sin conexión. Puede elegir la calidad y el formato de los vídeos que desea descargar, y verlos en cualquier momento, en cualquier lugar, sin búfer o interrupciones. </li>
|
20 |
-
<li>Disfrute de la multitarea y la comodidad mediante la reproducción de vídeos en el fondo o en el modo de imagen en imagen. Puede escuchar música, podcasts o audiolibros mientras usa otras aplicaciones o navega por la web. También puede cambiar el tamaño y mover la ventana de vídeo alrededor de la pantalla como desee. </li>
|
21 |
-
|
22 |
-
<li>Proteja sus ojos y la batería mediante el modo oscuro y el modo de zoom. El modo oscuro reduce el brillo y el contraste de la pantalla, facilitando la visualización de vídeos en entornos con poca luz. El modo zoom le permite llenar toda la pantalla con el vídeo, eliminando las barras negras y maximizando su inmersión. </li>
|
23 |
-
<li>Evite los anuncios molestos y saltar las intros bloqueándolos con YouTube Channel APK. Puedes disfrutar de videos ininterrumpidos y sin publicidad sin tener que esperar ni hacer clic en nada. También puedes saltarte las largas y aburridas intros que tienen algunos canales, y llegar directamente al contenido que quieres ver. </li>
|
24 |
-
<li>Explorar más contenido y canales sin iniciar sesión mediante la suscripción a ellos con YouTube Channel APK. Puedes seguir cualquier canal que te guste sin tener que crear o iniciar sesión en tu cuenta de YouTube. También puede acceder a más vídeos que están restringidos o no disponibles en su región. </li>
|
25 |
-
</ul>
|
26 |
-
<p>Como puedes ver, YouTube Channel APK ofrece muchas ventajas sobre la aplicación oficial de YouTube. Sin embargo, antes de descargarlo, necesitas saber cómo hacerlo de forma segura y fácil. </p>
|
27 |
-
<h2>Cómo descargar YouTube canal APK de forma segura y fácil? </h2>
|
28 |
-
<p>Descarga de canal de YouTube APK no es difícil, pero hay que tener cuidado acerca de dónde se obtiene. Hay muchos sitios web que afirman ofrecer YouTube Channel APK, pero algunos de ellos pueden contener malware o virus que pueden dañar su dispositivo o robar sus datos. Por lo tanto, es necesario descargar YouTube Channel APK de una fuente confiable y confiable. </p>
|
29 |
-
<p>Una de las mejores fuentes para YouTube Channel APK es [APKPure], un sitio web que proporciona archivos APK originales y puros para varias aplicaciones y juegos. APKPure tiene una gran reputación y una gran base de usuarios, por lo que puede estar seguro de que los archivos que descarga desde allí son seguros. Aquí están los pasos sobre cómo descargar YouTube Channel APK de APKPure:</p>
|
30 |
-
<p></p>
|
31 |
-
<ol>
|
32 |
-
<li>Ir a [APKPure] en su navegador y buscar "canal de YouTube APK" en la barra de búsqueda. </li>
|
33 |
-
|
34 |
-
<li>Espere a que la descarga termine y localice el archivo en su dispositivo. </li>
|
35 |
-
</ol>
|
36 |
-
<p>Felicidades! Usted ha descargado con éxito el canal de YouTube APK de APKPure. Sin embargo, antes de poder usarlo, debe instalarlo en su dispositivo. </p>
|
37 |
-
<h2>Cómo instalar canal de YouTube APK en su dispositivo Android? </h2>
|
38 |
-
<p>Instalación de canal de YouTube APK en su dispositivo Android no es difícil, pero es necesario hacer algunos cambios en la configuración primero. Dado que YouTube Channel APK no es de Google Play Store, es necesario habilitar fuentes desconocidas y conceder permisos para ello. Aquí están los pasos sobre cómo instalar YouTube Channel APK en su dispositivo Android:</p>
|
39 |
-
<ol <li>Ve a la configuración de tu dispositivo y toca "Seguridad". </li>
|
40 |
-
<li>Desplácese hacia abajo y encuentre la opción "Fuentes desconocidas". Cámbiela y confirme su elección. </li>
|
41 |
-
<li>Volver al administrador de archivos y toque en el archivo APK del canal de YouTube que ha descargado. </li>
|
42 |
-
<li>Toque en "Instalar" y espere a que la instalación se complete. </li>
|
43 |
-
</ol>
|
44 |
-
<p>Felicidades! Usted ha instalado con éxito YouTube Channel APK en su dispositivo Android. Sin embargo, antes de que puedas usarlo, necesitas saber cómo usarlo. </p>
|
45 |
-
<h2> ¿Cómo utilizar el canal de YouTube APK en su dispositivo Android? </h2>
|
46 |
-
<p>Usando YouTube Channel APK en su dispositivo Android no es difícil, pero es necesario familiarizarse con sus características y funciones. Estas son algunas de las cosas que puedes hacer con YouTube Channel APK en tu dispositivo Android:</p>
|
47 |
-
<ul>
|
48 |
-
<li>Para descargar vídeos y listas de reproducción para verlos sin conexión, toque en el icono de descarga debajo del vídeo o lista de reproducción. Elija la calidad y el formato que desea y toque en "Aceptar". Puede encontrar sus vídeos descargados y listas de reproducción en la sección "Descargas" de la aplicación. </li>
|
49 |
-
|
50 |
-
<li>Para cambiar la velocidad de reproducción y la resolución de los vídeos, toque el icono de tres puntos en la esquina superior derecha del vídeo. Toca "Velocidad de reproducción" o "Calidad" y elige la opción que desees. También puedes cambiar estos ajustes desde el menú de configuración de la aplicación. </li>
|
51 |
-
<li>Para habilitar el modo oscuro y el modo de zoom, toque en el icono de perfil en la esquina superior derecha de la aplicación. Toque en "Configuración" y luego en "General". Activa "Tema oscuro" o "Zoom para llenar pantalla" como desees. También puedes cambiar estos ajustes desde el menú de configuración de la aplicación. </li>
|
52 |
-
<li>Para bloquear anuncios y omitir intros, no es necesario hacer nada. YouTube Channel APK bloquea automáticamente los anuncios y omite las intros para usted. Puede disfrutar de videos sin publicidad ni interrupciones sin problemas. </li>
|
53 |
-
<li>Para suscribirse a canales sin iniciar sesión, toque en el botón de suscripción debajo de cualquier canal que desee. Puedes encontrar tus canales suscritos en la sección "Suscripciones" de la aplicación. También puede darse de baja de cualquier canal tocando el mismo botón de nuevo. </li>
|
54 |
-
</ul>
|
55 |
-
<p>Como se puede ver, YouTube Channel APK ofrece muchas características y funciones que pueden mejorar su experiencia de YouTube. Sin embargo, antes de terminar este artículo, debe leer la conclusión y las preguntas frecuentes.</p>
|
56 |
-
<h2>Conclusión</h2>
|
57 |
-
<p>En este artículo, usted aprendió cómo descargar YouTube Channel APK para su dispositivo Android. Aprendiste lo que es YouTube Channel APK, por qué deberías descargarlo, cómo descargarlo de forma segura y fácil, cómo instalarlo en tu dispositivo y cómo usarlo en tu dispositivo. También aprendió acerca de algunas de las características y beneficios de YouTube Channel APK, tales como la descarga de vídeos y listas de reproducción para la visualización sin conexión, la reproducción de vídeos en segundo plano o en el modo de imagen-en-imagen, cambiar la velocidad de reproducción y la resolución de los vídeos, habilitar el modo oscuro y el modo de zoom, bloquear anuncios y omitir intros, y suscribirse a canales sin iniciar sesión. </p>
|
58 |
-
|
59 |
-
<p>Esperamos que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta o comentario sobre YouTube Channel APK, no dude en dejar un comentario a continuación. Nos encantaría saber de usted. ¡Gracias por leer! </p>
|
60 |
-
<h2>Preguntas frecuentes</h2>
|
61 |
-
<ol <li>¿Cuál es la diferencia entre YouTube Channel APK y YouTube Vanced? </li>
|
62 |
-
<p>Canal de YouTube APK y YouTube Vanced son ambas versiones modificadas de la aplicación oficial de YouTube que ofrecen más características y funciones. Sin embargo, tienen algunas diferencias en términos de diseño, rendimiento y compatibilidad. YouTube Channel APK tiene una interfaz más simple y limpia, mientras que YouTube Vanced tiene una interfaz más colorida y personalizable. YouTube Channel APK tiene un rendimiento más rápido y suave, mientras que YouTube Vanced tiene un rendimiento más estable y confiable. YouTube Channel APK es compatible con la mayoría de los dispositivos Android, mientras que YouTube Vanced requiere una versión específica de MicroG para funcionar correctamente. </p>
|
63 |
-
<li> ¿Es el canal de YouTube APK legal y seguro? </li>
|
64 |
-
<p>Canal de YouTube APK no es ilegal, pero no está autorizado por Google o YouTube tampoco. Es una aplicación de terceros que modifica la aplicación original de YouTube sin su permiso. Por lo tanto, podría violar algunos de sus términos y políticas. Sin embargo, YouTube Channel APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza como APKPure. No contiene ningún malware o virus que pueda dañar su dispositivo o robar sus datos. Sin embargo, siempre debe tener cuidado al descargar cualquier archivo APK de fuentes desconocidas, ya que podrían plantear algunos riesgos para su dispositivo o datos. </p>
|
65 |
-
<li> ¿Cómo puedo actualizar el canal de YouTube APK? </li>
|
66 |
-
|
67 |
-
<li> ¿Cómo puedo desinstalar YouTube Channel APK? </li>
|
68 |
-
<p>Desinstalar YouTube Channel APK es fácil y simple. Solo tiene que ir a la configuración de su dispositivo y toque en "Aplicaciones". A continuación, es necesario encontrar YouTube Channel APK de la lista de aplicaciones y toque en él. Entonces, es necesario tocar en "Desinstalar" y confirmar su elección. También puede desinstalar YouTube Channel APK pulsando largo su icono en la pantalla de inicio y arrastrándolo a la papelera. </p>
|
69 |
-
<li> ¿Puedo usar YouTube Channel APK con mi cuenta de Google? </li>
|
70 |
-
<p>No, no se puede utilizar YouTube Channel APK con su cuenta de Google. YouTube Channel APK no es compatible con Google características de inicio de sesión o sincronización. Por lo tanto, no puede acceder a sus datos personales o preferencias en YouTube Channel APK con su cuenta de Google. Solo se puede utilizar YouTube Channel APK con su propia función de suscripción, que no requiere ningún inicio de sesión o creación de cuenta. </p> 64aa2da5cf<br />
|
71 |
-
<br />
|
72 |
-
<br />
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/subversion.py
DELETED
@@ -1,324 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import os
|
3 |
-
import re
|
4 |
-
from typing import List, Optional, Tuple
|
5 |
-
|
6 |
-
from pip._internal.utils.misc import (
|
7 |
-
HiddenText,
|
8 |
-
display_path,
|
9 |
-
is_console_interactive,
|
10 |
-
is_installable_dir,
|
11 |
-
split_auth_from_netloc,
|
12 |
-
)
|
13 |
-
from pip._internal.utils.subprocess import CommandArgs, make_command
|
14 |
-
from pip._internal.vcs.versioncontrol import (
|
15 |
-
AuthInfo,
|
16 |
-
RemoteNotFoundError,
|
17 |
-
RevOptions,
|
18 |
-
VersionControl,
|
19 |
-
vcs,
|
20 |
-
)
|
21 |
-
|
22 |
-
logger = logging.getLogger(__name__)
|
23 |
-
|
24 |
-
_svn_xml_url_re = re.compile('url="([^"]+)"')
|
25 |
-
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
|
26 |
-
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
|
27 |
-
_svn_info_xml_url_re = re.compile(r"<url>(.*)</url>")
|
28 |
-
|
29 |
-
|
30 |
-
class Subversion(VersionControl):
|
31 |
-
name = "svn"
|
32 |
-
dirname = ".svn"
|
33 |
-
repo_name = "checkout"
|
34 |
-
schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file")
|
35 |
-
|
36 |
-
@classmethod
|
37 |
-
def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
|
38 |
-
return True
|
39 |
-
|
40 |
-
@staticmethod
|
41 |
-
def get_base_rev_args(rev: str) -> List[str]:
|
42 |
-
return ["-r", rev]
|
43 |
-
|
44 |
-
@classmethod
|
45 |
-
def get_revision(cls, location: str) -> str:
|
46 |
-
"""
|
47 |
-
Return the maximum revision for all files under a given location
|
48 |
-
"""
|
49 |
-
# Note: taken from setuptools.command.egg_info
|
50 |
-
revision = 0
|
51 |
-
|
52 |
-
for base, dirs, _ in os.walk(location):
|
53 |
-
if cls.dirname not in dirs:
|
54 |
-
dirs[:] = []
|
55 |
-
continue # no sense walking uncontrolled subdirs
|
56 |
-
dirs.remove(cls.dirname)
|
57 |
-
entries_fn = os.path.join(base, cls.dirname, "entries")
|
58 |
-
if not os.path.exists(entries_fn):
|
59 |
-
# FIXME: should we warn?
|
60 |
-
continue
|
61 |
-
|
62 |
-
dirurl, localrev = cls._get_svn_url_rev(base)
|
63 |
-
|
64 |
-
if base == location:
|
65 |
-
assert dirurl is not None
|
66 |
-
base = dirurl + "/" # save the root url
|
67 |
-
elif not dirurl or not dirurl.startswith(base):
|
68 |
-
dirs[:] = []
|
69 |
-
continue # not part of the same svn tree, skip it
|
70 |
-
revision = max(revision, localrev)
|
71 |
-
return str(revision)
|
72 |
-
|
73 |
-
@classmethod
|
74 |
-
def get_netloc_and_auth(
|
75 |
-
cls, netloc: str, scheme: str
|
76 |
-
) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
|
77 |
-
"""
|
78 |
-
This override allows the auth information to be passed to svn via the
|
79 |
-
--username and --password options instead of via the URL.
|
80 |
-
"""
|
81 |
-
if scheme == "ssh":
|
82 |
-
# The --username and --password options can't be used for
|
83 |
-
# svn+ssh URLs, so keep the auth information in the URL.
|
84 |
-
return super().get_netloc_and_auth(netloc, scheme)
|
85 |
-
|
86 |
-
return split_auth_from_netloc(netloc)
|
87 |
-
|
88 |
-
@classmethod
|
89 |
-
def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
|
90 |
-
# hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it
|
91 |
-
url, rev, user_pass = super().get_url_rev_and_auth(url)
|
92 |
-
if url.startswith("ssh://"):
|
93 |
-
url = "svn+" + url
|
94 |
-
return url, rev, user_pass
|
95 |
-
|
96 |
-
@staticmethod
|
97 |
-
def make_rev_args(
|
98 |
-
username: Optional[str], password: Optional[HiddenText]
|
99 |
-
) -> CommandArgs:
|
100 |
-
extra_args: CommandArgs = []
|
101 |
-
if username:
|
102 |
-
extra_args += ["--username", username]
|
103 |
-
if password:
|
104 |
-
extra_args += ["--password", password]
|
105 |
-
|
106 |
-
return extra_args
|
107 |
-
|
108 |
-
@classmethod
|
109 |
-
def get_remote_url(cls, location: str) -> str:
|
110 |
-
# In cases where the source is in a subdirectory, we have to look up in
|
111 |
-
# the location until we find a valid project root.
|
112 |
-
orig_location = location
|
113 |
-
while not is_installable_dir(location):
|
114 |
-
last_location = location
|
115 |
-
location = os.path.dirname(location)
|
116 |
-
if location == last_location:
|
117 |
-
# We've traversed up to the root of the filesystem without
|
118 |
-
# finding a Python project.
|
119 |
-
logger.warning(
|
120 |
-
"Could not find Python project for directory %s (tried all "
|
121 |
-
"parent directories)",
|
122 |
-
orig_location,
|
123 |
-
)
|
124 |
-
raise RemoteNotFoundError
|
125 |
-
|
126 |
-
url, _rev = cls._get_svn_url_rev(location)
|
127 |
-
if url is None:
|
128 |
-
raise RemoteNotFoundError
|
129 |
-
|
130 |
-
return url
|
131 |
-
|
132 |
-
@classmethod
|
133 |
-
def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]:
|
134 |
-
from pip._internal.exceptions import InstallationError
|
135 |
-
|
136 |
-
entries_path = os.path.join(location, cls.dirname, "entries")
|
137 |
-
if os.path.exists(entries_path):
|
138 |
-
with open(entries_path) as f:
|
139 |
-
data = f.read()
|
140 |
-
else: # subversion >= 1.7 does not have the 'entries' file
|
141 |
-
data = ""
|
142 |
-
|
143 |
-
url = None
|
144 |
-
if data.startswith("8") or data.startswith("9") or data.startswith("10"):
|
145 |
-
entries = list(map(str.splitlines, data.split("\n\x0c\n")))
|
146 |
-
del entries[0][0] # get rid of the '8'
|
147 |
-
url = entries[0][3]
|
148 |
-
revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0]
|
149 |
-
elif data.startswith("<?xml"):
|
150 |
-
match = _svn_xml_url_re.search(data)
|
151 |
-
if not match:
|
152 |
-
raise ValueError(f"Badly formatted data: {data!r}")
|
153 |
-
url = match.group(1) # get repository URL
|
154 |
-
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
|
155 |
-
else:
|
156 |
-
try:
|
157 |
-
# subversion >= 1.7
|
158 |
-
# Note that using get_remote_call_options is not necessary here
|
159 |
-
# because `svn info` is being run against a local directory.
|
160 |
-
# We don't need to worry about making sure interactive mode
|
161 |
-
# is being used to prompt for passwords, because passwords
|
162 |
-
# are only potentially needed for remote server requests.
|
163 |
-
xml = cls.run_command(
|
164 |
-
["info", "--xml", location],
|
165 |
-
show_stdout=False,
|
166 |
-
stdout_only=True,
|
167 |
-
)
|
168 |
-
match = _svn_info_xml_url_re.search(xml)
|
169 |
-
assert match is not None
|
170 |
-
url = match.group(1)
|
171 |
-
revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
|
172 |
-
except InstallationError:
|
173 |
-
url, revs = None, []
|
174 |
-
|
175 |
-
if revs:
|
176 |
-
rev = max(revs)
|
177 |
-
else:
|
178 |
-
rev = 0
|
179 |
-
|
180 |
-
return url, rev
|
181 |
-
|
182 |
-
@classmethod
|
183 |
-
def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
|
184 |
-
"""Always assume the versions don't match"""
|
185 |
-
return False
|
186 |
-
|
187 |
-
def __init__(self, use_interactive: Optional[bool] = None) -> None:
|
188 |
-
if use_interactive is None:
|
189 |
-
use_interactive = is_console_interactive()
|
190 |
-
self.use_interactive = use_interactive
|
191 |
-
|
192 |
-
# This member is used to cache the fetched version of the current
|
193 |
-
# ``svn`` client.
|
194 |
-
# Special value definitions:
|
195 |
-
# None: Not evaluated yet.
|
196 |
-
# Empty tuple: Could not parse version.
|
197 |
-
self._vcs_version: Optional[Tuple[int, ...]] = None
|
198 |
-
|
199 |
-
super().__init__()
|
200 |
-
|
201 |
-
def call_vcs_version(self) -> Tuple[int, ...]:
|
202 |
-
"""Query the version of the currently installed Subversion client.
|
203 |
-
|
204 |
-
:return: A tuple containing the parts of the version information or
|
205 |
-
``()`` if the version returned from ``svn`` could not be parsed.
|
206 |
-
:raises: BadCommand: If ``svn`` is not installed.
|
207 |
-
"""
|
208 |
-
# Example versions:
|
209 |
-
# svn, version 1.10.3 (r1842928)
|
210 |
-
# compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
|
211 |
-
# svn, version 1.7.14 (r1542130)
|
212 |
-
# compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
|
213 |
-
# svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)
|
214 |
-
# compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2
|
215 |
-
version_prefix = "svn, version "
|
216 |
-
version = self.run_command(["--version"], show_stdout=False, stdout_only=True)
|
217 |
-
if not version.startswith(version_prefix):
|
218 |
-
return ()
|
219 |
-
|
220 |
-
version = version[len(version_prefix) :].split()[0]
|
221 |
-
version_list = version.partition("-")[0].split(".")
|
222 |
-
try:
|
223 |
-
parsed_version = tuple(map(int, version_list))
|
224 |
-
except ValueError:
|
225 |
-
return ()
|
226 |
-
|
227 |
-
return parsed_version
|
228 |
-
|
229 |
-
def get_vcs_version(self) -> Tuple[int, ...]:
|
230 |
-
"""Return the version of the currently installed Subversion client.
|
231 |
-
|
232 |
-
If the version of the Subversion client has already been queried,
|
233 |
-
a cached value will be used.
|
234 |
-
|
235 |
-
:return: A tuple containing the parts of the version information or
|
236 |
-
``()`` if the version returned from ``svn`` could not be parsed.
|
237 |
-
:raises: BadCommand: If ``svn`` is not installed.
|
238 |
-
"""
|
239 |
-
if self._vcs_version is not None:
|
240 |
-
# Use cached version, if available.
|
241 |
-
# If parsing the version failed previously (empty tuple),
|
242 |
-
# do not attempt to parse it again.
|
243 |
-
return self._vcs_version
|
244 |
-
|
245 |
-
vcs_version = self.call_vcs_version()
|
246 |
-
self._vcs_version = vcs_version
|
247 |
-
return vcs_version
|
248 |
-
|
249 |
-
def get_remote_call_options(self) -> CommandArgs:
|
250 |
-
"""Return options to be used on calls to Subversion that contact the server.
|
251 |
-
|
252 |
-
These options are applicable for the following ``svn`` subcommands used
|
253 |
-
in this class.
|
254 |
-
|
255 |
-
- checkout
|
256 |
-
- switch
|
257 |
-
- update
|
258 |
-
|
259 |
-
:return: A list of command line arguments to pass to ``svn``.
|
260 |
-
"""
|
261 |
-
if not self.use_interactive:
|
262 |
-
# --non-interactive switch is available since Subversion 0.14.4.
|
263 |
-
# Subversion < 1.8 runs in interactive mode by default.
|
264 |
-
return ["--non-interactive"]
|
265 |
-
|
266 |
-
svn_version = self.get_vcs_version()
|
267 |
-
# By default, Subversion >= 1.8 runs in non-interactive mode if
|
268 |
-
# stdin is not a TTY. Since that is how pip invokes SVN, in
|
269 |
-
# call_subprocess(), pip must pass --force-interactive to ensure
|
270 |
-
# the user can be prompted for a password, if required.
|
271 |
-
# SVN added the --force-interactive option in SVN 1.8. Since
|
272 |
-
# e.g. RHEL/CentOS 7, which is supported until 2024, ships with
|
273 |
-
# SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip
|
274 |
-
# can't safely add the option if the SVN version is < 1.8 (or unknown).
|
275 |
-
if svn_version >= (1, 8):
|
276 |
-
return ["--force-interactive"]
|
277 |
-
|
278 |
-
return []
|
279 |
-
|
280 |
-
def fetch_new(
|
281 |
-
self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
|
282 |
-
) -> None:
|
283 |
-
rev_display = rev_options.to_display()
|
284 |
-
logger.info(
|
285 |
-
"Checking out %s%s to %s",
|
286 |
-
url,
|
287 |
-
rev_display,
|
288 |
-
display_path(dest),
|
289 |
-
)
|
290 |
-
if verbosity <= 0:
|
291 |
-
flag = "--quiet"
|
292 |
-
else:
|
293 |
-
flag = ""
|
294 |
-
cmd_args = make_command(
|
295 |
-
"checkout",
|
296 |
-
flag,
|
297 |
-
self.get_remote_call_options(),
|
298 |
-
rev_options.to_args(),
|
299 |
-
url,
|
300 |
-
dest,
|
301 |
-
)
|
302 |
-
self.run_command(cmd_args)
|
303 |
-
|
304 |
-
def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
|
305 |
-
cmd_args = make_command(
|
306 |
-
"switch",
|
307 |
-
self.get_remote_call_options(),
|
308 |
-
rev_options.to_args(),
|
309 |
-
url,
|
310 |
-
dest,
|
311 |
-
)
|
312 |
-
self.run_command(cmd_args)
|
313 |
-
|
314 |
-
def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
|
315 |
-
cmd_args = make_command(
|
316 |
-
"update",
|
317 |
-
self.get_remote_call_options(),
|
318 |
-
rev_options.to_args(),
|
319 |
-
dest,
|
320 |
-
)
|
321 |
-
self.run_command(cmd_args)
|
322 |
-
|
323 |
-
|
324 |
-
vcs.register(Subversion)
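
The call_vcs_version helper above parses the output of `svn --version` into an integer tuple before deciding whether `--force-interactive` can be added. A standalone sketch of just that parsing step (it mirrors the logic of the deleted file; it is not an import of pip's internal API, and the sample strings come from the comments above):

# Sketch: re-implementation of the version parsing done in call_vcs_version.
def parse_svn_version(output: str) -> tuple:
    prefix = "svn, version "
    if not output.startswith(prefix):
        return ()                                   # unparseable -> empty tuple
    version = output[len(prefix):].split()[0]       # e.g. "1.12.0-SlikSvn"
    parts = version.partition("-")[0].split(".")    # drop any vendor suffix
    try:
        return tuple(map(int, parts))
    except ValueError:
        return ()

assert parse_svn_version("svn, version 1.10.3 (r1842928)") == (1, 10, 3)
assert parse_svn_version("svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)") == (1, 12, 0)
assert parse_svn_version("unexpected output") == ()
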
spaces/Borda90/Titanic_Esp/app.py
DELETED
@@ -1,67 +0,0 @@
# This is a small and fast sklearn model, so the run-gradio script trains a model and deploys it

import pandas as pd
import numpy as np
import sklearn
import gradio as gr
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

data = pd.read_csv('https://raw.githubusercontent.com/gradio-app/titanic/master/train.csv')
data.head()

def encode_ages(df): # Binning ages
    df.Age = df.Age.fillna(-0.5)
    bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)
    categories = pd.cut(df.Age, bins, labels=False)
    df.Age = categories
    return df

def encode_fares(df): # Binning fares
    df.Fare = df.Fare.fillna(-0.5)
    bins = (-1, 0, 8, 15, 31, 1000)
    categories = pd.cut(df.Fare, bins, labels=False)
    df.Fare = categories
    return df

def encode_sex(df):
    mapping = {"male": 0, "female": 1}
    return df.replace({'Sex': mapping})

def transform_features(df):
    df = encode_ages(df)
    df = encode_fares(df)
    df = encode_sex(df)
    return df

train = data[['PassengerId', 'Fare', 'Age', 'Sex', 'Survived']]
train = transform_features(train)
train.head()

X_all = train.drop(['Survived', 'PassengerId'], axis=1)
y_all = train['Survived']

num_test = 0.20
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=num_test, random_state=23)

clf = RandomForestClassifier()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)

def predict_survival(sex, age, fare):
    df = pd.DataFrame.from_dict({'Sex': [sex], 'Age': [age], 'Fare': [fare]})
    df = encode_sex(df)
    df = encode_fares(df)
    df = encode_ages(df)
    pred = clf.predict_proba(df)[0]
    return {'Muere': float(pred[0]), 'Sobrevive': float(pred[1])}

sex = gr.inputs.Radio(['female', 'male'], label="Sexo")
age = gr.inputs.Slider(minimum=0, maximum=120, default=22, label="Edad")
fare = gr.inputs.Slider(minimum=0, maximum=200, default=100, label="Clase")

gr.Interface(predict_survival, [sex, age, fare], "label", live=True, thumbnail="https://raw.githubusercontent.com/gradio-app/hub-titanic/master/thumbnail.png", analytics_enabled=False,
             title="Sobrevivientes del Titanic", description="Analicemos los sobreviventes de este caso y comprobemos").launch();
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/extractor.py
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import logging
|
3 |
-
from typing import Sequence
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from detectron2.layers.nms import batched_nms
|
7 |
-
from detectron2.structures.instances import Instances
|
8 |
-
|
9 |
-
from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer
|
10 |
-
from densepose.vis.densepose import DensePoseResultsVisualizer
|
11 |
-
|
12 |
-
from .base import CompoundVisualizer
|
13 |
-
|
14 |
-
Scores = Sequence[float]
|
15 |
-
|
16 |
-
|
17 |
-
def extract_scores_from_instances(instances: Instances, select=None):
|
18 |
-
if instances.has("scores"):
|
19 |
-
return instances.scores if select is None else instances.scores[select]
|
20 |
-
return None
|
21 |
-
|
22 |
-
|
23 |
-
def extract_boxes_xywh_from_instances(instances: Instances, select=None):
|
24 |
-
if instances.has("pred_boxes"):
|
25 |
-
boxes_xywh = instances.pred_boxes.tensor.clone()
|
26 |
-
boxes_xywh[:, 2] -= boxes_xywh[:, 0]
|
27 |
-
boxes_xywh[:, 3] -= boxes_xywh[:, 1]
|
28 |
-
return boxes_xywh if select is None else boxes_xywh[select]
|
29 |
-
return None
|
30 |
-
|
31 |
-
|
32 |
-
def create_extractor(visualizer: object):
|
33 |
-
"""
|
34 |
-
Create an extractor for the provided visualizer
|
35 |
-
"""
|
36 |
-
if isinstance(visualizer, CompoundVisualizer):
|
37 |
-
extractors = [create_extractor(v) for v in visualizer.visualizers]
|
38 |
-
return CompoundExtractor(extractors)
|
39 |
-
elif isinstance(visualizer, DensePoseResultsVisualizer):
|
40 |
-
return DensePoseResultExtractor()
|
41 |
-
elif isinstance(visualizer, ScoredBoundingBoxVisualizer):
|
42 |
-
return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances])
|
43 |
-
elif isinstance(visualizer, BoundingBoxVisualizer):
|
44 |
-
return extract_boxes_xywh_from_instances
|
45 |
-
else:
|
46 |
-
logger = logging.getLogger(__name__)
|
47 |
-
logger.error(f"Could not create extractor for {visualizer}")
|
48 |
-
return None
|
49 |
-
|
50 |
-
|
51 |
-
class BoundingBoxExtractor(object):
|
52 |
-
"""
|
53 |
-
Extracts bounding boxes from instances
|
54 |
-
"""
|
55 |
-
|
56 |
-
def __call__(self, instances: Instances):
|
57 |
-
boxes_xywh = extract_boxes_xywh_from_instances(instances)
|
58 |
-
return boxes_xywh
|
59 |
-
|
60 |
-
|
61 |
-
class ScoredBoundingBoxExtractor(object):
|
62 |
-
"""
|
63 |
-
Extracts bounding boxes from instances
|
64 |
-
"""
|
65 |
-
|
66 |
-
def __call__(self, instances: Instances, select=None):
|
67 |
-
scores = extract_scores_from_instances(instances)
|
68 |
-
boxes_xywh = extract_boxes_xywh_from_instances(instances)
|
69 |
-
if (scores is None) or (boxes_xywh is None):
|
70 |
-
return (boxes_xywh, scores)
|
71 |
-
if select is not None:
|
72 |
-
scores = scores[select]
|
73 |
-
boxes_xywh = boxes_xywh[select]
|
74 |
-
return (boxes_xywh, scores)
|
75 |
-
|
76 |
-
|
77 |
-
class DensePoseResultExtractor(object):
|
78 |
-
"""
|
79 |
-
Extracts DensePose result from instances
|
80 |
-
"""
|
81 |
-
|
82 |
-
def __call__(self, instances: Instances, select=None):
|
83 |
-
boxes_xywh = extract_boxes_xywh_from_instances(instances)
|
84 |
-
if instances.has("pred_densepose") and (boxes_xywh is not None):
|
85 |
-
dpout = instances.pred_densepose
|
86 |
-
if select is not None:
|
87 |
-
dpout = dpout[select]
|
88 |
-
boxes_xywh = boxes_xywh[select]
|
89 |
-
return dpout.to_result(boxes_xywh)
|
90 |
-
else:
|
91 |
-
return None
|
92 |
-
|
93 |
-
|
94 |
-
class CompoundExtractor(object):
|
95 |
-
"""
|
96 |
-
Extracts data for CompoundVisualizer
|
97 |
-
"""
|
98 |
-
|
99 |
-
def __init__(self, extractors):
|
100 |
-
self.extractors = extractors
|
101 |
-
|
102 |
-
def __call__(self, instances: Instances, select=None):
|
103 |
-
datas = []
|
104 |
-
for extractor in self.extractors:
|
105 |
-
data = extractor(instances, select)
|
106 |
-
datas.append(data)
|
107 |
-
return datas
|
108 |
-
|
109 |
-
|
110 |
-
class NmsFilteredExtractor(object):
|
111 |
-
"""
|
112 |
-
Extracts data in the format accepted by NmsFilteredVisualizer
|
113 |
-
"""
|
114 |
-
|
115 |
-
def __init__(self, extractor, iou_threshold):
|
116 |
-
self.extractor = extractor
|
117 |
-
self.iou_threshold = iou_threshold
|
118 |
-
|
119 |
-
def __call__(self, instances: Instances, select=None):
|
120 |
-
scores = extract_scores_from_instances(instances)
|
121 |
-
boxes_xywh = extract_boxes_xywh_from_instances(instances)
|
122 |
-
if boxes_xywh is None:
|
123 |
-
return None
|
124 |
-
select_local_idx = batched_nms(
|
125 |
-
boxes_xywh,
|
126 |
-
scores,
|
127 |
-
torch.zeros(len(scores), dtype=torch.int32),
|
128 |
-
iou_threshold=self.iou_threshold,
|
129 |
-
).squeeze()
|
130 |
-
select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device)
|
131 |
-
select_local[select_local_idx] = True
|
132 |
-
select = select_local if select is None else (select & select_local)
|
133 |
-
return self.extractor(instances, select=select)
|
134 |
-
|
135 |
-
|
136 |
-
class ScoreThresholdedExtractor(object):
|
137 |
-
"""
|
138 |
-
Extracts data in the format accepted by ScoreThresholdedVisualizer
|
139 |
-
"""
|
140 |
-
|
141 |
-
def __init__(self, extractor, min_score):
|
142 |
-
self.extractor = extractor
|
143 |
-
self.min_score = min_score
|
144 |
-
|
145 |
-
def __call__(self, instances: Instances, select=None):
|
146 |
-
scores = extract_scores_from_instances(instances)
|
147 |
-
if scores is None:
|
148 |
-
return None
|
149 |
-
select_local = scores > self.min_score
|
150 |
-
select = select_local if select is None else (select & select_local)
|
151 |
-
data = self.extractor(instances, select=select)
|
152 |
-
return data
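
extract_boxes_xywh_from_instances converts detectron2's (x1, y1, x2, y2) box tensor into (x, y, width, height) form on a clone. The same conversion in isolation, with a hand-made tensor instead of an Instances object:

# Sketch: the xyxy -> xywh conversion performed by extract_boxes_xywh_from_instances.
import torch

boxes_xyxy = torch.tensor([[10., 20., 50., 80.],
                           [ 0.,  0., 30., 40.]])
boxes_xywh = boxes_xyxy.clone()
boxes_xywh[:, 2] -= boxes_xywh[:, 0]   # width  = x2 - x1
boxes_xywh[:, 3] -= boxes_xywh[:, 1]   # height = y2 - y1
print(boxes_xywh)                      # tensor([[10., 20., 40., 60.], [ 0.,  0., 30., 40.]])
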
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/copy.h
DELETED
@@ -1,34 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

// The purpose of this header is to #include the async/copy.h header of the
// sequential, host, and device systems. It should be #included in any code
// which uses ADL to dispatch async copy.

#pragma once

#include <thrust/detail/config.h>

//#include <thrust/system/detail/sequential/async/copy.h>

//#define __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/async/copy.h>
//#include __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER
//#undef __THRUST_HOST_SYSTEM_ASYNC_COPY_HEADER

#define __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/async/copy.h>
#include __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER
#undef __THRUST_DEVICE_SYSTEM_ASYNC_COPY_HEADER
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/copy.h
DELETED
@@ -1,59 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/system/detail/generic/tag.h>

namespace thrust
{
namespace system
{
namespace detail
{
namespace generic
{


template<typename DerivedPolicy,
         typename InputIterator,
         typename OutputIterator>
__host__ __device__
  OutputIterator copy(thrust::execution_policy<DerivedPolicy> &exec,
                      InputIterator first,
                      InputIterator last,
                      OutputIterator result);


template<typename DerivedPolicy,
         typename InputIterator,
         typename Size,
         typename OutputIterator>
__host__ __device__
  OutputIterator copy_n(thrust::execution_policy<DerivedPolicy> &exec,
                        InputIterator first,
                        Size n,
                        OutputIterator result);


} // end generic
} // end detail
} // end system
} // end thrust

#include <thrust/system/detail/generic/copy.inl>
spaces/Caoyunkang/Segment-Any-Anomaly/SAA/modelinet.py
DELETED
@@ -1,173 +0,0 @@
|
|
1 |
-
import timm
|
2 |
-
from copy import deepcopy
|
3 |
-
from typing import Tuple
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
import timm
|
7 |
-
import torch
|
8 |
-
from torch.nn import functional as F
|
9 |
-
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
|
10 |
-
|
11 |
-
|
12 |
-
class ResizeLongestSide:
|
13 |
-
"""
|
14 |
-
Resizes images to longest side 'target_length', as well as provides
|
15 |
-
methods for resizing coordinates and boxes. Provides methods for
|
16 |
-
transforming both numpy array and batched torch tensors.
|
17 |
-
"""
|
18 |
-
|
19 |
-
def __init__(self, target_length: int) -> None:
|
20 |
-
self.target_length = target_length
|
21 |
-
|
22 |
-
def apply_image(self, image: np.ndarray) -> np.ndarray:
|
23 |
-
"""
|
24 |
-
Expects a numpy array with shape HxWxC in uint8 format.
|
25 |
-
"""
|
26 |
-
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
|
27 |
-
return np.array(resize(to_pil_image(image), target_size))
|
28 |
-
|
29 |
-
def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
|
30 |
-
"""
|
31 |
-
Expects a numpy array of length 2 in the final dimension. Requires the
|
32 |
-
original image size in (H, W) format.
|
33 |
-
"""
|
34 |
-
old_h, old_w = original_size
|
35 |
-
new_h, new_w = self.get_preprocess_shape(
|
36 |
-
original_size[0], original_size[1], self.target_length
|
37 |
-
)
|
38 |
-
coords = deepcopy(coords).astype(float)
|
39 |
-
coords[..., 0] = coords[..., 0] * (new_w / old_w)
|
40 |
-
coords[..., 1] = coords[..., 1] * (new_h / old_h)
|
41 |
-
return coords
|
42 |
-
|
43 |
-
def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
|
44 |
-
"""
|
45 |
-
Expects a numpy array shape Bx4. Requires the original image size
|
46 |
-
in (H, W) format.
|
47 |
-
"""
|
48 |
-
boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
|
49 |
-
return boxes.reshape(-1, 4)
|
50 |
-
|
51 |
-
def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
|
52 |
-
"""
|
53 |
-
Expects batched images with shape BxCxHxW and float format. This
|
54 |
-
transformation may not exactly match apply_image. apply_image is
|
55 |
-
the transformation expected by the model.
|
56 |
-
"""
|
57 |
-
# Expects an image in BCHW format. May not exactly match apply_image.
|
58 |
-
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
|
59 |
-
return F.interpolate(
|
60 |
-
image, target_size, mode="bilinear", align_corners=False, antialias=True
|
61 |
-
)
|
62 |
-
|
63 |
-
def apply_coords_torch(
|
64 |
-
self, coords: torch.Tensor, original_size: Tuple[int, ...]
|
65 |
-
) -> torch.Tensor:
|
66 |
-
"""
|
67 |
-
Expects a torch tensor with length 2 in the last dimension. Requires the
|
68 |
-
original image size in (H, W) format.
|
69 |
-
"""
|
70 |
-
old_h, old_w = original_size
|
71 |
-
new_h, new_w = self.get_preprocess_shape(
|
72 |
-
original_size[0], original_size[1], self.target_length
|
73 |
-
)
|
74 |
-
coords = deepcopy(coords).to(torch.float)
|
75 |
-
coords[..., 0] = coords[..., 0] * (new_w / old_w)
|
76 |
-
coords[..., 1] = coords[..., 1] * (new_h / old_h)
|
77 |
-
return coords
|
78 |
-
|
79 |
-
def apply_boxes_torch(
|
80 |
-
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
|
81 |
-
) -> torch.Tensor:
|
82 |
-
"""
|
83 |
-
Expects a torch tensor with shape Bx4. Requires the original image
|
84 |
-
size in (H, W) format.
|
85 |
-
"""
|
86 |
-
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
|
87 |
-
return boxes.reshape(-1, 4)
|
88 |
-
|
89 |
-
@staticmethod
|
90 |
-
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
|
91 |
-
"""
|
92 |
-
Compute the output size given input size and target long side length.
|
93 |
-
"""
|
94 |
-
scale = long_side_length * 1.0 / max(oldh, oldw)
|
95 |
-
newh, neww = oldh * scale, oldw * scale
|
96 |
-
neww = int(neww + 0.5)
|
97 |
-
newh = int(newh + 0.5)
|
98 |
-
return (newh, neww)
|
99 |
-
|
100 |
-
|
101 |
-
class ModelINet(torch.nn.Module):
|
102 |
-
# hrnet_w32, wide_resnet50_2
|
103 |
-
def __init__(self, device, backbone_name='wide_resnet50_2', out_indices=(1, 2, 3), checkpoint_path='',
|
104 |
-
pool_last=False):
|
105 |
-
super().__init__()
|
106 |
-
# Determine if to output features.
|
107 |
-
kwargs = {'features_only': True if out_indices else False}
|
108 |
-
if out_indices:
|
109 |
-
kwargs.update({'out_indices': out_indices})
|
110 |
-
print(backbone_name)
|
111 |
-
|
112 |
-
self.device = device
|
113 |
-
self.backbone = timm.create_model(model_name=backbone_name, pretrained=True, checkpoint_path=checkpoint_path,
|
114 |
-
**kwargs)
|
115 |
-
self.backbone.eval()
|
116 |
-
self.backbone = self.backbone.to(self.device)
|
117 |
-
|
118 |
-
self.avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1)) if pool_last else None
|
119 |
-
|
120 |
-
self.pixel_mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).view(-1, 1, 1).to(self.device)
|
121 |
-
self.pixel_std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).view(-1, 1, 1).to(self.device)
|
122 |
-
|
123 |
-
self.img_size = 1024
|
124 |
-
self.transform_size = ResizeLongestSide(self.img_size)
|
125 |
-
|
126 |
-
def set_img_size(self, img_size):
|
127 |
-
self.img_size = img_size
|
128 |
-
self.transform_size = ResizeLongestSide(self.img_size)
|
129 |
-
|
130 |
-
def preprocess(self, image: np.ndarray):
|
131 |
-
"""Normalize pixel values and pad to a square input."""
|
132 |
-
|
133 |
-
input_image = self.transform_size.apply_image(image)
|
134 |
-
input_image_torch = torch.as_tensor(input_image, device=self.device)
|
135 |
-
x = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
|
136 |
-
|
137 |
-
# Normalize colors
|
138 |
-
x = (x - self.pixel_mean) / self.pixel_std
|
139 |
-
|
140 |
-
# Pad
|
141 |
-
h, w = x.shape[-2:]
|
142 |
-
padh = self.img_size - h
|
143 |
-
padw = self.img_size - w
|
144 |
-
x = F.pad(x, (0, padw, 0, padh))
|
145 |
-
|
146 |
-
ratio_h = h / self.img_size
|
147 |
-
ratio_w = w / self.img_size
|
148 |
-
return x, ratio_h, ratio_w
|
149 |
-
|
150 |
-
@torch.no_grad()
|
151 |
-
def forward(self, x):
|
152 |
-
x, ratio_h, ratio_w = self.preprocess(x)
|
153 |
-
x = x.to(self.device)
|
154 |
-
|
155 |
-
# Backbone forward pass.
|
156 |
-
features = self.backbone(x)
|
157 |
-
|
158 |
-
# Adaptive average pool over the last layer.
|
159 |
-
if self.avg_pool:
|
160 |
-
fmap = features[-1]
|
161 |
-
fmap = self.avg_pool(fmap)
|
162 |
-
fmap = torch.flatten(fmap, 1)
|
163 |
-
features.append(fmap)
|
164 |
-
|
165 |
-
size_0 = features[0].shape[2:]
|
166 |
-
|
167 |
-
for i in range(1, len(features)):
|
168 |
-
features[i] = F.interpolate(features[i], size_0)
|
169 |
-
|
170 |
-
features = torch.cat(features, dim=1)
|
171 |
-
features = F.normalize(features, dim=1)
|
172 |
-
|
173 |
-
return features, ratio_h, ratio_w
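
ResizeLongestSide.get_preprocess_shape scales an image so that its longest side matches the target length (1024 in ModelINet) while keeping the aspect ratio. The rounding can be checked in isolation (example sizes are arbitrary):

# Sketch: the longest-side rescaling used by ResizeLongestSide.get_preprocess_shape.
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> tuple:
    scale = long_side_length * 1.0 / max(oldh, oldw)
    newh, neww = oldh * scale, oldw * scale
    return (int(newh + 0.5), int(neww + 0.5))       # round to nearest integer

print(get_preprocess_shape(480, 640, 1024))   # (768, 1024): width was the long side
print(get_preprocess_shape(900, 300, 1024))   # (1024, 341): height was the long side
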
spaces/Chris4K/llms_compare/Cedie-Ang-Munting-Prinsipe-Tagalog-Version-Full-Movie-Episode-1.md
DELETED
@@ -1,60 +0,0 @@
## cedie ang munting prinsipe tagalog version full movie episode 1

**Download File ->>> [https://eromdesre.blogspot.com/?d=2txP07](https://eromdesre.blogspot.com/?d=2txP07)**

# Cedie Ang Munting Prinsipe: A Classic Anime Adaptation of a Beloved Novel

Cedie Ang Munting Prinsipe is a Filipino-dubbed anime series based on the novel Little Lord Fauntleroy by Frances Hodgson Burnett. It tells the story of Cedric Errol, a kind and cheerful boy who lives with his mother in New York. One day, he learns that he is the heir of his grandfather, the Earl of Dorincourt, who lives in England. He travels to meet his grandfather, who is cold and stern, but gradually warms up to Cedie's innocence and goodness.

The anime series was produced by Nippon Animation in 1988 as part of the World Masterpiece Theater collection. It has 43 episodes, each about 25 minutes long. The first episode introduces Cedie's life in New York, his friendship with Dick Tipton, a shoeshine boy, and his encounter with Mr. Hobbs, a grocer who becomes his mentor. It also shows how Cedie receives a letter from his grandfather's lawyer, informing him of his inheritance and inviting him to England.

The series was dubbed in Tagalog by ABS-CBN and aired in the Philippines in the early 1990s. It became one of the most popular and beloved anime shows among Filipino children and adults alike. Many viewers were moved by Cedie's kindness, courage, and loyalty, as well as his relationship with his mother and grandfather. The series also featured memorable songs, such as "Ang Munting Prinsipe" (The Little Prince), "Ikaw ang Lahat sa Akin" (You Are Everything to Me), and "Pag-ibig ang Nagbibigay Buhay" (Love Gives Life).

Cedie Ang Munting Prinsipe is a classic anime adaptation of a beloved novel that has touched the hearts of many generations. It is a story of love, family, and friendship that transcends time and culture. You can watch the first episode of the Tagalog version on YouTube[^1^]. Enjoy!

In the following episodes, Cedie arrives in England and meets his grandfather, who is initially cold and distant. He also meets his cousin Wilkins, who is jealous and spiteful of him. Cedie tries to win his grandfather's affection and respect, while also learning about the life and responsibilities of a lord. He makes friends with the servants and the villagers, and helps them with their problems. He also faces various challenges and dangers, such as being kidnapped by his uncle Bevis, who wants to claim the earldom.

Meanwhile, Cedie's mother, who is called Dearest by him, stays in a cottage near the castle. She is not allowed to see her son or her father-in-law, as the Earl disapproves of her marriage to his son. She is kind and gentle, and supports Cedie from afar. She also teaches him about his father, who died when he was a baby. Cedie's father was a brave and noble man, who fought in the American Civil War and married Dearest against his father's wishes.

As the story progresses, Cedie and his grandfather develop a close and loving bond. The Earl becomes more kind and generous, thanks to Cedie's influence. He also learns to appreciate Dearest and reconciles with her. However, their happiness is threatened when a woman named Minna claims that her son Tom is the real heir of the earldom, as she was married to Cedie's father before Dearest. Cedie faces the possibility of losing his title and his grandfather's love, but he remains faithful and hopeful.

dfd1c89656
spaces/ChristopherMarais/Andrew_Alpha/app.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import dill
|
3 |
-
import timm
|
4 |
-
import random
|
5 |
-
import numpy as np
|
6 |
-
import gradio as gr
|
7 |
-
from fastai.tabular.all import *
|
8 |
-
from fastai.vision.all import *
|
9 |
-
from fastai.vision.utils import get_image_files
|
10 |
-
from fastai.learner import load_learner
|
11 |
-
from Ambrosia import pre_process_image
|
12 |
-
from huggingface_hub import from_pretrained_fastai, push_to_hub_fastai, hf_hub_download
|
13 |
-
from torchvision.transforms import GaussianBlur
|
14 |
-
|
15 |
-
# Set the token
|
16 |
-
os.environ["HUGGINGFACE_TOKEN"] = "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"
|
17 |
-
|
18 |
-
# # Define a custom transform for Gaussian blur
|
19 |
-
# def gaussian_blur(x, p=0.5, kernel_size_min=3, kernel_size_max=9, sigma_min=0.1, sigma_max=2):
|
20 |
-
# if x.ndim == 4:
|
21 |
-
# for i in range(x.shape[0]):
|
22 |
-
# if random.random() < p:
|
23 |
-
# kernel_size = random.randrange(kernel_size_min, kernel_size_max + 1, 2)
|
24 |
-
# sigma = random.uniform(sigma_min, sigma_max)
|
25 |
-
# x[i] = GaussianBlur(kernel_size=kernel_size, sigma=sigma)(x[i])
|
26 |
-
# return x
|
27 |
-
|
28 |
-
# Define a custom transform for Gaussian blur
|
29 |
-
def gaussian_blur(x, p=0.5, kernel_size_min=3, kernel_size_max=20, sigma_min=0.1, sigma_max=3):
|
30 |
-
if x.ndim == 4:
|
31 |
-
for i in range(x.shape[0]):
|
32 |
-
if random.random() < p:
|
33 |
-
kernel_size = random.randrange(kernel_size_min, kernel_size_max + 1, 2)
|
34 |
-
sigma = random.uniform(sigma_min, sigma_max)
|
35 |
-
x[i] = GaussianBlur(kernel_size=kernel_size, sigma=sigma)(x[i])
|
36 |
-
return x
|
37 |
-
|
38 |
-
# this function only describes how much a single value in a list stands out.
|
39 |
-
# if all values in the list are high or low this is 1
|
40 |
-
# the smaller the proportion of dissimilar values relative to the more similar values, the lower this number
|
41 |
-
# the larger the gap between the dissimilar numbers and the similar numbers, the smaller this number
|
42 |
-
# only able to interpret probabilities or values between 0 and 1
|
43 |
-
# this function outputs an estimate of the inverse of the classification confidence based on the probabilities of all the classes.
|
44 |
-
# the wedge threshold splits the data on a threshold with a magnitude of a positive int to force a ledge/peak in the data
|
45 |
-
def unkown_prob_calc(probs, wedge_threshold, wedge_magnitude=1, wedge='strict'):
|
46 |
-
if wedge =='strict':
|
47 |
-
increase_var = (1/(wedge_magnitude))
|
48 |
-
decrease_var = (wedge_magnitude)
|
49 |
-
if wedge =='dynamic': # this allows pointsthat are furhter from the threshold ot be moved less and points clsoer to be moved more
|
50 |
-
increase_var = (1/(wedge_magnitude*((1-np.abs(probs-wedge_threshold)))))
|
51 |
-
decrease_var = (wedge_magnitude*((1-np.abs(probs-wedge_threshold))))
|
52 |
-
else:
|
53 |
-
print("Error: use 'strict' (default) or 'dynamic' as options for the wedge parameter!")
|
54 |
-
probs = np.where(probs>=wedge_threshold , probs**increase_var, probs)
|
55 |
-
probs = np.where(probs<=wedge_threshold , probs**decrease_var, probs)
|
56 |
-
diff_matrix = np.abs(probs[:, np.newaxis] - probs)
|
57 |
-
diff_matrix_sum = np.sum(diff_matrix)
|
58 |
-
probs_sum = np.sum(probs)
|
59 |
-
class_val = (diff_matrix_sum/probs_sum)
|
60 |
-
max_class_val = ((len(probs)-1)*2)
|
61 |
-
kown_prob = class_val/max_class_val
|
62 |
-
unknown_prob = 1-kown_prob
|
63 |
-
return(unknown_prob)
|
64 |
-
|
65 |
-
# load model
|
66 |
-
learn = from_pretrained_fastai("ChristopherMarais/beetle-model")
|
67 |
-
# learn = load_learner(
|
68 |
-
# hf_hub_download('ChristopherMarais/Andrew_Alpha_model', filename="model.pkl")
|
69 |
-
# )
|
70 |
-
|
71 |
-
# get class names
|
72 |
-
labels = np.append(np.array(learn.dls.vocab), "Unknown")
|
73 |
-
|
74 |
-
def predict(img):
|
75 |
-
# Segment image into smaller images
|
76 |
-
pre_process = pre_process_image(manual_thresh_buffer=0.15, image = img) # use image_dir if directory of image used
|
77 |
-
pre_process.segment(cluster_num=2,
|
78 |
-
image_edge_buffer=50)
|
79 |
-
# get predictions for all segments
|
80 |
-
conf_dict_lst = []
|
81 |
-
output_lst = []
|
82 |
-
img_cnt = len(pre_process.col_image_lst)
|
83 |
-
for i in range(0,img_cnt):
|
84 |
-
prob_ar = np.array(learn.predict(pre_process.col_image_lst[i])[2])
|
85 |
-
unkown_prob = unkown_prob_calc(probs=prob_ar, wedge_threshold=0.85, wedge_magnitude=5, wedge='dynamic')
|
86 |
-
prob_ar = np.append(prob_ar, unkown_prob)
|
87 |
-
prob_ar = np.around(prob_ar*100, decimals=1)
|
88 |
-
|
89 |
-
conf_dict = {labels[i]: float(prob_ar[i]) for i in range(len(prob_ar))}
|
90 |
-
conf_dict = dict(sorted(conf_dict.items(), key=lambda item: item[1], reverse=True))
|
91 |
-
conf_dict_lst.append(str(conf_dict))
|
92 |
-
result = list(zip(pre_process.col_image_lst, conf_dict_lst))
|
93 |
-
|
94 |
-
return(result)
|
95 |
-
|
96 |
-
with gr.Blocks() as demo:
|
97 |
-
with gr.Column(variant="panel"):
|
98 |
-
with gr.Row(variant="compact"):
|
99 |
-
inputs = gr.Image()
|
100 |
-
btn = gr.Button("Classify").style(full_width=False)
|
101 |
-
|
102 |
-
gallery = gr.Gallery(
|
103 |
-
label="Show images", show_label=True, elem_id="gallery"
|
104 |
-
).style(grid=[8], height="auto")
|
105 |
-
|
106 |
-
btn.click(predict, inputs, gallery)
|
107 |
-
demo.launch()
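
unkown_prob_calc turns a class-probability vector into an "unknown" score: values are sharpened or flattened around a wedge threshold, and the total pairwise separation is compared with its maximum possible value. A standalone run of the same arithmetic for the strict-wedge branch (the probability vector is an arbitrary example):

# Sketch: the strict-wedge + pairwise-difference arithmetic behind unkown_prob_calc.
import numpy as np

probs = np.array([0.90, 0.05, 0.05])       # one confident class
wedge_threshold, wedge_magnitude = 0.85, 5

p = np.where(probs >= wedge_threshold, probs ** (1 / wedge_magnitude), probs)
p = np.where(p <= wedge_threshold, p ** wedge_magnitude, p)

diff_sum = np.sum(np.abs(p[:, np.newaxis] - p))    # total pairwise separation
class_val = diff_sum / np.sum(p)
known_prob = class_val / ((len(p) - 1) * 2)        # normalize by the maximum separation
print(1 - known_prob)                              # near 0 -> confident, i.e. "known"
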
spaces/CosmoAI/BhagwatGeeta/app.py
DELETED
@@ -1,196 +0,0 @@
|
|
1 |
-
import google.generativeai as palm
|
2 |
-
import streamlit as st
|
3 |
-
import os
|
4 |
-
|
5 |
-
# Set your API key
|
6 |
-
palm.configure(api_key = os.environ['PALM_KEY'])
|
7 |
-
|
8 |
-
# Select the PaLM 2 model
|
9 |
-
model = 'models/text-bison-001'
|
10 |
-
|
11 |
-
# Generate text
|
12 |
-
if prompt := st.chat_input("Ask your query..."):
|
13 |
-
enprom = f"""Answer the below provided input in context to Bhagwad Geeta. Use the verses and chapters sentences as references to your answer with suggestions
|
14 |
-
coming from Bhagwad Geeta. Your answer to below input should only be in context to Bhagwad geeta only.\nInput= {prompt}"""
|
15 |
-
completion = palm.generate_text(model=model, prompt=enprom, temperature=0.5, max_output_tokens=800)
|
16 |
-
|
17 |
-
# response = palm.chat(messages=["Hello."])
|
18 |
-
# print(response.last) # 'Hello! What can I help you with?'
|
19 |
-
# response.reply("Can you tell me a joke?")
|
20 |
-
|
21 |
-
# Print the generated text
|
22 |
-
with st.chat_message("Assistant"):
|
23 |
-
st.write(completion.result)
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
# import streamlit as st
|
33 |
-
# from dotenv import load_dotenv
|
34 |
-
# from PyPDF2 import PdfReader
|
35 |
-
# from langchain.text_splitter import CharacterTextSplitter
|
36 |
-
# from langchain.embeddings import HuggingFaceEmbeddings
|
37 |
-
# from langchain.vectorstores import FAISS
|
38 |
-
# # from langchain.chat_models import ChatOpenAI
|
39 |
-
# from langchain.memory import ConversationBufferMemory
|
40 |
-
# from langchain.chains import ConversationalRetrievalChain
|
41 |
-
# from htmlTemplates import css, bot_template, user_template
|
42 |
-
# from langchain.llms import HuggingFaceHub
|
43 |
-
# import os
|
44 |
-
# # from transformers import T5Tokenizer, T5ForConditionalGeneration
|
45 |
-
# # from langchain.callbacks import get_openai_callback
|
46 |
-
|
47 |
-
# hub_token = os.environ["HUGGINGFACE_HUB_TOKEN"]
|
48 |
-
|
49 |
-
# def get_pdf_text(pdf_docs):
|
50 |
-
# text = ""
|
51 |
-
# for pdf in pdf_docs:
|
52 |
-
# pdf_reader = PdfReader(pdf)
|
53 |
-
# for page in pdf_reader.pages:
|
54 |
-
# text += page.extract_text()
|
55 |
-
# return text
|
56 |
-
|
57 |
-
|
58 |
-
# def get_text_chunks(text):
|
59 |
-
# text_splitter = CharacterTextSplitter(
|
60 |
-
# separator="\n",
|
61 |
-
# chunk_size=200,
|
62 |
-
# chunk_overlap=20,
|
63 |
-
# length_function=len
|
64 |
-
# )
|
65 |
-
# chunks = text_splitter.split_text(text)
|
66 |
-
# return chunks
|
67 |
-
|
68 |
-
|
69 |
-
# def get_vectorstore(text_chunks):
|
70 |
-
# # embeddings = OpenAIEmbeddings()
|
71 |
-
# # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
|
72 |
-
# embeddings = HuggingFaceEmbeddings()
|
73 |
-
# vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
|
74 |
-
# return vectorstore
|
75 |
-
|
76 |
-
|
77 |
-
# def get_conversation_chain(vectorstore):
|
78 |
-
# # llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
|
79 |
-
# # tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
|
80 |
-
# # model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
|
81 |
-
|
82 |
-
# llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-v0.1", huggingfacehub_api_token=hub_token, model_kwargs={"temperature":0.5, "max_length":20})
|
83 |
-
|
84 |
-
# memory = ConversationBufferMemory(
|
85 |
-
# memory_key='chat_history', return_messages=True)
|
86 |
-
# conversation_chain = ConversationalRetrievalChain.from_llm(
|
87 |
-
# llm=llm,
|
88 |
-
# retriever=vectorstore.as_retriever(),
|
89 |
-
# memory=memory
|
90 |
-
# )
|
91 |
-
# return conversation_chain
|
92 |
-
|
93 |
-
|
94 |
-
# def handle_userinput(user_question):
|
95 |
-
# response = st.session_state.conversation
|
96 |
-
# reply = response.run(user_question)
|
97 |
-
# st.write(reply)
|
98 |
-
# # st.session_state.chat_history = response['chat_history']
|
99 |
-
|
100 |
-
# # for i, message in enumerate(st.session_state.chat_history):
|
101 |
-
# # if i % 2 == 0:
|
102 |
-
# # st.write(user_template.replace(
|
103 |
-
# # "{{MSG}}", message.content), unsafe_allow_html=True)
|
104 |
-
# # else:
|
105 |
-
# # st.write(bot_template.replace(
|
106 |
-
# # "{{MSG}}", message.content), unsafe_allow_html=True)
|
107 |
-
|
108 |
-
|
109 |
-
# def main():
|
110 |
-
# load_dotenv()
|
111 |
-
# st.set_page_config(page_title="Chat with multiple PDFs",
|
112 |
-
# page_icon=":books:")
|
113 |
-
# st.write(css, unsafe_allow_html=True)
|
114 |
-
|
115 |
-
# if "conversation" not in st.session_state:
|
116 |
-
# st.session_state.conversation = None
|
117 |
-
# if "chat_history" not in st.session_state:
|
118 |
-
# st.session_state.chat_history = None
|
119 |
-
|
120 |
-
# st.header("Chat with multiple PDFs :books:")
|
121 |
-
# user_question = st.text_input("Ask a question about your documents:")
|
122 |
-
# if user_question:
|
123 |
-
# handle_userinput(user_question)
|
124 |
-
|
125 |
-
# with st.sidebar:
|
126 |
-
# st.subheader("Your documents")
|
127 |
-
# pdf_docs = st.file_uploader(
|
128 |
-
# "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
|
129 |
-
# if st.button("Process"):
|
130 |
-
# if(len(pdf_docs) == 0):
|
131 |
-
# st.error("Please upload at least one PDF")
|
132 |
-
# else:
|
133 |
-
# with st.spinner("Processing"):
|
134 |
-
# # get pdf text
|
135 |
-
# raw_text = get_pdf_text(pdf_docs)
|
136 |
-
|
137 |
-
# # get the text chunks
|
138 |
-
# text_chunks = get_text_chunks(raw_text)
|
139 |
-
|
140 |
-
# # create vector store
|
141 |
-
# vectorstore = get_vectorstore(text_chunks)
|
142 |
-
|
143 |
-
# # create conversation chain
|
144 |
-
# st.session_state.conversation = get_conversation_chain(
|
145 |
-
# vectorstore)
|
146 |
-
|
147 |
-
# if __name__ == '__main__':
|
148 |
-
# main()
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
# # import os
|
156 |
-
# # import getpass
|
157 |
-
# # import streamlit as st
|
158 |
-
# # from langchain.document_loaders import PyPDFLoader
|
159 |
-
# # from langchain.text_splitter import RecursiveCharacterTextSplitter
|
160 |
-
# # from langchain.embeddings import HuggingFaceEmbeddings
|
161 |
-
# # from langchain.vectorstores import Chroma
|
162 |
-
# # from langchain import HuggingFaceHub
|
163 |
-
# # from langchain.chains import RetrievalQA
|
164 |
-
# # # __import__('pysqlite3')
|
165 |
-
# # # import sys
|
166 |
-
# # # sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
|
167 |
-
|
168 |
-
|
169 |
-
# # # load huggingface api key
|
170 |
-
# # hubtok = os.environ["HUGGINGFACE_HUB_TOKEN"]
|
171 |
-
|
172 |
-
# # # use streamlit file uploader to ask user for file
|
173 |
-
# # # file = st.file_uploader("Upload PDF")
|
174 |
-
|
175 |
-
|
176 |
-
# # path = "Geeta.pdf"
|
177 |
-
# # loader = PyPDFLoader(path)
|
178 |
-
# # pages = loader.load()
|
179 |
-
|
180 |
-
# # # st.write(pages)
|
181 |
-
|
182 |
-
# # splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
|
183 |
-
# # docs = splitter.split_documents(pages)
|
184 |
-
|
185 |
-
# # embeddings = HuggingFaceEmbeddings()
|
186 |
-
# # doc_search = Chroma.from_documents(docs, embeddings)
|
187 |
-
|
188 |
-
# # repo_id = "tiiuae/falcon-7b"
|
189 |
-
# # llm = HuggingFaceHub(repo_id=repo_id, huggingfacehub_api_token=hubtok, model_kwargs={'temperature': 0.2,'max_length': 1000})
|
190 |
-
|
191 |
-
# # from langchain.schema import retriever
|
192 |
-
# # retireval_chain = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=doc_search.as_retriever())
|
193 |
-
|
194 |
-
# # if query := st.chat_input("Enter a question: "):
|
195 |
-
# # with st.chat_message("assistant"):
|
196 |
-
# # st.write(retireval_chain.run(query))
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/util.py
DELETED
@@ -1,197 +0,0 @@
|
|
1 |
-
import importlib
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch import optim
|
5 |
-
import numpy as np
|
6 |
-
|
7 |
-
from inspect import isfunction
|
8 |
-
from PIL import Image, ImageDraw, ImageFont
|
9 |
-
|
10 |
-
|
11 |
-
def log_txt_as_img(wh, xc, size=10):
|
12 |
-
# wh a tuple of (width, height)
|
13 |
-
# xc a list of captions to plot
|
14 |
-
b = len(xc)
|
15 |
-
txts = list()
|
16 |
-
for bi in range(b):
|
17 |
-
txt = Image.new("RGB", wh, color="white")
|
18 |
-
draw = ImageDraw.Draw(txt)
|
19 |
-
font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)
|
20 |
-
nc = int(40 * (wh[0] / 256))
|
21 |
-
lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
|
22 |
-
|
23 |
-
try:
|
24 |
-
-            draw.text((0, 0), lines, fill="black", font=font)
-        except UnicodeEncodeError:
-            print("Cant encode string for logging. Skipping.")
-
-        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
-        txts.append(txt)
-    txts = np.stack(txts)
-    txts = torch.tensor(txts)
-    return txts
-
-
-def ismap(x):
-    if not isinstance(x, torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] > 3)
-
-
-def isimage(x):
-    if not isinstance(x,torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
-
-
-def exists(x):
-    return x is not None
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def mean_flat(tensor):
-    """
-    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def count_params(model, verbose=False):
-    total_params = sum(p.numel() for p in model.parameters())
-    if verbose:
-        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
-    return total_params
-
-
-def instantiate_from_config(config):
-    if not "target" in config:
-        if config == '__is_first_stage__':
-            return None
-        elif config == "__is_unconditional__":
-            return None
-        raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
-
-
-def get_obj_from_str(string, reload=False):
-    module, cls = string.rsplit(".", 1)
-    if reload:
-        module_imp = importlib.import_module(module)
-        importlib.reload(module_imp)
-    return getattr(importlib.import_module(module, package=None), cls)
-
-
-class AdamWwithEMAandWings(optim.Optimizer):
-    # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298
-    def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8,  # TODO: check hyperparameters before using
-                 weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999,  # ema decay to match previous code
-                 ema_power=1., param_names=()):
-        """AdamW that saves EMA versions of the parameters."""
-        if not 0.0 <= lr:
-            raise ValueError("Invalid learning rate: {}".format(lr))
-        if not 0.0 <= eps:
-            raise ValueError("Invalid epsilon value: {}".format(eps))
-        if not 0.0 <= betas[0] < 1.0:
-            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
-        if not 0.0 <= betas[1] < 1.0:
-            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
-        if not 0.0 <= weight_decay:
-            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
-        if not 0.0 <= ema_decay <= 1.0:
-            raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
-        defaults = dict(lr=lr, betas=betas, eps=eps,
-                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
-                        ema_power=ema_power, param_names=param_names)
-        super().__init__(params, defaults)
-
-    def __setstate__(self, state):
-        super().__setstate__(state)
-        for group in self.param_groups:
-            group.setdefault('amsgrad', False)
-
-    @torch.no_grad()
-    def step(self, closure=None):
-        """Performs a single optimization step.
-        Args:
-            closure (callable, optional): A closure that reevaluates the model
-                and returns the loss.
-        """
-        loss = None
-        if closure is not None:
-            with torch.enable_grad():
-                loss = closure()
-
-        for group in self.param_groups:
-            params_with_grad = []
-            grads = []
-            exp_avgs = []
-            exp_avg_sqs = []
-            ema_params_with_grad = []
-            state_sums = []
-            max_exp_avg_sqs = []
-            state_steps = []
-            amsgrad = group['amsgrad']
-            beta1, beta2 = group['betas']
-            ema_decay = group['ema_decay']
-            ema_power = group['ema_power']
-
-            for p in group['params']:
-                if p.grad is None:
-                    continue
-                params_with_grad.append(p)
-                if p.grad.is_sparse:
-                    raise RuntimeError('AdamW does not support sparse gradients')
-                grads.append(p.grad)
-
-                state = self.state[p]
-
-                # State initialization
-                if len(state) == 0:
-                    state['step'] = 0
-                    # Exponential moving average of gradient values
-                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    # Exponential moving average of squared gradient values
-                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    if amsgrad:
-                        # Maintains max of all exp. moving avg. of sq. grad. values
-                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    # Exponential moving average of parameter values
-                    state['param_exp_avg'] = p.detach().float().clone()
-
-                exp_avgs.append(state['exp_avg'])
-                exp_avg_sqs.append(state['exp_avg_sq'])
-                ema_params_with_grad.append(state['param_exp_avg'])
-
-                if amsgrad:
-                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])
-
-                # update the steps for each param group update
-                state['step'] += 1
-                # record the step after step update
-                state_steps.append(state['step'])
-
-            optim._functional.adamw(params_with_grad,
-                                    grads,
-                                    exp_avgs,
-                                    exp_avg_sqs,
-                                    max_exp_avg_sqs,
-                                    state_steps,
-                                    amsgrad=amsgrad,
-                                    beta1=beta1,
-                                    beta2=beta2,
-                                    lr=group['lr'],
-                                    weight_decay=group['weight_decay'],
-                                    eps=group['eps'],
-                                    maximize=False)
-
-            cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power)
-            for param, ema_param in zip(params_with_grad, ema_params_with_grad):
-                ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay)
-
-        return loss
spaces/DAMO-NLP-SG/Video-LLaMA/app.py
DELETED
@@ -1,259 +0,0 @@
-"""
-Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py
-"""
-import argparse
-import os
-import random
-
-import numpy as np
-import torch
-import torch.backends.cudnn as cudnn
-import gradio as gr
-
-from video_llama.common.config import Config
-from video_llama.common.dist_utils import get_rank
-from video_llama.common.registry import registry
-from video_llama.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle
-import decord
-decord.bridge.set_bridge('torch')
-
-
-#%%
-# imports modules for registration
-from video_llama.datasets.builders import *
-from video_llama.models import *
-from video_llama.processors import *
-from video_llama.runners import *
-from video_llama.tasks import *
-
-#%%
-def parse_args():
-    parser = argparse.ArgumentParser(description="Demo")
-    parser.add_argument("--cfg-path", default='eval_configs/video_llama_eval.yaml', help="path to configuration file.")
-    parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
-    parser.add_argument(
-        "--options",
-        nargs="+",
-        help="override some settings in the used config, the key-value pair "
-        "in xxx=yyy format will be merged into config file (deprecate), "
-        "change to --cfg-options instead.",
-    )
-    args = parser.parse_args()
-    return args
-
-
-def setup_seeds(config):
-    seed = config.run_cfg.seed + get_rank()
-
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-
-    cudnn.benchmark = False
-    cudnn.deterministic = True
-
-
-# ========================================
-#             Model Initialization
-# ========================================
-
-print('Initializing Chat')
-args = parse_args()
-cfg = Config(args)
-
-model_config = cfg.model_cfg
-model_config.device_8bit = args.gpu_id
-model_cls = registry.get_model_class(model_config.arch)
-model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id))
-model.eval()
-vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train
-vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
-chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id))
-print('Initialization Finished')
-
-# ========================================
-#             Gradio Setting
-# ========================================
-
-def gradio_reset(chat_state, img_list):
-    if chat_state is not None:
-        chat_state.messages = []
-    if img_list is not None:
-        img_list = []
-    return None, gr.update(value=None, interactive=True), gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your video first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list
-
-def upload_imgorvideo(gr_video, gr_img, text_input, chat_state,chatbot):
-    if gr_img is None and gr_video is None:
-        return None, None, None, gr.update(interactive=True), chat_state, None
-    elif gr_img is not None and gr_video is None:
-        print(gr_img)
-        chatbot = chatbot + [((gr_img,), None)]
-        chat_state = Conversation(
-            system= "You are able to understand the visual content that the user provides."
-            "Follow the instructions carefully and explain your answers in detail.",
-            roles=("Human", "Assistant"),
-            messages=[],
-            offset=0,
-            sep_style=SeparatorStyle.SINGLE,
-            sep="###",
-        )
-        img_list = []
-        llm_message = chat.upload_img(gr_img, chat_state, img_list)
-        return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot
-    elif gr_video is not None and gr_img is None:
-        print(gr_video)
-        chatbot = chatbot + [((gr_video,), None)]
-        chat_state = default_conversation.copy()
-        chat_state = Conversation(
-            system= "You are able to understand the visual content that the user provides."
-            "Follow the instructions carefully and explain your answers in detail.",
-            roles=("Human", "Assistant"),
-            messages=[],
-            offset=0,
-            sep_style=SeparatorStyle.SINGLE,
-            sep="###",
-        )
-        img_list = []
-        llm_message = chat.upload_video(gr_video, chat_state, img_list)
-        return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list,chatbot
-    else:
-        # img_list = []
-        return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot
-
-def gradio_ask(user_message, chatbot, chat_state):
-    if len(user_message) == 0:
-        return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
-    chat.ask(user_message, chat_state)
-    chatbot = chatbot + [[user_message, None]]
-    return '', chatbot, chat_state
-
-
-def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
-    llm_message = chat.answer(conv=chat_state,
-                              img_list=img_list,
-                              num_beams=1,
-                              temperature=temperature,
-                              max_new_tokens=240,
-                              max_length=511)[0]
-    chatbot[-1][1] = llm_message
-    print(chat_state.get_prompt())
-    print(chat_state)
-    return chatbot, chat_state, img_list
-
-title = """
-<h1 align="center"><a href="https://github.com/DAMO-NLP-SG/Video-LLaMA"><img src="https://s1.ax1x.com/2023/05/22/p9oQ0FP.jpg", alt="Video-LLaMA" border="0" style="margin: 0 auto; height: 200px;" /></a> </h1>
-
-<h1 align="center">Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding</h1>
-
-<h5 align="center">  Introduction: Video-LLaMA is a multi-model large language model that achieves video-grounded conversations between humans and computers \
-    by connecting language decoder with off-the-shelf unimodal pre-trained models. </h5>
-
-<div style='display:flex; gap: 0.25rem; '>
-<a href='https://github.com/DAMO-NLP-SG/Video-LLaMA'><img src='https://img.shields.io/badge/Github-Code-success'></a>
-<a href='https://huggingface.co/spaces/DAMO-NLP-SG/Video-LLaMA'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
-<a href='https://huggingface.co/DAMO-NLP-SG/Video-LLaMA-Series'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a>
-<a href='https://modelscope.cn/studios/damo/video-llama/summary'><img src='https://img.shields.io/badge/ModelScope-Demo-blueviolet'></a>
-<a href='https://arxiv.org/abs/2306.02858'><img src='https://img.shields.io/badge/Paper-PDF-red'></a>
-</div>
-
-
-Thank you for using the Video-LLaMA Demo Page! If you have any questions or feedback, feel free to contact us.
-
-If you find Video-LLaMA interesting, please give us a star on GitHub.
-
-Current online demo uses the 7B version of Video-LLaMA due to resource limitations. We have released \
-the 13B version on our GitHub repository.
-
-
-"""
-
-Note_markdown = ("""
-### Note
-Video-LLaMA is a prototype model and may have limitations in understanding complex scenes, long videos, or specific domains.
-The output results may be influenced by input quality, limitations of the dataset, and the model's susceptibility to illusions. Please interpret the results with caution.
-
-**Copyright 2023 Alibaba DAMO Academy.**
-""")
-
-cite_markdown = ("""
-## Citation
-If you find our project useful, hope you can star our repo and cite our paper as follows:
-```
-@article{damonlpsg2023videollama,
-  author = {Zhang, Hang and Li, Xin and Bing, Lidong},
-  title = {Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding},
-  year = 2023,
-  journal = {arXiv preprint arXiv:2306.02858}
-  url = {https://arxiv.org/abs/2306.02858}
-}
-""")
-
-case_note_upload = ("""
-### We provide some examples at the bottom of the page. Simply click on them to try them out directly.
-""")
-
-#TODO show examples below
-
-with gr.Blocks() as demo:
-    gr.Markdown(title)
-
-    with gr.Row():
-        with gr.Column(scale=0.5):
-            video = gr.Video()
-            image = gr.Image(type="filepath")
-            gr.Markdown(case_note_upload)
-
-            upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary")
-            clear = gr.Button("Restart")
-
-            num_beams = gr.Slider(
-                minimum=1,
-                maximum=10,
-                value=1,
-                step=1,
-                interactive=True,
-                label="beam search numbers)",
-            )
-
-            temperature = gr.Slider(
-                minimum=0.1,
-                maximum=2.0,
-                value=1.0,
-                step=0.1,
-                interactive=True,
-                label="Temperature",
-            )
-
-            audio = gr.Checkbox(interactive=True, value=False, label="Audio")
-            gr.Markdown(Note_markdown)
-        with gr.Column():
-            chat_state = gr.State()
-            img_list = gr.State()
-            chatbot = gr.Chatbot(label='Video-LLaMA')
-            text_input = gr.Textbox(label='User', placeholder='Upload your image/video first, or directly click the examples at the bottom of the page.', interactive=False)
-
-
-    with gr.Column():
-        gr.Examples(examples=[
-            [f"examples/dog.jpg", "Which breed is this dog? "],
-            [f"examples/jonsnow.jpg", "Who's the man on the right? "],
-            [f"examples/statue_of_liberty.jpg", "Can you tell me about this building? "],
-        ], inputs=[image, text_input])
-
-        gr.Examples(examples=[
-            [f"examples/skateboarding_dog.mp4", "What is the dog doing? "],
-            [f"examples/birthday.mp4", "What is the boy doing? "],
-            [f"examples/Iron_Man.mp4", "Is the guy in the video Iron Man? "],
-        ], inputs=[video, text_input])
-
-    gr.Markdown(cite_markdown)
-    upload_button.click(upload_imgorvideo, [video, image, text_input, chat_state,chatbot], [video, image, text_input, upload_button, chat_state, img_list,chatbot])
-
-    text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
-        gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
-    )
-    clear.click(gradio_reset, [chat_state, img_list], [chatbot, video, image, text_input, upload_button, chat_state, img_list], queue=False)
-
-demo.launch(share=False, enable_queue=True)
-
-# %%
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_urlparse.py
DELETED
@@ -1,462 +0,0 @@
-"""
-An implementation of `urlparse` that provides URL validation and normalization
-as described by RFC3986.
-
-We rely on this implementation rather than the one in Python's stdlib, because:
-
-* It provides more complete URL validation.
-* It properly differentiates between an empty querystring and an absent querystring,
-  to distinguish URLs with a trailing '?'.
-* It handles scheme, hostname, port, and path normalization.
-* It supports IDNA hostnames, normalizing them to their encoded form.
-* The API supports passing individual components, as well as the complete URL string.
-
-Previously we relied on the excellent `rfc3986` package to handle URL parsing and
-validation, but this module provides a simpler alternative, with less indirection
-required.
-"""
-import ipaddress
-import re
-import typing
-
-import idna
-
-from ._exceptions import InvalidURL
-
-MAX_URL_LENGTH = 65536
-
-# https://datatracker.ietf.org/doc/html/rfc3986.html#section-2.3
-UNRESERVED_CHARACTERS = (
-    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~"
-)
-SUB_DELIMS = "!$&'()*+,;="
-
-PERCENT_ENCODED_REGEX = re.compile("%[A-Fa-f0-9]{2}")
-
-
-# {scheme}:           (optional)
-# //{authority}       (optional)
-# {path}
-# ?{query}            (optional)
-# #{fragment}         (optional)
-URL_REGEX = re.compile(
-    (
-        r"(?:(?P<scheme>{scheme}):)?"
-        r"(?://(?P<authority>{authority}))?"
-        r"(?P<path>{path})"
-        r"(?:\?(?P<query>{query}))?"
-        r"(?:#(?P<fragment>{fragment}))?"
-    ).format(
-        scheme="([a-zA-Z][a-zA-Z0-9+.-]*)?",
-        authority="[^/?#]*",
-        path="[^?#]*",
-        query="[^#]*",
-        fragment=".*",
-    )
-)
-
-# {userinfo}@         (optional)
-# {host}
-# :{port}             (optional)
-AUTHORITY_REGEX = re.compile(
-    (
-        r"(?:(?P<userinfo>{userinfo})@)?" r"(?P<host>{host})" r":?(?P<port>{port})?"
-    ).format(
-        userinfo="[^@]*",  # Any character sequence not including '@'.
-        host="(\\[.*\\]|[^:]*)",  # Either any character sequence not including ':',
-        # or an IPv6 address enclosed within square brackets.
-        port=".*",  # Any character sequence.
-    )
-)
-
-
-# If we call urlparse with an individual component, then we need to regex
-# validate that component individually.
-# Note that we're duplicating the same strings as above. Shock! Horror!!
-COMPONENT_REGEX = {
-    "scheme": re.compile("([a-zA-Z][a-zA-Z0-9+.-]*)?"),
-    "authority": re.compile("[^/?#]*"),
-    "path": re.compile("[^?#]*"),
-    "query": re.compile("[^#]*"),
-    "fragment": re.compile(".*"),
-    "userinfo": re.compile("[^@]*"),
-    "host": re.compile("(\\[.*\\]|[^:]*)"),
-    "port": re.compile(".*"),
-}
-
-
-# We use these simple regexs as a first pass before handing off to
-# the stdlib 'ipaddress' module for IP address validation.
-IPv4_STYLE_HOSTNAME = re.compile(r"^[0-9]+.[0-9]+.[0-9]+.[0-9]+$")
-IPv6_STYLE_HOSTNAME = re.compile(r"^\[.*\]$")
-
-
-class ParseResult(typing.NamedTuple):
-    scheme: str
-    userinfo: str
-    host: str
-    port: typing.Optional[int]
-    path: str
-    query: typing.Optional[str]
-    fragment: typing.Optional[str]
-
-    @property
-    def authority(self) -> str:
-        return "".join(
-            [
-                f"{self.userinfo}@" if self.userinfo else "",
-                f"[{self.host}]" if ":" in self.host else self.host,
-                f":{self.port}" if self.port is not None else "",
-            ]
-        )
-
-    @property
-    def netloc(self) -> str:
-        return "".join(
-            [
-                f"[{self.host}]" if ":" in self.host else self.host,
-                f":{self.port}" if self.port is not None else "",
-            ]
-        )
-
-    def copy_with(self, **kwargs: typing.Optional[str]) -> "ParseResult":
-        if not kwargs:
-            return self
-
-        defaults = {
-            "scheme": self.scheme,
-            "authority": self.authority,
-            "path": self.path,
-            "query": self.query,
-            "fragment": self.fragment,
-        }
-        defaults.update(kwargs)
-        return urlparse("", **defaults)
-
-    def __str__(self) -> str:
-        authority = self.authority
-        return "".join(
-            [
-                f"{self.scheme}:" if self.scheme else "",
-                f"//{authority}" if authority else "",
-                self.path,
-                f"?{self.query}" if self.query is not None else "",
-                f"#{self.fragment}" if self.fragment is not None else "",
-            ]
-        )
-
-
-def urlparse(url: str = "", **kwargs: typing.Optional[str]) -> ParseResult:
-    # Initial basic checks on allowable URLs.
-    # ---------------------------------------
-
-    # Hard limit the maximum allowable URL length.
-    if len(url) > MAX_URL_LENGTH:
-        raise InvalidURL("URL too long")
-
-    # If a URL includes any ASCII control characters including \t, \r, \n,
-    # then treat it as invalid.
-    if any(char.isascii() and not char.isprintable() for char in url):
-        raise InvalidURL("Invalid non-printable ASCII character in URL")
-
-    # Some keyword arguments require special handling.
-    # ------------------------------------------------
-
-    # Coerce "port" to a string, if it is provided as an integer.
-    if "port" in kwargs:
-        port = kwargs["port"]
-        kwargs["port"] = str(port) if isinstance(port, int) else port
-
-    # Replace "netloc" with "host and "port".
-    if "netloc" in kwargs:
-        netloc = kwargs.pop("netloc") or ""
-        kwargs["host"], _, kwargs["port"] = netloc.partition(":")
-
-    # Replace "username" and/or "password" with "userinfo".
-    if "username" in kwargs or "password" in kwargs:
-        username = quote(kwargs.pop("username", "") or "")
-        password = quote(kwargs.pop("password", "") or "")
-        kwargs["userinfo"] = f"{username}:{password}" if password else username
-
-    # Replace "raw_path" with "path" and "query".
-    if "raw_path" in kwargs:
-        raw_path = kwargs.pop("raw_path") or ""
-        kwargs["path"], seperator, kwargs["query"] = raw_path.partition("?")
-        if not seperator:
-            kwargs["query"] = None
-
-    # Ensure that IPv6 "host" addresses are always escaped with "[...]".
-    if "host" in kwargs:
-        host = kwargs.get("host") or ""
-        if ":" in host and not (host.startswith("[") and host.endswith("]")):
-            kwargs["host"] = f"[{host}]"
-
-    # If any keyword arguments are provided, ensure they are valid.
-    # -------------------------------------------------------------
-
-    for key, value in kwargs.items():
-        if value is not None:
-            if len(value) > MAX_URL_LENGTH:
-                raise InvalidURL(f"URL component '{key}' too long")
-
-            # If a component includes any ASCII control characters including \t, \r, \n,
-            # then treat it as invalid.
-            if any(char.isascii() and not char.isprintable() for char in value):
-                raise InvalidURL(
-                    f"Invalid non-printable ASCII character in URL component '{key}'"
-                )
-
-            # Ensure that keyword arguments match as a valid regex.
-            if not COMPONENT_REGEX[key].fullmatch(value):
-                raise InvalidURL(f"Invalid URL component '{key}'")
-
-    # The URL_REGEX will always match, but may have empty components.
-    url_match = URL_REGEX.match(url)
-    assert url_match is not None
-    url_dict = url_match.groupdict()
-
-    # * 'scheme', 'authority', and 'path' may be empty strings.
-    # * 'query' may be 'None', indicating no trailing "?" portion.
-    #   Any string including the empty string, indicates a trailing "?".
-    # * 'fragment' may be 'None', indicating no trailing "#" portion.
-    #   Any string including the empty string, indicates a trailing "#".
-    scheme = kwargs.get("scheme", url_dict["scheme"]) or ""
-    authority = kwargs.get("authority", url_dict["authority"]) or ""
-    path = kwargs.get("path", url_dict["path"]) or ""
-    query = kwargs.get("query", url_dict["query"])
-    fragment = kwargs.get("fragment", url_dict["fragment"])
-
-    # The AUTHORITY_REGEX will always match, but may have empty components.
-    authority_match = AUTHORITY_REGEX.match(authority)
-    assert authority_match is not None
-    authority_dict = authority_match.groupdict()
-
-    # * 'userinfo' and 'host' may be empty strings.
-    # * 'port' may be 'None'.
-    userinfo = kwargs.get("userinfo", authority_dict["userinfo"]) or ""
-    host = kwargs.get("host", authority_dict["host"]) or ""
-    port = kwargs.get("port", authority_dict["port"])
-
-    # Normalize and validate each component.
-    # We end up with a parsed representation of the URL,
-    # with components that are plain ASCII bytestrings.
-    parsed_scheme: str = scheme.lower()
-    parsed_userinfo: str = quote(userinfo, safe=SUB_DELIMS + ":")
-    parsed_host: str = encode_host(host)
-    parsed_port: typing.Optional[int] = normalize_port(port, scheme)
-
-    has_scheme = parsed_scheme != ""
-    has_authority = (
-        parsed_userinfo != "" or parsed_host != "" or parsed_port is not None
-    )
-    validate_path(path, has_scheme=has_scheme, has_authority=has_authority)
-    if has_authority:
-        path = normalize_path(path)
-
-    # The GEN_DELIMS set is... : / ? # [ ] @
-    # These do not need to be percent-quoted unless they serve as delimiters for the
-    # specific component.
-
-    # For 'path' we need to drop ? and # from the GEN_DELIMS set.
-    parsed_path: str = quote(path, safe=SUB_DELIMS + ":/[]@")
-    # For 'query' we need to drop '#' from the GEN_DELIMS set.
-    parsed_query: typing.Optional[str] = (
-        None if query is None else quote(query, safe=SUB_DELIMS + ":/?[]@")
-    )
-    # For 'fragment' we can include all of the GEN_DELIMS set.
-    parsed_fragment: typing.Optional[str] = (
-        None if fragment is None else quote(fragment, safe=SUB_DELIMS + ":/?#[]@")
-    )
-
-    # The parsed ASCII bytestrings are our canonical form.
-    # All properties of the URL are derived from these.
-    return ParseResult(
-        parsed_scheme,
-        parsed_userinfo,
-        parsed_host,
-        parsed_port,
-        parsed_path,
-        parsed_query,
-        parsed_fragment,
-    )
-
-
-def encode_host(host: str) -> str:
-    if not host:
-        return ""
-
-    elif IPv4_STYLE_HOSTNAME.match(host):
-        # Validate IPv4 hostnames like #.#.#.#
-        #
-        # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
-        #
-        # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
-        try:
-            ipaddress.IPv4Address(host)
-        except ipaddress.AddressValueError:
-            raise InvalidURL(f"Invalid IPv4 address: {host!r}")
-        return host
-
-    elif IPv6_STYLE_HOSTNAME.match(host):
-        # Validate IPv6 hostnames like [...]
-        #
-        # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
-        #
-        # "A host identified by an Internet Protocol literal address, version 6
-        # [RFC3513] or later, is distinguished by enclosing the IP literal
-        # within square brackets ("[" and "]").  This is the only place where
-        # square bracket characters are allowed in the URI syntax."
-        try:
-            ipaddress.IPv6Address(host[1:-1])
-        except ipaddress.AddressValueError:
-            raise InvalidURL(f"Invalid IPv6 address: {host!r}")
-        return host[1:-1]
-
-    elif host.isascii():
-        # Regular ASCII hostnames
-        #
-        # From https://datatracker.ietf.org/doc/html/rfc3986/#section-3.2.2
-        #
-        # reg-name    = *( unreserved / pct-encoded / sub-delims )
-        return quote(host.lower(), safe=SUB_DELIMS)
-
-    # IDNA hostnames
-    try:
-        return idna.encode(host.lower()).decode("ascii")
-    except idna.IDNAError:
-        raise InvalidURL(f"Invalid IDNA hostname: {host!r}")
-
-
-def normalize_port(
-    port: typing.Optional[typing.Union[str, int]], scheme: str
-) -> typing.Optional[int]:
-    # From https://tools.ietf.org/html/rfc3986#section-3.2.3
-    #
-    # "A scheme may define a default port.  For example, the "http" scheme
-    # defines a default port of "80", corresponding to its reserved TCP
-    # port number.  The type of port designated by the port number (e.g.,
-    # TCP, UDP, SCTP) is defined by the URI scheme.  URI producers and
-    # normalizers should omit the port component and its ":" delimiter if
-    # port is empty or if its value would be the same as that of the
-    # scheme's default."
-    if port is None or port == "":
-        return None
-
-    try:
-        port_as_int = int(port)
-    except ValueError:
-        raise InvalidURL(f"Invalid port: {port!r}")
-
-    # See https://url.spec.whatwg.org/#url-miscellaneous
-    default_port = {"ftp": 21, "http": 80, "https": 443, "ws": 80, "wss": 443}.get(
-        scheme
-    )
-    if port_as_int == default_port:
-        return None
-    return port_as_int
-
-
-def validate_path(path: str, has_scheme: bool, has_authority: bool) -> None:
-    """
-    Path validation rules that depend on if the URL contains a scheme or authority component.
-
-    See https://datatracker.ietf.org/doc/html/rfc3986.html#section-3.3
-    """
-    if has_authority:
-        # > If a URI contains an authority component, then the path component
-        # > must either be empty or begin with a slash ("/") character."
-        if path and not path.startswith("/"):
-            raise InvalidURL("For absolute URLs, path must be empty or begin with '/'")
-    else:
-        # > If a URI does not contain an authority component, then the path cannot begin
-        # > with two slash characters ("//").
-        if path.startswith("//"):
-            raise InvalidURL(
-                "URLs with no authority component cannot have a path starting with '//'"
-            )
-        # > In addition, a URI reference (Section 4.1) may be a relative-path reference, in which
-        # > case the first path segment cannot contain a colon (":") character.
-        if path.startswith(":") and not has_scheme:
-            raise InvalidURL(
-                "URLs with no scheme component cannot have a path starting with ':'"
-            )
-
-
-def normalize_path(path: str) -> str:
-    """
-    Drop "." and ".." segments from a URL path.
-
-    For example:
-
-        normalize_path("/path/./to/somewhere/..") == "/path/to"
-    """
-    # https://datatracker.ietf.org/doc/html/rfc3986#section-5.2.4
-    components = path.split("/")
-    output: typing.List[str] = []
-    for component in components:
-        if component == ".":
-            pass
-        elif component == "..":
-            if output and output != [""]:
-                output.pop()
-        else:
-            output.append(component)
-    return "/".join(output)
-
-
-def percent_encode(char: str) -> str:
-    """
-    Replace a single character with the percent-encoded representation.
-
-    Characters outside the ASCII range are represented with their a percent-encoded
-    representation of their UTF-8 byte sequence.
-
-    For example:
-
-        percent_encode(" ") == "%20"
-    """
-    return "".join([f"%{byte:02x}" for byte in char.encode("utf-8")]).upper()
-
-
-def is_safe(string: str, safe: str = "/") -> bool:
-    """
-    Determine if a given string is already quote-safe.
-    """
-    NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe + "%"
-
-    # All characters must already be non-escaping or '%'
-    for char in string:
-        if char not in NON_ESCAPED_CHARS:
-            return False
-
-    # Any '%' characters must be valid '%xx' escape sequences.
-    return string.count("%") == len(PERCENT_ENCODED_REGEX.findall(string))
-
-
-def quote(string: str, safe: str = "/") -> str:
-    """
-    Use percent-encoding to quote a string if required.
-    """
-    if is_safe(string, safe=safe):
-        return string
-
-    NON_ESCAPED_CHARS = UNRESERVED_CHARACTERS + safe
-    return "".join(
-        [char if char in NON_ESCAPED_CHARS else percent_encode(char) for char in string]
-    )
-
-
-def urlencode(items: typing.List[typing.Tuple[str, str]]) -> str:
-    # We can use a much simpler version of the stdlib urlencode here because
-    # we don't need to handle a bunch of different typing cases, such as bytes vs str.
-    #
-    # https://github.com/python/cpython/blob/b2f7b2ef0b5421e01efb8c7bee2ef95d3bab77eb/Lib/urllib/parse.py#L926
-    #
-    # Note that we use '%20' encoding for spaces, and treat '/' as a safe
-    # character.  This means our query params have the same escaping as other
-    # characters in the URL path.  This is slightly different to `requests`,
-    # but is the behaviour that browsers use.
-    #
-    # See https://github.com/encode/httpx/issues/2536 and
-    # https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlencode
-    return "&".join([quote(k) + "=" + quote(v) for k, v in items])