Commit 32fde3c
Parent(s): 6029927
Update parquet files (step 51 of 249)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/you.py +0 -79
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD Software The Ultimate CAD Solution for Singapore Designers.md +0 -21
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Believer Korean Movie English Subtitles Download.md +0 -20
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar English Spanish Interpreter Professional 4.4 Crack .md +0 -130
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Filmora Free Without Watermark How to Edit Videos Like a Pro on a Budget.md +0 -18
- spaces/1gistliPinn/ChatGPT4/Examples/Allwinner A13 Tw A0910 V22 1126 16 PATCHED.md +0 -12
- spaces/1gistliPinn/ChatGPT4/Examples/Esr Disc Patcher Gui 0.24a Downloadl What is ESR and How Does it Work?.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Free Download Champak Comics In Hindi Pdf.md +0 -21
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Logo Maker Generate a Professional Logo in Minutes.md +0 -73
- spaces/1phancelerku/anime-remove-background/Download Facebook Videos for Free in HD Quality - No Watermark.md +0 -209
- spaces/1phancelerku/anime-remove-background/Download The Spike MOD APK and Enjoy All Characters Money and Max Level.md +0 -76
- spaces/1phancelerku/anime-remove-background/Experience the Adventure of One Piece with Haki Legend APK for Android and iOS.md +0 -108
- spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_new.py +0 -125
- spaces/A666sxr/Genshin_TTS/preprocess.py +0 -25
- spaces/AIConsultant/MusicGen/audiocraft/optim/__init__.py +0 -16
- spaces/AIFILMS/riffusion-playground/README.md +0 -14
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py +0 -2
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/GPTalk.py +0 -83
- spaces/Aditya9790/yolo7-object-tracking/deploy/triton-inference-server/labels.py +0 -83
- spaces/AlanMars/QYL-AI-Space/app.py +0 -631
- spaces/Alpaca233/ChatPDF-GUI/app.py +0 -51
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/img2img.md +0 -55
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py +0 -667
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/pil_utils.py +0 -48
- spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_swin_fpn.py +0 -127
- spaces/Andy1621/uniformer_image_detection/configs/pisa/README.md +0 -40
- spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py +0 -7
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py +0 -39
- spaces/Apex-X/ROOPOK/roop/processors/frame/__init__.py +0 -0
- spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/loss.py +0 -136
- spaces/AriaMei/TTSdemo/train_ms.py +0 -296
- spaces/Armandoliv/cars-parts-segmentation-resnet18/app.py +0 -184
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_windows.py +0 -72
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/__init__.py +0 -247
- spaces/BairaS/Tabular_ML/app.py +0 -87
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/custom.py +0 -38
- spaces/BetterAPI/BetterChat/src/hooks.server.ts +0 -37
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/containers.py +0 -167
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/jaraco/text/__init__.py +0 -599
- spaces/BwayKC/prompthero-openjourney-v2/app.py +0 -3
- spaces/CK42/sentiment-model-comparison/README.md +0 -13
- spaces/CVPR/DualStyleGAN/dualstylegan.py +0 -206
- spaces/CVPR/LIVE/cmake/FindThrust.cmake +0 -40
- spaces/CVPR/LIVE/thrust/thrust/async/reduce.h +0 -441
- spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h +0 -57
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/copy_if.h +0 -857
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/set_operations.h +0 -319
- spaces/Cboudreau/AI_ZeroToHero/app.py +0 -58
- spaces/CikeyQI/meme-api/meme_generator/memes/mihoyo/__init__.py +0 -25
- spaces/CofAI/sd-2.1/README.md +0 -13
spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/you.py
DELETED
@@ -1,79 +0,0 @@
-import sys
-import json
-import urllib.parse
-
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-messages = config['messages']
-prompt = ''
-
-
-def transform(messages: list) -> list:
-    result = []
-    i = 0
-
-    while i < len(messages):
-        if messages[i]['role'] == 'user':
-            question = messages[i]['content']
-            i += 1
-
-            if i < len(messages) and messages[i]['role'] == 'assistant':
-                answer = messages[i]['content']
-                i += 1
-            else:
-                answer = ''
-
-            result.append({'question': question, 'answer': answer})
-
-        elif messages[i]['role'] == 'assistant':
-            result.append({'question': '', 'answer': messages[i]['content']})
-            i += 1
-
-        elif messages[i]['role'] == 'system':
-            result.append({'question': messages[i]['content'], 'answer': ''})
-            i += 1
-
-    return result
-
-headers = {
-    'Content-Type': 'application/x-www-form-urlencoded',
-    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-    'Sec-Fetch-Site': 'same-origin',
-    'Accept-Language': 'en-GB,en;q=0.9',
-    'Sec-Fetch-Mode': 'navigate',
-    'Host': 'you.com',
-    'Origin': 'https://you.com',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
-    'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
-    'Connection': 'keep-alive',
-    'Sec-Fetch-Dest': 'document',
-    'Priority': 'u=0, i',
-}
-
-if messages[-1]['role'] == 'user':
-    prompt = messages[-1]['content']
-    messages = messages[:-1]
-
-params = urllib.parse.urlencode({
-    'q': prompt,
-    'domain': 'youchat',
-    'chat': transform(messages)
-})
-
-def output(chunk):
-    if b'"youChatToken"' in chunk:
-        chunk_json = json.loads(chunk.decode().split('data: ')[1])
-
-        print(chunk_json['youChatToken'], flush=True, end = '')
-
-while True:
-    try:
-        response = requests.get(f'https://you.com/api/streamingSearch?{params}',
-            headers=headers, content_callback=output, impersonate='safari15_5')
-
-        exit(0)
-
-    except Exception as e:
-        print('an error occured, retrying... |', e, flush=True)
-        continue
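For readers skimming this deletion, the short sketch below replays the core of the removed you.py provider — its transform() helper and the query-string construction — against a made-up message list. The sample history, the standalone __main__ wrapper, and the printout are illustrative assumptions added here; only the transform() body and the urlencode call mirror the deleted code, and the actual network call via curl_cffi is omitted.

# Illustrative sketch (not part of the commit): how the deleted you.py helper
# folded an OpenAI-style message list into question/answer pairs before
# URL-encoding them for you.com's streamingSearch endpoint.
import urllib.parse

def transform(messages: list) -> list:
    # Mirrors the deleted helper: each user turn is paired with the assistant
    # turn that follows it; bare assistant/system turns become one-sided pairs.
    result = []
    i = 0
    while i < len(messages):
        if messages[i]['role'] == 'user':
            question = messages[i]['content']
            i += 1
            if i < len(messages) and messages[i]['role'] == 'assistant':
                answer = messages[i]['content']
                i += 1
            else:
                answer = ''
            result.append({'question': question, 'answer': answer})
        elif messages[i]['role'] == 'assistant':
            result.append({'question': '', 'answer': messages[i]['content']})
            i += 1
        elif messages[i]['role'] == 'system':
            result.append({'question': messages[i]['content'], 'answer': ''})
            i += 1
    return result

if __name__ == '__main__':
    # Hypothetical chat history, used only to show the resulting query string.
    history = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': 'hi'},
        {'role': 'assistant', 'content': 'Hello! How can I assist you today?'},
        {'role': 'user', 'content': 'Summarise our chat.'},
    ]
    prompt = history[-1]['content']  # last user turn becomes q=
    params = urllib.parse.urlencode({
        'q': prompt,
        'domain': 'youchat',
        'chat': transform(history[:-1]),  # earlier turns become the chat= payload
    })
    print(params)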
spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD Software The Ultimate CAD Solution for Singapore Designers.md
DELETED
@@ -1,21 +0,0 @@
-<br />
-<h1>Why You Should Choose AutoCAD Software for Your Design Projects in Singapore</h1>
-<p>AutoCAD is a leading computer-aided design (CAD) software that helps you create precise 2D and 3D drawings, models, and documentation for any design project. Whether you are an architect, engineer, construction professional, or designer, AutoCAD can help you turn your ideas into reality. But why should you choose AutoCAD software for your design projects in Singapore? Here are some reasons:</p>
-<ul>
-<li>AutoCAD is trusted by millions of users worldwide. It is developed by Autodesk, a global leader in design and make technology, with expertise across architecture, engineering, construction, design, manufacturing, and entertainment.</li>
-<li>AutoCAD has TrustedDWG® technology that ensures fidelity and compatibility for your DWG files. You can share and collaborate on your drawings safely and securely across desktop, web, or mobile devices.</li>
-<li>AutoCAD has powerful features and tools that can help you accelerate your design process and improve your productivity. You can automate tasks such as comparing drawings, counting objects, adding blocks, creating schedules, and more.</li>
-<li>AutoCAD has seven specialized toolsets that cover various industries and disciplines. You can access thousands of parts and features that are tailored to your specific needs. For example, you can use the Architecture toolset to create floor plans and elevations, the Mechanical toolset to create mechanical parts and assemblies, the Electrical toolset to create electrical diagrams and schematics, and more.</li>
-<li>AutoCAD has flexible pricing options that suit your budget and preferences. You can buy AutoCAD as a standalone product or as part of the Architecture, Engineering & Construction Collection that includes other Autodesk products such as Revit, Civil 3D, Navisworks, etc. You can also choose between monthly, annual, or 3-year subscriptions.</li>
-</ul>
-<p>As you can see, AutoCAD software is a great choice for your design projects in Singapore. It can help you create high-quality drawings and models that meet your standards and specifications. It can also help you collaborate with your team members and clients across different platforms and devices. And it can help you save time and money with its automation and customization features.</p>
-<h2>autocad software singapore</h2><br /><p><b><b>Download File</b> >>> <a href="https://byltly.com/2uKxv1">https://byltly.com/2uKxv1</a></b></p><br /><br />
-<p>If you want to learn more about AutoCAD software or try it for free, visit the Autodesk website or contact their Singapore office. You can also find online tutorials, courses, forums, and blogs that can help you get started with AutoCAD or enhance your skills.</p>
-
-<p>AutoCAD software is not only a powerful and versatile tool for design and documentation, but also a creative and expressive medium for art and innovation. You can use AutoCAD to create stunning visualizations, animations, and simulations that showcase your design ideas and concepts. You can also use AutoCAD to explore new possibilities and solutions that can improve the quality and sustainability of your design projects.</p>
-<p>AutoCAD software is also compatible with other Autodesk products and services that can enhance your design workflow and outcomes. For example, you can use AutoCAD with BIM 360 to manage your projects in the cloud, with Fusion 360 to create 3D models and prototypes, with Inventor to design and simulate mechanical systems, with Maya to create realistic 3D animations and effects, and more.</p>
-<p>AutoCAD software is a smart investment for your design career and business in Singapore. It can help you gain a competitive edge in the market and meet the demands and expectations of your clients. It can also help you develop your skills and knowledge in various design fields and disciplines. And it can help you connect with a global community of AutoCAD users who can inspire you and support you in your design journey.</p>
-<p></p>
-<p>So what are you waiting for? Start your free trial of AutoCAD software today and discover how it can transform your design projects in Singapore. You can also contact Autodesk Singapore for more information or assistance. And don't forget to check out the latest news, updates, and tips on AutoCAD on the Autodesk blog.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Believer Korean Movie English Subtitles Download.md
DELETED
@@ -1,20 +0,0 @@
-
-<h1>How to Watch Believer Korean Movie with English Subtitles Online</h1>
-<p>Believer is a 2018 South Korean crime thriller film directed by Lee Hae-young. It is a remake of the 2012 Hong Kong film Drug War. The film stars Cho Jin-woong as a detective who infiltrates a drug cartel and tries to find the boss with the help of a drug dealer played by Ryu Jun-yeol.</p>
-<h2>Believer Korean Movie English Subtitles Download</h2><br /><p><b><b>DOWNLOAD</b> ✪ <a href="https://byltly.com/2uKvtH">https://byltly.com/2uKvtH</a></b></p><br /><br />
-<p>The film was a box office hit in South Korea, earning over 30 million USD. It also received positive reviews from critics and audiences for its gripping plot, stylish action scenes, and stellar performances.</p>
-<p>If you are a fan of Korean cinema and want to watch Believer with English subtitles online, you have several options. Here are some of the best ways to enjoy this thrilling movie:</p>
-<ul>
-<li><strong>Netflix</strong>: Netflix is one of the most popular streaming platforms in the world, and it has a large collection of Korean movies and shows. You can watch Believer on Netflix with English subtitles if you have a subscription. You can also download the movie to watch offline on your device.</li>
-<li><strong>Amazon Prime Video</strong>: Amazon Prime Video is another popular streaming service that offers a variety of content, including Korean movies. You can rent or buy Believer on Amazon Prime Video with English subtitles and watch it on your device or TV.</li>
-<li><strong>Viki</strong>: Viki is a streaming platform that specializes in Asian content, especially Korean dramas and movies. You can watch Believer on Viki with English subtitles for free with ads or with a premium subscription that removes ads and gives you access to more features.</li>
-<li><strong>YouTube</strong>: YouTube is the largest video-sharing platform in the world, and it has many Korean movies available for rent or purchase. You can watch Believer on YouTube with English subtitles and enjoy the high-quality video and audio.</li>
-</ul>
-<p>These are some of the best ways to watch Believer Korean movie with English subtitles online. However, you should always be careful of illegal or pirated sites that offer free downloads or streams of the movie. These sites may contain viruses, malware, or other harmful content that can damage your device or compromise your privacy. Always use legal and reputable sources to watch your favorite movies online.</p>
-<p></p>
-
-<p>Believer is not only a thrilling crime movie, but also a commentary on the social and political issues in South Korea. The film explores themes such as corruption, loyalty, betrayal, revenge, and justice. It also shows the dark side of the drug trade and its impact on the lives of ordinary people.</p>
-<p>The film also features a stellar cast of actors who deliver powerful performances. Cho Jin-woong is convincing as the determined and conflicted detective who risks his life to catch the drug lord. Ryu Jun-yeol is charismatic as the drug dealer who has a hidden agenda and a mysterious past. The film also has appearances by Kim Joo-hyuk, Cha Seung-won, Park Hae-joon, and Kim Sung-ryung.</p>
-<p>Believer is a must-watch for fans of Korean cinema and crime thrillers. It is a remake that surpasses the original in many aspects. It is a film that will keep you on the edge of your seat and make you think about the moral dilemmas and consequences of the characters' actions.</p> 81aa517590<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Descargar English Spanish Interpreter Professional 4.4 Crack .md
DELETED
@@ -1,130 +0,0 @@
-
-<h1>Descargar English Spanish Interpreter Professional 4.4 Crack: A Comprehensive Guide</h1>
-<p>If you are looking for a powerful and reliable translation program that can help you communicate effectively in both English and Spanish, you might have heard of <strong>English Spanish Interpreter Professional 4.4</strong>. This is a multi-functional software that can translate all types of documents from English to Spanish and vice versa, with a high level of accuracy and quality. But what if you don't want to pay for the full version of this program? Is there a way to download it for free? And if so, is it safe and legal to do so?</p>
-<h2>descargar english spanish interpreter professional 4.4 crack</h2><br /><p><b><b>Download Zip</b> ✔ <a href="https://byltly.com/2uKxbh">https://byltly.com/2uKxbh</a></b></p><br /><br />
-<p>In this article, we will answer these questions and more. We will explain what English Spanish Interpreter Professional 4.4 is, what features and benefits it offers, how to download it for free, what risks and drawbacks are involved, and what alternatives are available. By the end of this article, you will have a clear idea of whether you should descargar (download) English Spanish Interpreter Professional 4.4 crack or not.</p>
-<h2>Features and Benefits of English Spanish Interpreter Professional 4.4</h2>
-<p>English Spanish Interpreter Professional 4.4 is not your typical translation software that simply translates word by word without considering the context or the meaning of the sentences. It is a sophisticated program that analyzes the whole text that you want to translate, and interprets the expressions and phrases according to their usage and relevance. This way, it can produce translations that are much more natural, fluent, and correct than those of conventional programs.</p>
-<p>But that's not all. English Spanish Interpreter Professional 4.4 also comes with a built-in dictionary and thesaurus, so you can look up the definitions, synonyms, antonyms, and examples of any word in both languages. You can also use the spell checker, the ambiguity checker, and the verb conjugator to improve the grammar and style of your translations. And if you want to save your translations to a .doc file, you can do so easily thanks to its compatibility with MS Word.</p>
-<p>With all these features and benefits, English Spanish Interpreter Professional 4.4 can help you with any translation task that you might have, whether it is for personal or professional purposes. You can use it to translate emails, letters, reports, articles, books, websites, or any other type of document that you need.</p>
-<h2>How to Download English Spanish Interpreter Professional 4.4 Crack for Free</h2>
-<p>Now that you know what English Spanish Interpreter Professional 4.4 can do for you, you might be wondering how to get it for free. After all, the full version of this program costs $149 USD, which is not exactly cheap for most people.</p>
-<p>One way to download it for free is to look for a crack version on the internet. A crack is a modified version of a software that bypasses its security features and allows you to use it without paying for a license or activation code.</p>
-<p>However, before you rush to descargar english spanish interpreter professional 4.4 crack from any website that claims to offer it, you should be aware of some risks and drawbacks that are involved in this process.</p>
-<ul>
-<li><strong>Risk #1: Malware infection</strong>. Many websites that offer cracked software are not trustworthy or reputable. They may contain viruses, spyware, ransomware, or other malicious programs that can harm your computer or steal your personal information.</li>
-<li><strong>Risk #2: Poor performance</strong>. Cracked software may not work properly or may have bugs or errors that affect its functionality or quality. It may also crash frequently or cause compatibility issues with other programs or devices.</li>
-<li><strong>Risk #3: Legal trouble</strong>. Downloading cracked software is illegal in most countries and regions. It violates the intellectual property rights of the software developers and distributors. If you are caught using pirated software without a license, you may face fines or even jail time.</li>
-<li><strong>Risk #4: Ethical dilemma</strong>. Downloading cracked software is also unethical and unfair to the creators of the software who invested time, money, and effort into developing it. By using their product without paying for it, you are depriving them of their deserved income and recognition.</li>
-</ul>
-<p>If you still want to take these risks and download English Spanish Interpreter Professional 4.4 crack for free, here are some steps that you can follow:</p>
-<ol>
-<li>Find a website that offers English Spanish Interpreter Professional 4.4 crack download link when available (for example ). Make sure that the website has positive reviews and feedback from other users.</li>
-<li>Download the crack file from the website (usually a .zip or .rar file) and save it to your computer.</li>
-<li>Extract the crack file using a program like WinRAR or WinZip.</li>
-<li>Run the crack file (usually an .exe file) as an administrator.</li>
-<li>Follow the instructions on the screen to install and activate English Spanish Interpreter Professional 4.4 on your computer.</li>
-<li>Enjoy using English Spanish Interpreter Professional 4.4 for free!</li>
-</ol>
-<p>However, before you install and run any cracked software on your computer, make sure that you take some precautions:</p>
-<p>download english spanish translator pro 4.4 full version<br />
-english spanish interpreter professional 4.4 serial key<br />
-how to install english spanish interpreter professional 4.4 crack<br />
-english spanish interpreter professional 4.4 free download with crack<br />
-english spanish interpreter professional 4.4 activation code<br />
-english spanish interpreter professional 4.4 patch<br />
-english spanish interpreter professional 4.4 license key<br />
-english spanish interpreter professional 4.4 keygen<br />
-english spanish interpreter professional 4.4 cracked apk<br />
-english spanish interpreter professional 4.4 torrent download<br />
-english spanish interpreter professional 4.4 mega link<br />
-english spanish interpreter professional 4.4 portable<br />
-english spanish interpreter professional 4.4 for windows 10<br />
-english spanish interpreter professional 4.4 for mac<br />
-english spanish interpreter professional 4.4 for android<br />
-english spanish interpreter professional 4.4 online<br />
-english spanish interpreter professional 4.4 review<br />
-english spanish interpreter professional 4.4 features<br />
-english spanish interpreter professional 4.4 tutorial<br />
-english spanish interpreter professional 4.4 manual<br />
-english spanish interpreter professional 4.4 system requirements<br />
-english spanish interpreter professional 4.4 alternative<br />
-english spanish interpreter professional 4.4 vs google translate<br />
-english spanish interpreter professional 4.4 vs babylon translator<br />
-english spanish interpreter professional 4.4 vs systran translator<br />
-best english spanish interpreter software with crack<br />
-how to get english spanish interpreter software for free<br />
-where to find english spanish interpreter software crack<br />
-is it safe to use english spanish interpreter software crack<br />
-how to update english spanish interpreter software crack<br />
-how to uninstall english spanish interpreter software crack<br />
-how to fix english spanish interpreter software crack errors<br />
-how to register english spanish interpreter software crack<br />
-how to use english spanish interpreter software crack offline<br />
-how to translate documents with english spanish interpreter software crack<br />
-how to translate audio with english spanish interpreter software crack<br />
-how to translate video with english spanish interpreter software crack<br />
-how to translate websites with english spanish interpreter software crack<br />
-how to translate subtitles with english spanish interpreter software crack<br />
-how to translate games with english spanish interpreter software crack<br />
-how to improve your english with english spanish interpreter software crack<br />
-how to learn spanish with english spanish interpreter software crack<br />
-how to teach english or spanish with english spanish interpreter software crack<br />
-how to customize your dictionary with english spanish interpreter software crack<br />
-how to add slang and idioms with english spanish interpreter software crack<br />
-how to switch between british and american english with english spanish interpreter software crack<br />
-how to switch between european and latin american spanish with english spanish interpr</p>
-<ul>
-<li><strong>Precaution #1: Backup your data</strong>. Cracked software may damage or delete your files or folders without your consent or knowledge. To avoid losing your important data, make sure that you backup your data regularly on an external hard drive or cloud storage service.</li>
-<li><strong>Precaution #2: Disable your antivirus</strong>. Cracked software may be detected as malware by your antivirus program and be blocked or deleted automatically. To prevent this from happening, disable your antivirus temporarily while installing and running the cracked software.</li>
-<li><strong>Precaution #3: Use a VPN</strong>. Cracked software may expose your IP address or location to hackers or authorities who may track your online activity or identity. To protect your privacy and security, use a VPN (virtual private network) service while downloading or using cracked software.</li>
-</ul>
-<h2>Alternatives to English Spanish Interpreter Professional 4.4 Crack</h2>
-<p>If you are not willing to take these risks or face these drawbacks when downloading English Spanish Interpreter Professional 4.4 crack for free, don't worry! There are other alternatives that you can try instead.</p>
-<p>One alternative is to look for other translation programs that offer similar or better features and performance than English Spanish Interpreter Professional 4.4 but at a lower price or even for free.</p>
-<p>Some examples of these programs are:</p>
-<ul>
-<li><strong>Babylon Translator</strong>. This is a popular translation software that supports over 75 languages including English and Spanish. It has a simple interface that allows you to translate texts by typing them in or by copying them from any source such as websites or documents. It also has an integrated dictionary with over 1 million words. You can download Babylon Translator for free but with limited features. To unlock all its features such as voice recognition and text-to-speech, you need to pay $9.9 USD per month or $99 USD per year.</li>
-<li><strong>Google Translate</strong>. This is a free online translation service that supports over 100 languages including English and Spanish. It has a user-friendly interface that allows you to translate texts by typing them in, copying them from any source, speaking them out loud, or taking a picture of them. It also has a dictionary feature that shows you the definitions and synonyms of any word in both languages. You can access Google Translate from any web browser or download its app for your mobile device.</li>
-<li><strong>DeepL Translator</strong>. This is a relatively new online translation service that uses artificial intelligence and neural networks to produce high-quality translations that are more natural and accurate than those of other services. It supports 26 languages including English and Spanish. It has a simple interface that allows you to translate texts by typing them in or copying them from any source. It also has a dictionary feature that shows you the meanings and examples of any word in both languages. You can access DeepL Translator from any web browser or download its app for your Windows or Mac computer.</li>
-</ul>
-<p>Another alternative is to use online translation services or apps instead of desktop software. These are web-based or mobile-based tools that allow you to translate texts on the go, without having to install anything on your computer.</p>
-<p>Some advantages of using online translation services or apps are:</p>
-<ul>
-<li><strong>Advantage #1: Accessibility</strong>. You can access online translation services or apps from anywhere and anytime, as long as you have an internet connection and a web browser or a mobile device.</li>
-<li><strong>Advantage #2: Cost-effectiveness</strong>. You can use online translation services or apps for free or for a low price, depending on the features and quality that you need.</li>
-<li><strong>Advantage #3: Flexibility</strong>. You can use online translation services or apps for any type of text, whether it is short or long, formal or informal, simple or complex.</li>
-</ul>
-<p>Some disadvantages of using online translation services or apps are:</p>
-<ul>
-<li><strong>Disadvantage #1: Reliability</strong>. You may not be able to access online translation services or apps if there is a problem with your internet connection, your web browser, or your mobile device.</li>
-<li><strong>Disadvantage #2: Security</strong>. You may not be able to protect your privacy and confidentiality when using online translation services or apps, as your texts may be stored or shared by the service providers or third parties.</li>
-<li><strong>Disadvantage #3: Quality</strong>. You may not be able to get the best quality of translation when using online translation services or apps, as they may not have the same features and capabilities as desktop software.</li>
-</ul>
-<p>Some examples of online translation services or apps are:</p>
-<ul>
-<li><strong>Linguee</strong>. This is an online dictionary and search engine that shows you how words and phrases are translated in real-life contexts by professional translators. It supports 25 languages including English and Spanish. It has a user-friendly interface that allows you to search for words or phrases by typing them in or speaking them out loud. It also has an app for your mobile device that works offline.</li>
-<li><strong>iTranslate</strong>. This is an online and offline translator that supports over 100 languages including English and Spanish. It has a user-friendly interface that allows you to translate texts by typing them in, copying them from any source, speaking them out loud, or taking a picture of them. It also has a dictionary feature that shows you the definitions and synonyms of any word in both languages. You can download iTranslate for free but with limited features. To unlock all its features such as voice recognition and text-to-speech, you need to pay $9.99 USD per month or $39.99 USD per year.</li>
-<li><strong>Reverso Translate and Learn</strong>. This is an online and offline translator that supports 14 languages including English and Spanish. It has a user-friendly interface that allows you to translate texts by typing them in, copying them from any source, speaking them out loud, or taking a picture of them. It also has a dictionary feature that shows you the definitions and examples of any word in both languages. You can download Reverso Translate and Learn for free but with limited features. To unlock all its features such as voice recognition and text-to-speech, you need to pay $4.99 USD per month or $29.99 USD per year.</li>
-</ul>
-<h1>Conclusion</h1>
-<p>In conclusion, English Spanish Interpreter Professional 4.4 is a powerful and reliable translation program that can help you communicate effectively in both English and Spanish. It has many features and benefits that make it stand out from other similar programs. However, if you want to download it for free, you may face some risks and drawbacks that may affect your computer, your privacy, your legality, and your ethics.</p>
-<p>Therefore, you may want to consider other alternatives that are available, such as other translation programs that offer similar or better features and performance at a lower price or even for free, or online translation services or apps that allow you to translate texts on the go without having to install anything on your computer.</p>
-<p>The choice is yours, but whatever you decide, we hope that this article has helped you understand more about descargar english spanish interpreter professional 4.4 crack and its alternatives.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about descargar english spanish interpreter professional 4.4 crack and its alternatives:</p>
-<ul>
-<li><strong>Q: Is English Spanish Interpreter Professional 4.4 the best translation program?</strong></li>
-<li><strong>A: </strong>There is no definitive answer to this question, as different translation programs may have different strengths and weaknesses depending on the type, length, complexity, and purpose of the text that you want to translate. However, English Spanish Interpreter Professional 4.4 is certainly one of the best translation programs in terms of accuracy, quality, and functionality.</li>
-<li><strong>Q: Is it safe to download English Spanish Interpreter Professional 4.4 crack from any website?</strong></li>
-<li><strong>A: </strong>No, it is not safe to download English Spanish Interpreter Professional 4.4 crack from any website that claims to offer it. Many websites that offer cracked software are not trustworthy or reputable. They may contain malware that can harm your computer or steal your personal information. They may also provide you with a fake or corrupted crack file that may not work properly or may cause compatibility issues with other programs or devices.</li>
-<li><strong>Q: Is it legal to use English Spanish Interpreter Professional 4.4 crack without a license?</strong></li>
-<li><strong>A: </strong>No, it is not legal to use English Spanish Interpreter Professional 4.4 crack without a license. Downloading and using cracked software is illegal in most countries and regions. It violates the intellectual property rights of the software developers and distributors. If you are caught using pirated software without a license, you may face fines or even jail time.</li>
-<li><strong>Q: Is it ethical to use English Spanish Interpreter Professional 4.4 crack without paying for it?</strong></li>
-<li><strong>A: </strong>No, it is not ethical to use English Spanish Interpreter Professional 4.4 crack without paying for it. Downloading and using cracked software is unethical and unfair to the creators of the software who invested time, money, and effort into developing it. By using their product without paying for it, you are depriving them of their deserved income and recognition.</li>
-<li><strong>Q: What are some good alternatives to English Spanish Interpreter Professional 4.4 crack?</strong></li>
-<li><strong>A: </strong>Some good alternatives to English Spanish Interpreter Professional 4.4 crack are Babylon Translator, Google Translate, DeepL Translator, Linguee, iTranslate, and Reverso Translate and Learn. These are some of the best translation programs or services that offer similar or better features and performance than English Spanish Interpreter Professional 4.4 but at a lower price or even for free.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Filmora Free Without Watermark How to Edit Videos Like a Pro on a Budget.md
DELETED
@@ -1,18 +0,0 @@
-
-<h1>How to Use Filmora Free Without Watermark</h1>
-<p>Filmora is a popular and easy-to-use video editing software that can help you create stunning videos for various purposes. However, if you use the free version of Filmora, you will have to deal with a big watermark on your exported videos, which can be annoying and unprofessional.</p>
-<p>So, is there a way to use Filmora free without watermark? Unfortunately, the answer is no. Filmora only offers a 30-day free trial, and after that, you need to subscribe to one of its plans to remove the watermark. However, there are some alternatives that you can try to use Filmora free without watermark or use other video editors that are free and without watermark.</p>
-<h2>filmora free without watermark</h2><br /><p><b><b>Download File</b> 🆗 <a href="https://byltly.com/2uKvfq">https://byltly.com/2uKvfq</a></b></p><br /><br />
-<p>In this article, we will show you 10 ways to use Filmora free without watermark or find the best free Filmora alternatives for your video editing needs.</p>
-<h2>10 Ways to Use Filmora Free Without Watermark</h2>
-<p>Here are 10 ways that you can try to use Filmora free without watermark or find other free video editors that are similar to Filmora:</p>
-<ol>
-<li><b>Use the built-in video editor on your device</b>: If you only need some basic editing functions, such as trimming, cropping, rotating, or adding text, you can use the built-in video editor on your device. For example, Windows 10 has a Photos app that can edit videos, and Mac has iMovie that can do more advanced editing. These video editors are free and without watermark.</li>
-<li><b>Use an online video editor</b>: If you don't want to download or install any software on your device, you can use an online video editor that works on your browser. There are many online video editors that are free and without watermark, such as Clipchamp, Kapwing, WeVideo, etc. However, online video editors may have some limitations in terms of file size, quality, speed, and features.</li>
-<li><b>Use a screen recorder</b>: If you have already edited your video with Filmora and want to export it without watermark, you can use a screen recorder to record your video playback on Filmora. There are many screen recorders that are free and without watermark, such as OBS Studio, VLC Media Player, Windows Game Bar, etc. However, screen recording may affect the quality and sound of your video.</li>
-<li><b>Use a watermark remover</b>: If you have already exported your video with Filmora and want to remove the watermark from it, you can use a watermark remover tool to erase or replace the watermark. There are many watermark remover tools that are free and without watermark, such as VideoProc, Apowersoft Online Watermark Remover, Inpaint, etc. However, watermark remover tools may not work perfectly on every video and may leave some traces or artifacts.</li>
-<li><b>Use a video converter</b>: If you have already exported your video with Filmora and want to change its format or quality, you can use a video converter tool to convert your video to another format or resolution. There are many video converter tools that are free and without watermark, such as HandBrake, Freemake Video Converter, VLC Media Player, etc. However, video converter tools may not be able to remove the watermark from your video.</li>
-<li><b>Use a different version of Filmora</b>: If you want to use Filmora for free without watermark but don't mind using an older version of it, you can try to find a different version of Filmora that has no watermark or has a smaller watermark. For example, some users claim that Filmora 8.5.3 has no watermark or has a smaller watermark than the latest version. However, using a different version of Filmora may not be safe or legal and may lack some features or updates.</li>
-<li><b>Use a cracked version of Filmora</b>: If you want to use Filmora for free without watermark but don't mind breaking the law or risking your device's security, you can try to find a cracked version of Filmora that has no watermark or has all the premium features unlocked. For example, some websites offer Filmora 11 free download without watermark for Windows. However, using a cracked version of Filmora is illegal and unethical and</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Allwinner A13 Tw A0910 V22 1126 16 PATCHED.md
DELETED
@@ -1,12 +0,0 @@
-<br />
-<h1>Allwinner A13 TW A0910 V22 1126 16: A Budget-Friendly Tablet</h1>
-<p>If you are looking for a low-cost tablet that can run Android OS, you might want to check out the Allwinner A13 TW A0910 V22 1126 16. This tablet is based on the Allwinner A13 chipset, which is a cheaper version of the A10 that lacks HDMI-transmitter and SATA-controller[^2^]. The A13 is primarily targeted towards tablets and low-budget IoT devices[^2^].</p>
-<p>The Allwinner A13 TW A0910 V22 1126 16 has a 9-inch capacitive touchscreen with a resolution of 800x480 pixels. It has a front-facing camera, a microSD card slot, a microUSB port, a headphone jack, and a power button. It runs on Android 4.0 Ice Cream Sandwich with test keys[^1^]. The tablet has a battery capacity of 4000 mAh and weighs about 500 grams.</p>
-<h2>allwinner a13 tw a0910 v22 1126 16</h2><br /><p><b><b>Download File</b> ✶✶✶ <a href="https://imgfil.com/2uy1fH">https://imgfil.com/2uy1fH</a></b></p><br /><br />
-<p>The Allwinner A13 TW A0910 V22 1126 16 is not a high-end tablet, but it can perform basic tasks such as browsing the web, watching videos, playing games, and reading e-books. It is suitable for users who are looking for a simple and affordable device that can run Android OS. However, it may not support some of the latest apps and features that require more advanced hardware and software.</p><p>One of the advantages of the Allwinner A13 TW A0910 V22 1126 16 is that it can be overclocked to boost its performance. Some users have reported that they were able to overclock the tablet to 1.152 GHz, which increased the Antutu benchmark score by nearly 400 points[^2^]. Overclocking can also improve the gaming experience, as some 3D games that require more processing power can run more smoothly on the tablet[^2^]. However, overclocking may also cause instability, overheating, and battery drain, so it should be done with caution and at your own risk.</p>
-<p>Another advantage of the Allwinner A13 TW A0910 V22 1126 16 is that it supports OTG USB, which means you can connect external devices such as mouse, keyboard, gamepad, or 3G dongle to the tablet via a mini USB port. This can enhance the functionality and versatility of the tablet, as you can use it for different purposes and scenarios. For example, you can use a mouse and keyboard for typing and browsing, a gamepad for playing games, or a 3G dongle for accessing mobile internet[^2^]. However, not all devices may be compatible with the tablet, so you may need to check before buying or using them.</p>
-<p>One of the disadvantages of the Allwinner A13 TW A0910 V22 1126 16 is that it has a low-resolution screen. The screen has a resolution of 800x480 pixels, which is quite low for a 9-inch tablet. This means that the images and text may appear pixelated and blurry on the screen. The screen is also not covered with glass but with plastic, which may affect the touch sensitivity and durability of the screen[^1^]. The low-resolution screen may also limit the quality and compatibility of some apps and games that require higher resolutions.</p>
-<p>Another disadvantage of the Allwinner A13 TW A0910 V22 1126 16 is that it has a poor battery life. The tablet has a battery capacity of 4000 mAh, which is not very high for a tablet of this size and performance. Some users have reported that the battery does not last long and drains quickly when using the tablet[^3^]. The battery life may also be affected by factors such as overclocking, brightness, Wi-Fi, and apps running in the background. The battery life may be insufficient for users who need to use the tablet for long periods of time or on the go.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Esr Disc Patcher Gui 0.24a Downloadl What is ESR and How Does it Work?.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Esr Disc Patcher Gui 0.24a Downloadl</h2><br /><p><b><b>Download File</b> • <a href="https://imgfil.com/2uy08n">https://imgfil.com/2uy08n</a></b></p><br /><br />
-<br />
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Free Download Champak Comics In Hindi Pdf.md
DELETED
@@ -1,21 +0,0 @@
-
-<h1>How to Free Download Champak Comics In Hindi Pdf</h1>
-<p>If you are a fan of Champak comics, you might be wondering how to free download Champak comics in Hindi pdf. Champak is one of the most popular children's magazines in India, featuring stories, puzzles, jokes, and cartoons. It was launched in 1968 and has been entertaining generations of readers ever since.</p>
-<h2>Free Download Champak Comics In Hindi Pdf</h2><br /><p><b><b>DOWNLOAD</b> ★★★ <a href="https://imgfil.com/2uxZ8D">https://imgfil.com/2uxZ8D</a></b></p><br /><br />
-<p>Champak comics are available in various languages, including Hindi, English, Gujarati, Marathi, Kannada, Tamil, Telugu, Malayalam, and Bengali. However, if you want to read Champak comics in Hindi pdf format, you might have a hard time finding them online. Most of the websites that claim to offer free download Champak comics in Hindi pdf are either fake or full of malware.</p>
-<p>So how can you free download Champak comics in Hindi pdf safely and legally? The answer is simple: you can use Bing search engine. Bing is a powerful and reliable search engine that can help you find what you are looking for. Here are the steps to free download Champak comics in Hindi pdf using Bing:</p>
-<ol>
-<li>Go to <a href="https://www.bing.com">www.bing.com</a> and type "Free Download Champak Comics In Hindi Pdf" in the search box.</li>
-<li>You will see a list of results that match your query. Look for the ones that have a green lock icon and a pdf file extension. These are the ones that are safe and legal to download.</li>
-<li>Click on the result that you like and you will be taken to the website where you can download the pdf file. You might have to sign up or complete a survey before you can access the file.</li>
-<li>Once you have downloaded the file, you can open it with any pdf reader and enjoy reading Champak comics in Hindi.</li>
-</ol>
-<p>That's it! You have successfully free downloaded Champak comics in Hindi pdf using Bing. You can repeat the same process for any other Champak comics or magazines that you want to read. Bing is your best friend when it comes to finding and downloading anything online.</p>
-<p></p>
-<p>If you liked this article, please share it with your friends and family who are also fans of Champak comics. And don't forget to check out our other articles on how to free download various things using Bing. Happy reading!</p>
-
-<p>Champak comics are not only fun to read, but also educational and inspirational. They teach children about various topics, such as science, history, culture, morals, and values. They also encourage children to use their imagination and creativity, and to develop their skills and talents.</p>
-<p>Some of the most famous characters in Champak comics are Cheeku the rabbit, Meeku the mouse, Baddy the fox, Jumbo the elephant, and Champakvan's king Raja Hooja. They have many adventures and misadventures in Champakvan, a fictional forest where they live. They also meet many other animals and humans who become their friends or foes.</p>
-<p>Champak comics are suitable for children of all ages, from toddlers to teenagers. They can also be enjoyed by adults who want to relive their childhood memories or share them with their kids. Champak comics are a great way to bond with your family and have some fun together.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Logo Maker Generate a Professional Logo in Minutes.md
DELETED
@@ -1,73 +0,0 @@
-
-<h1>How to Create a Stunning Car Logo for Your Brand</h1>
-<h2>Introduction</h2>
-<p>Car logos are more than just symbols that identify different car brands and models. They are also expressions of your brand's personality, values, and vision. A well-designed car logo can make your brand stand out from the crowd, attract customers, and build loyalty.</p>
-<h2>The history of car logos</h2>
-<p>Car logos have a long and fascinating history that dates back to the early days of the automotive industry. One of the earliest car logos was created by Peugeot in 1905. The logo featured a lion, which was a reference to the company's home city of Lyon. The lion was depicted standing on its hind legs, symbolizing the strength and power of the Peugeot brand.</p>
-<h2>car logo</h2><br /><p><b><b>Download Zip</b> <a href="https://urlin.us/2uSZ4L">https://urlin.us/2uSZ4L</a></b></p><br /><br />
-<p>Another early example of a car logo was created by Cadillac in 1906. The logo featured a coat of arms with the Cadillac name at the top and a wreath surrounding it. The coat of arms was meant to symbolize the company's status as a luxury brand.</p>
-<p>Over time, car logos evolved to reflect changing trends, technologies, and customer preferences. Some car logos became more minimalist and modern, while others retained their classic and elegant style.</p>
-5. Fifth, write this custom message at the very bottom of the article: " <p>I hope you enjoyed this article on how to create a stunning car logo for your brand. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
-<h2>FAQs</h2>
-<ol>
-<li>What are some of the most famous car logos in the world?</li>
-<p>Some of the most famous car logos in the world are Mercedes-Benz's three-pointed star, BMW's blue and white roundel, Ferrari's prancing horse, Toyota's three overlapping ellipses, and Volkswagen's stylized "VW".</p>
-<li>What are some of the best tools to create a car logo?</li>
-<p>There are many tools available online that can help you create a car logo for free or for a fee. Some of the best tools are Looka, 99designs, LogoMaker, and Canva.</p>
-<li>What are some of the best practices for designing a car logo?</li>
-<p>Some of the best practices for designing a car logo are: </p>
-<p>car logo design<br />
-car logo maker<br />
-car logo quiz<br />
-car logo meaning<br />
-car logo history<br />
-car logo stickers<br />
-car logo decals<br />
-car logo embroidery<br />
-car logo vector<br />
-car logo png<br />
-car logo svg<br />
-car logo font<br />
-car logo generator<br />
-car logo ideas<br />
-car logo inspiration<br />
-car logo template<br />
-car logo animation<br />
-car logo reveal<br />
-car logo wallpaper<br />
-car logo art<br />
-car logo drawing<br />
-car logo painting<br />
-car logo coloring pages<br />
-car logo printables<br />
-car logo trivia<br />
-car logo facts<br />
-car logo evolution<br />
-car logo comparison<br />
-car logo ranking<br />
-car logo rating<br />
-car logo review<br />
-car logo analysis<br />
-car logo research<br />
-car logo statistics<br />
-car logo trends<br />
-car logo news<br />
-car logo blog<br />
-car logo podcast<br />
-car logo video<br />
-car logo youtube channel<br />
-car logo instagram account<br />
-car logo facebook page<br />
-car logo twitter handle<br />
-car logo pinterest board<br />
-car logo reddit community<br />
-car logo forum<br />
-car logo website<br />
-car logo domain name<br />
-car logo niche market</p>
-<ul>
-<li>Choose colors that reflect your brand's personality and message.</li>
-<li>Use fonts that are legible and consistent with your brand's style.</li>
-<li>Pick a symbol that is</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download Facebook Videos for Free in HD Quality - No Watermark.md
DELETED
@@ -1,209 +0,0 @@
-
-<h1>How to Download Facebook Videos Without Watermark</h1>
-<p>Facebook is one of the most popular social media platforms in the world, with billions of users who share various types of content, including videos. Sometimes, you might come across a Facebook video that you want to save offline for later viewing or sharing. However, not all Facebook videos are easy to download, especially if they have a watermark on them.</p>
-<h2>download facebook no watermark</h2><br /><p><b><b>Download File</b> ►►►►► <a href="https://jinyurl.com/2uNOi9">https://jinyurl.com/2uNOi9</a></b></p><br /><br />
-<p>A watermark is a logo, text, or image that is overlaid on a video to indicate its source or ownership. While some watermarks are subtle and unobtrusive, others are large and annoying, covering important parts of the video or distracting the viewers. If you want to download a Facebook video without watermark, you might need some tools or apps that can help you do that.</p>
-<p>In this article, we will show you how to download Facebook videos without watermark using online tools and mobile apps. We will also explain the benefits of downloading Facebook videos without watermark and answer some frequently asked questions about this topic.</p>
-<h2>Introduction</h2>
-<h3>What is a watermark and why you might want to avoid it</h3>
-<p>A watermark is a logo, text, or image that is overlaid on a video to indicate its source or ownership. Watermarks are usually used by content creators or platforms to protect their intellectual property rights and prevent unauthorized copying or distribution of their videos. However, watermarks can also have some drawbacks for the viewers and the downloaders of the videos.</p>
-<p>Some of the reasons why you might want to avoid watermarks on Facebook videos are:</p>
-<p>How to download facebook videos without watermark<br />
-Download facebook live videos without watermark<br />
-Facebook video downloader online free no watermark<br />
-Save facebook videos to computer without watermark<br />
-Download facebook stories without watermark<br />
-Download facebook reels without watermark<br />
-Facebook video downloader chrome extension no watermark<br />
-Download private facebook videos without watermark<br />
-Download facebook watch videos without watermark<br />
-Facebook video downloader HD 1080p no watermark<br />
-Download facebook video to mp4 without watermark<br />
-Download facebook video to mp3 without watermark<br />
-Facebook video converter online no watermark<br />
-Download facebook video with subtitles no watermark<br />
-Download facebook 360 video without watermark<br />
-Download facebook video ads without watermark<br />
-Download facebook video cover without watermark<br />
-Download facebook video profile picture without watermark<br />
-Download facebook video from messenger without watermark<br />
-Download facebook video from group without watermark<br />
-Download facebook video from page without watermark<br />
-Download facebook video from event without watermark<br />
-Download facebook video from marketplace without watermark<br />
-Download facebook video from dating without watermark<br />
-Download facebook video from gaming without watermark<br />
-Download facebook video from news feed without watermark<br />
-Download facebook video from timeline without watermark<br />
-Download facebook video from album without watermark<br />
-Download facebook video from tag no watermark<br />
-Download facebook video from comment no watermark<br />
-Download facebook video from link no watermark<br />
-Download facebook video from embed code no watermark<br />
-Download facebook video from mobile no watermark<br />
-Download facebook video from desktop no watermark<br />
-Download facebook video from app no watermark<br />
-Download facebook video from website no watermark<br />
-Best tool to download facebook videos without watermark<br />
-Fast and easy way to download facebook videos without watermark<br />
-How to download multiple facebook videos without watermark<br />
-How to download long facebook videos without watermark<br />
-How to download high quality facebook videos without watermark<br />
-How to download low quality facebook videos without watermark<br />
-How to download full screen facebook videos without watermark<br />
-How to download cropped facebook videos without watermark<br />
-How to download edited facebook videos without watermark<br />
-How to download original facebook videos without watermark<br />
-How to download shared facebook videos without watermark<br />
-How to download liked facebook videos without watermark<br />
-How to download saved facebook videos without watermark</p>
-<ul>
-<li>Watermarks can reduce the quality and clarity of the video, making it less enjoyable to watch.</li>
-
<li>Watermarks can cover important parts of the video, such as faces, captions, or subtitles, making it hard to follow the content.</li>
|
63 |
-
<li>Watermarks can be annoying and distracting, especially if they are large, flashy, or animated.</li>
|
64 |
-
<li>Watermarks can make the video look unprofessional or unoriginal, especially if you want to share it with others or use it for your own purposes.</li>
|
65 |
-
</ul>
|
66 |
-
<h3>Benefits of downloading Facebook videos without watermark</h3>
|
67 |
-
<p>Downloading Facebook videos without watermark can have some benefits for you, such as:</p>
|
68 |
-
<ul>
|
69 |
-
<li>You can enjoy the video in its original quality and resolution, without any interference from the watermark.</li>
|
70 |
-
<li>You can see the whole content of the video, without missing any details or information that might be hidden by the watermark.</li>
|
71 |
-
<li>You can avoid any annoyance or distraction caused by the watermark, and focus on the content and message of the video.</li>
|
72 |
-
<li>You can share the video with others or use it for your own purposes, without worrying about infringing any intellectual property rights or violating any terms of service.</li>
|
73 |
-
</ul>
|
74 |
-
<h2>How to download Facebook videos without watermark using online tools</h2>
|
75 |
-
<p>One of the easiest ways to download Facebook videos without watermark is to use online tools that can help you do that. Online tools are websites that allow you to paste the URL of a Facebook video and download it in various formats and qualities. You don't need to install anything on your device or sign up for anything. You just need a browser and an internet connection.</p>
|
76 |
-
<p>However, not all online tools are reliable or safe. Some of them might not work properly, have limited features, contain ads or malware, or require payment or registration. Therefore, you need to be careful when choosing an online tool to download Facebook videos without watermark.</p>
|
77 |
-
<p>To help you out, we have selected three of the best online tools to download Facebook videos without watermark, based on their features, ease of use, and safety. Here they are:</p>
|
78 |
-
<h3>SaveFrom.net</h3>
|
79 |
-
<h4>Features</h4>
|
80 |
-
<p>SaveFrom.net is one of the most popular and trusted online tools to download Facebook videos without watermark. It has the following features:</p>
|
81 |
-
<ul>
|
82 |
-
<li>It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.</li>
|
83 |
-
<li>It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.</li>
|
84 |
-
<li>It has a browser extension that lets you download videos directly from the Facebook page, without visiting the website.</li>
|
85 |
-
<li>It is fast, easy, and free to use, without any ads or registration.</li>
|
86 |
-
</ul>
|
87 |
-
<h4>Steps</h4>
|
88 |
-
<p>To download Facebook videos without watermark using SaveFrom.net, follow these steps:</p>
|
89 |
-
<ol>
|
90 |
-
<li>Copy the URL of the Facebook video that you want to download.</li>
|
91 |
-
<li>Go to <a href="">SaveFrom.net</a> and paste the URL in the search box.</li>
|
92 |
-
<li>Select the format and quality of the video that you want to download.</li>
|
93 |
-
<li>Click on the "Download" button and wait for the video to be saved on your device.</li>
|
94 |
-
</ol>
|
95 |
-
<h3>Toolzu.com</h3>
|
96 |
-
<h4>Features</h4>
|
97 |
-
<p>Toolzu.com is another great online tool to download Facebook videos without watermark. It has the following features:</p>
|
98 |
-
<ul>
|
99 |
-
<li>It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.</li>
|
100 |
-
<li>It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.</li>
|
101 |
-
<li>It has a simple and user-friendly interface that makes it easy to use.</li>
|
102 |
-
<li>It is fast, reliable, and free to use, without any ads or registration.</li>
|
103 |
-
</ul>
|
104 |
-
<h4>Steps</h4>
|
105 |
-
<p>To download Facebook videos without watermark using Toolzu.com, follow these steps:</p>
|
106 |
-
<ol>
|
107 |
-
<li>Copy the URL of the Facebook video that you want to download.</li>
|
108 |
-
<li>Go to <a href="">Toolzu.com</a> and paste the URL in the search box.</li>
|
109 |
-
<li>Select the format and quality of the video that you want to download.</li>
|
110 |
-
<li>Click on the "Download" button and wait for the video to be saved on your device.</li>
|
111 |
-
</ol>
|
112 |
-
<h3>FDown.net</h3>
|
113 |
-
<h4>Features</h4>
|
114 |
-
<p>FDown.net is a third online tool that can help you download Facebook videos without watermark. It has the following features:</p>
|
115 |
-
<ul>
|
116 |
-
<li>It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.</li>
|
117 |
-
<li>It allows you to choose the format and quality of the video, such as MP4, WEBM, HD, SD, etc.</li>
|
118 |
-
<li>It has a smart detection feature that automatically detects the best quality available for the video.</li>
|
119 |
-
<li>It is fast, secure, and free to use, without any ads or registration.</li>
|
120 |
-
</ul>
|
121 |
-
<h4>Steps</h4>
|
122 |
-
<p>To download Facebook videos without watermark using FDown.net, follow these steps:</p>
|
123 |
-
<ol>
|
124 |
-
<li>Copy the URL of the Facebook video that you want to download.</li>
|
125 |
-
<li>Go to <a href="">FDown.net</a> and paste the URL in the search box.</li>
|
126 |
-
<li>Select the format and quality of the video that you want to download.</li>
|
127 |
-
<li>Click on the "Download" button and wait for the video to be saved on your device.</li>
|
128 |
-
</ol>
|
129 |
-
<h2>How to download Facebook videos without watermark using mobile apps</h2>
|
130 |
-
<p>If you prefer to use your mobile device to download Facebook videos without watermark, you can also use some mobile apps that can help you do that. Mobile apps are applications that you can install on your smartphone or tablet and use them offline. You don't need a browser or an internet connection. However, you need to make sure that the app is compatible with your device and operating system.</p>
|
131 |
-
<p>However, not all mobile apps are reliable or safe. Some of them might not work properly, have limited features, contain ads or malware, or require payment or registration. Therefore, you need to be careful when choosing a mobile app to download Facebook videos without watermark. To help you out, we have selected two of the best mobile apps to download Facebook videos without watermark, based on their features, ease of use, and safety. Here they are:</p>
|
132 |
-
<h3>Snapsave Video Downloader for Facebook</h3>
|
133 |
-
<h4>Features</h4>
|
134 |
-
<p>Snapsave Video Downloader for Facebook is a mobile app that can help you download Facebook videos without watermark. It has the following features:</p>
|
135 |
-
<ul>
|
136 |
-
<li>It supports downloading videos from Facebook and other platforms, such as Instagram, TikTok, and more.</li>
|
137 |
-
<li>It allows you to choose the format and quality of the video, such as MP4, HD, SD, etc.</li>
|
138 |
-
<li>It has a built-in browser that lets you browse and download videos directly from the Facebook app or website.</li>
|
139 |
-
<li>It has a video player that lets you preview and play the downloaded videos offline.</li>
|
140 |
-
<li>It is easy, fast, and free to use, without any ads or registration.</li>
|
141 |
-
</ul>
|
142 |
-
<h4>Steps</h4>
|
143 |
-
<p>To download Facebook videos without watermark using Snapsave Video Downloader for Facebook, follow these steps:</p>
|
144 |
-
<ol>
|
145 |
-
<li>Download and install the app from the <a href="">Google Play Store</a> or the <a href="">App Store</a>.</li>
|
146 |
-
<li>Open the app and tap on the "Facebook" icon to launch the built-in browser.</li>
|
147 |
-
<li>Login to your Facebook account and find the video that you want to download.</li>
|
148 |
-
<li>Tap on the video and then tap on the "Download" button at the bottom right corner of the screen.</li>
|
149 |
-
<li>Select the format and quality of the video that you want to download.</li>
|
150 |
-
<li>Wait for the video to be downloaded and saved on your device.</li>
|
151 |
-
</ol>
|
152 |
-
<h3>Video Downloader for Facebook by ETM Video Downloader</h3>
|
153 |
-
<h4>Features</h4>
|
154 |
-
<p>Video Downloader for Facebook by ETM Video Downloader is another mobile app that can help you download Facebook videos without watermark. It has the following features:</p>
|
155 |
-
<ul>
|
156 |
-
<li>It supports downloading videos from Facebook and other platforms, such as YouTube, Instagram, Twitter, TikTok, and more.</li>
|
157 |
-
<li>It allows you to choose the format and quality of the video, such as MP4, HD, SD, etc.</li>
|
158 |
-
<li>It has a smart detection feature that automatically detects and downloads videos from any link or page.</li>
|
159 |
-
<li>It has a video manager that lets you view, play, delete, or share the downloaded videos offline.</li>
|
160 |
-
<li>It is easy, fast, and free to use, without any ads or registration.</li>
|
161 |
-
</ul>
|
162 |
-
<h4>Steps</h4>
|
163 |
-
<p>To download Facebook videos without watermark using Video Downloader for Facebook by ETM Video Downloader, follow these steps:</p>
|
164 |
-
<ol>
|
165 |
-
<li>Download and install the app from the <a href="">Google Play Store</a>.</li>
|
166 |
-
<li>Open the app and tap on the "Facebook" icon to launch the built-in browser.</li>
|
167 |
-
<li>Login to your Facebook account and find the video that you want to download.</li>
|
168 |
-
<li>Copy the URL of the video or tap on the "Share" button and then tap on "Copy Link".</li>
|
169 |
-
<li>Paste the URL in the app's search box or tap on the "Paste Link" button.</li>
|
170 |
-
<li>Select the format and quality of the video that you want to download.</li>
|
171 |
-
<li>Wait for the video to be downloaded and saved on your device.</li>
|
172 |
-
</ol>
|
173 |
-
<h2>Conclusion</h2>
|
174 |
-
<p>In conclusion, downloading Facebook videos without watermark can be beneficial for you if you want to enjoy them in their original quality and resolution, see their whole content without any interference, avoid any annoyance or distraction caused by the watermark, or share them with others or use them for your own purposes. However, not all Facebook videos are easy to download without watermark. You might need some tools or apps that can help you do that. In this article, we have shown you how to download Facebook videos without watermark using online tools and mobile apps. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
175 |
-
<h2>FAQ</h2>
|
176 |
-
<p>Here are some frequently asked questions about downloading Facebook videos without watermark:</p>
|
177 |
-
<h3>Is it legal to download Facebook videos without watermark?</h3>
|
178 |
-
<p>The answer to this question depends on several factors, such as:</p>
|
179 |
-
<ul>
|
180 |
-
<li>The source and ownership of the video. If the video belongs to someone else who has not given you permission to download it or use it for your own purposes, you might be violating their intellectual property rights or terms of service.</li>
|
181 |
-
<li>The purpose and intention of downloading the video. If you are downloading the video for personal use only, such as watching it offline or sharing it with your friends or family, you might not be breaking any laws. However, contrast, saturation, and more of the video.</li>
|
182 |
-
<li>Use the effects, filters, transitions, text, music, and more tools to enhance your video.</li>
|
183 |
-
<li>Click on the "Export" button and choose the format and quality of the video that you want to save.</li>
|
184 |
-
<li>Wait for the video to be exported and saved on your device.</li>
|
185 |
-
</ol>
|
186 |
-
<h4>InShot</h4>
|
187 |
-
<p>InShot is a mobile app that can help you remove watermark from a Facebook video. It has the following features:</p>
|
188 |
-
<ul>
|
189 |
-
<li>It supports editing and converting videos from various formats and platforms, such as MP4, MOV, AVI, WMV, MKV, FLV, Facebook, YouTube, Instagram, TikTok, and more.</li>
|
190 |
-
<li>It allows you to crop, trim, rotate, adjust, add effects, filters, stickers, text, music, and more to your videos.</li>
|
191 |
-
<li>It has a simple and user-friendly interface that makes it easy to use.</li>
|
192 |
-
<li>It is fast, reliable, and free to use. You can download it from the <a href="">Google Play Store</a> or the <a href="">App Store</a>.</li>
|
193 |
-
</ul>
|
194 |
-
<p>To remove watermark from a Facebook video using InShot, follow these steps:</p>
|
195 |
-
<ol>
|
196 |
-
<li>Download and install the app from the <a href="">Google Play Store</a> or the <a href="">App Store</a>.</li>
|
197 |
-
<li>Open the app and tap on the "Video" icon to import the Facebook video that you want to remove watermark from.</li>
|
198 |
-
<li>Select the video and tap on the "Canvas" icon to crop out the watermark from the video.</li>
|
199 |
-
<li>Tap on the "Trim" icon to trim out any unwanted parts of the video.</li>
|
200 |
-
<li>Tap on the "Rotate" icon to rotate the video if needed.</li>
|
201 |
-
<li>Tap on the "Adjust" icon to adjust the brightness, contrast, saturation, and more of the video.</li>
|
202 |
-
<li>Tap on the "Filter" icon to add effects and filters to your video.</li>
|
203 |
-
<li>Tap on the "Sticker" icon to add stickers and text to your video.</li>
|
204 |
-
<li>Tap on the "Music" icon to add music or sound effects to your video.</li>
|
205 |
-
<li>Tap on the "Save" icon and choose the format and quality of the video that you want to save.</li>
|
206 |
-
<li>Wait for the video to be saved on your device.</li>
|
207 |
-
</ol></p> 197e85843d<br />
|
208 |
-
<br />
|
209 |
-
<br />
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download The Spike MOD APK and Enjoy All Characters Money and Max Level.md
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Game The Spike Mod Apk: A Volleyball Game with Retro Graphics and Dynamic Gameplay</h1>
|
3 |
-
<p>If you are looking for a fun and simple volleyball game that will keep you entertained for hours, then you should download game The Spike mod apk. The Spike is a 2D volleyball game developed by a high school indie team in Korea. It features retro arcade style graphics, speedy and powerful spikes, and a story mode that follows the student athletes who aim for the top. You can also play with your friends in local multiplayer mode and compete with other players online.</p>
|
4 |
-
<h2>download game the spike mod apk</h2><br /><p><b><b>Download Zip</b> ☑ <a href="https://jinyurl.com/2uNOUj">https://jinyurl.com/2uNOUj</a></b></p><br /><br />
|
5 |
-
<p>In this article, we will tell you everything you need to know about The Spike mod apk, including its features, how to download and install it, and some tips and tricks for playing it. So, without further ado, let's get started!</p>
|
6 |
-
<h2>Features of The Spike Mod Apk</h2>
|
7 |
-
<p>The Spike mod apk is a modified version of the original game that gives you some extra benefits. Here are some of the features of The Spike mod apk:</p>
|
8 |
-
<ul>
|
9 |
-
<li><b>Unlimited money and gems</b>: You can use these currencies to buy new characters, costumes, skills, items, and more. You can also upgrade your stats and abilities to become stronger.</li>
|
10 |
-
<li><b>All characters and costumes unlocked</b>: You can access all the characters and costumes in the game without having to complete the story mode or spend money. You can choose from different teams, positions, and styles.</li>
|
11 |
-
<li><b>No ads and no root required</b>: You can enjoy the game without any interruptions or hassles. You don't need to root your device or install any other apps to use The Spike mod apk.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>How to Download and Install The Spike Mod Apk</h2>
|
14 |
-
<p>Downloading and installing The Spike mod apk is very easy. Just follow these simple steps:</p>
|
15 |
-
<ol>
|
16 |
-
<li><b>Step 1</b>: Download the mod apk file from a trusted source. You can use this link to download it.</li>
|
17 |
-
<li><b>Step 2</b>: Enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
18 |
-
<li><b>Step 3</b>: Install the mod apk file and launch the game. Locate the downloaded file on your device storage and tap on it to install it. Once installed, open the game and enjoy!</li>
|
19 |
-
</ol>
|
20 |
-
<h2>Tips and Tricks for Playing The Spike</h2>
|
21 |
-
<p>The Spike is a game that requires skill, timing, and strategy. Here are some tips and tricks that will help you improve your performance:</p>
|
22 |
-
<ul>
|
23 |
-
<li><b>Choose your team and position wisely</b>: Different teams have different strengths and weaknesses. For example, some teams are good at offense, while others are good at defense. Similarly, different positions have different roles. For example, setters are responsible for setting up spikes, while hitters are responsible for spiking the ball. Choose a team and position that suits your playstyle.</li> <li><b>Master the timing and power of your spikes</b>: Spiking is the most important skill in The Spike. You need to press the spike button at the right moment to hit the ball with maximum force and accuracy. You can also adjust the angle and direction of your spikes by tilting your device. Try to aim for the corners or gaps in the opponent's defense.</li>
|
24 |
-
<li><b>Use the special skills and items to gain an edge</b>: Each character has a unique special skill that can be activated once per set. For example, some skills can increase your speed, power, or accuracy, while others can stun, confuse, or block the opponent. You can also use items such as bombs, nets, or magnets to disrupt the opponent's play. Use these skills and items wisely and strategically.</li>
|
25 |
-
<li><b>Challenge your friends in local multiplayer mode</b>: The Spike mod apk allows you to play with your friends on the same device or via Bluetooth. You can choose from different modes such as 1v1, 2v2, or 3v3. You can also customize the rules, settings, and difficulty of the game. Have fun and show off your skills!</li>
|
26 |
-
</ul>
|
27 |
-
<h2>Conclusion</h2>
|
28 |
-
<p>The Spike is a volleyball game that will make you feel the thrill and excitement of spiking the ball. It has retro graphics, dynamic gameplay, and a captivating story mode. You can also enjoy the game with unlimited money and gems, all characters and costumes unlocked, and no ads with The Spike mod apk. Download game The Spike mod apk now and experience the ultimate volleyball game!</p>
|
29 |
-
<p>How to download game the spike mod apk for free<br />
|
30 |
-
Download game the spike mod apk latest version<br />
|
31 |
-
Download game the spike mod apk with unlimited money<br />
|
32 |
-
Download game the spike mod apk and unlock all characters<br />
|
33 |
-
Download game the spike mod apk for android devices<br />
|
34 |
-
Download game the spike mod apk for PC windows 10<br />
|
35 |
-
Download game the spike mod apk offline mode<br />
|
36 |
-
Download game the spike mod apk no root required<br />
|
37 |
-
Download game the spike mod apk from 5play app[^1^]<br />
|
38 |
-
Download game the spike mod apk with max level<br />
|
39 |
-
Download game the spike mod apk with cheats and hacks<br />
|
40 |
-
Download game the spike mod apk with high graphics quality<br />
|
41 |
-
Download game the spike mod apk with multiplayer mode<br />
|
42 |
-
Download game the spike mod apk with custom skins<br />
|
43 |
-
Download game the spike mod apk with new features and updates<br />
|
44 |
-
Download game the spike mod apk without ads and pop-ups<br />
|
45 |
-
Download game the spike mod apk with easy installation guide<br />
|
46 |
-
Download game the spike mod apk with fast download speed<br />
|
47 |
-
Download game the spike mod apk with secure and safe link<br />
|
48 |
-
Download game the spike mod apk with full unlocked gameplay<br />
|
49 |
-
Download game the spike mod apk with original soundtrack and sound effects<br />
|
50 |
-
Download game the spike mod apk with different difficulty levels and modes<br />
|
51 |
-
Download game the spike mod apk with best reviews and ratings<br />
|
52 |
-
Download game the spike mod apk with tips and tricks to win<br />
|
53 |
-
Download game the spike mod apk with fun and addictive gameplay</p>
|
54 |
-
<h2>FAQs</h2>
|
55 |
-
<p>Here are some frequently asked questions about The Spike mod apk:</p>
|
56 |
-
<ol>
|
57 |
-
<li><b>Q1: Is The Spike mod apk safe to use?</b></li>
|
58 |
-
<p>A1: Yes, The Spike mod apk is safe to use as long as you download it from a reliable source. We have tested the mod apk file and found no viruses or malware. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.</p>
|
59 |
-
<li><b>Q2: How to update The Spike mod apk?</b></li>
|
60 |
-
<p>A2: To update The Spike mod apk, you need to download the latest version of the mod apk file from the same source that you downloaded it from before. Then, you need to uninstall the previous version of the game and install the new one. You don't need to worry about losing your progress as it will be saved on your device.</p>
|
61 |
-
<li><b>Q3: How to play The Spike on PC?</b></li>
|
62 |
-
<p>A3: To play The Spike on PC, you need to use an Android emulator such as BlueStacks or NoxPlayer. These are software that allow you to run Android apps on your PC. You need to download and install the emulator on your PC and then download and install The Spike mod apk on it. Then, you can launch the game and play it on a bigger screen.</p>
|
63 |
-
<li><b>Q4: What are the best characters and costumes in The Spike?</b></li>
|
64 |
-
<p>A4: The best characters and costumes in The Spike depend on your personal preference and playstyle. However, some of the most popular ones are:</p>
|
65 |
-
<table>
|
66 |
-
<tr><th>Character</th><th>Costume</th><th>Special Skill</th></tr>
|
67 |
-
<tr><td>Ryu</td><td>Ninja</td><td>Shadow Clone: Creates a clone that spikes with him</td></tr>
|
68 |
-
<tr><td>Lisa</td><td>Cheerleader</td><td>Cheer Up: Increases her team's speed and power</td></tr>
|
69 |
-
<tr><td>Jay</td><td>Pirate</td><td>Cannonball: Launches a powerful spike that stuns the opponent</td></tr>
|
70 |
-
<tr><td>Sophia</td><td>Angel</td><td>Heaven's Blessing: Heals her team's HP and increases their accuracy</td></tr>
|
71 |
-
<tr><td>Kai</td><td>Demon</td><td>Hell's Curse: Reduces the opponent's HP and accuracy</td></tr>
|
72 |
-
</table>
|
73 |
-
<li><b>Q5: How to get more money and gems in The Spike?</b></li>
|
74 |
-
<p>A5: With The Spike mod apk, you don't need to worry about getting more money and gems as you will have unlimited amounts of them. However, if you want to earn them legitimately, you can do so by completing story mode chapters, winning matches, completing achievements, watching ads, or buying them with real money.</p> <p>I hope you enjoyed reading this article and learned something new about The Spike mod apk. If you have any questions, comments, or feedback, please feel free to leave them below. We would love to hear from you and help you out. Thank you for your time and attention!</p> 197e85843d<br />
|
75 |
-
<br />
|
76 |
-
<br />
|
|
|
|
spaces/1phancelerku/anime-remove-background/Experience the Adventure of One Piece with Haki Legend APK for Android and iOS.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>One Piece Haki Legend APK: A New Adventure in the Grand Line</h1>
|
3 |
-
<p>If you are a fan of One Piece, the popular manga and anime series by Eiichiro Oda, you will love One Piece Haki Legend APK, a new mobile game that lets you experience the thrilling adventures of Luffy and his crew in the Grand Line. In this game, you can create your own pirate crew, fight against enemies, explore islands, and discover the secrets of haki, the mysterious power that grants superhuman abilities to those who master it.</p>
|
4 |
-
<h2>one piece haki legend apk</h2><br /><p><b><b>Download File</b> ✯ <a href="https://jinyurl.com/2uNPq1">https://jinyurl.com/2uNPq1</a></b></p><br /><br />
|
5 |
-
<h2>What is One Piece Haki Legend APK?</h2>
|
6 |
-
<p>One Piece Haki Legend APK is a role-playing game based on the One Piece franchise. It is developed by VDOMDHTMLtml, a Chinese game studio that specializes in anime-themed games. The game was released in September 2023 for Android devices, and it has received positive reviews from players and critics alike. The game is not available on Google Play Store, but you can download it from the official website.</p>
|
7 |
-
<h3>Features of One Piece Haki Legend APK</h3>
|
8 |
-
<p>One Piece Haki Legend APK has many features that make it stand out from other One Piece games. Here are some of them:</p>
|
9 |
-
<h4>Stunning graphics and animations</h4>
|
10 |
-
<p>The game boasts high-quality graphics and animations that capture the essence of the original manga and anime. The characters are designed with great detail and accuracy, and they have expressive facial expressions and movements. The environments are also rich and colorful, with dynamic weather effects and lighting. The game also has cinematic cutscenes that enhance the story and immerse you in the world of One Piece.</p>
|
11 |
-
<p>one piece haki legend mobile download<br />
|
12 |
-
haki legend mobile gameplay android ios<br />
|
13 |
-
one piece rpg game haki legend apk<br />
|
14 |
-
haki legend mobile grand open apk<br />
|
15 |
-
one piece haki legend mod apk unlimited money<br />
|
16 |
-
haki legend mobile one piece online game<br />
|
17 |
-
one piece haki legend apk latest version<br />
|
18 |
-
haki legend mobile free download for android<br />
|
19 |
-
one piece haki legend hack apk no root<br />
|
20 |
-
haki legend mobile best characters and skills<br />
|
21 |
-
one piece haki legend apk english version<br />
|
22 |
-
haki legend mobile tips and tricks guide<br />
|
23 |
-
one piece haki legend review and rating<br />
|
24 |
-
haki legend mobile how to get devil fruits<br />
|
25 |
-
one piece haki legend apk offline mode<br />
|
26 |
-
haki legend mobile update and patch notes<br />
|
27 |
-
one piece haki legend cheats and codes<br />
|
28 |
-
haki legend mobile system requirements and compatibility<br />
|
29 |
-
one piece haki legend apk obb data file<br />
|
30 |
-
haki legend mobile pvp and guild features<br />
|
31 |
-
one piece haki legend apk pure download link<br />
|
32 |
-
haki legend mobile story and quests walkthrough<br />
|
33 |
-
one piece haki legend apk mirror direct download<br />
|
34 |
-
haki legend mobile support and feedback forum<br />
|
35 |
-
one piece haki legend apk for pc windows 10<br />
|
36 |
-
haki legend mobile redeem codes and coupons<br />
|
37 |
-
one piece haki legend apk mod menu unlocked<br />
|
38 |
-
haki legend mobile events and rewards list<br />
|
39 |
-
one piece haki legend apk uptodown safe download<br />
|
40 |
-
haki legend mobile wiki and database information<br />
|
41 |
-
one piece haki legend apk rexdl fast download<br />
|
42 |
-
haki legend mobile tier list and rankings<br />
|
43 |
-
one piece haki legend apk revdl secure download<br />
|
44 |
-
haki legend mobile facebook and twitter page<br />
|
45 |
-
one piece haki legend apk happymod easy download<br />
|
46 |
-
haki legend mobile discord and reddit community<br />
|
47 |
-
one piece haki legend apk an1.com verified download<br />
|
48 |
-
haki legend mobile youtube and twitch channel<br />
|
49 |
-
one piece haki legend apk apkpure.com trusted download<br />
|
50 |
-
haki legend mobile google play and app store link</p>
|
51 |
-
<h4>Original story and characters</h4>
|
52 |
-
<p>The game follows an original story that is faithful to the canon of One Piece. You will meet familiar characters such as Luffy, Zoro, Nami, Sanji, Chopper, Robin, Franky, Brook, Law, Sabo, Ace, Shanks, Mihawk, Doflamingo, Kaido, Big Mom, Blackbeard, and many more. You will also encounter new characters that are exclusive to the game, such as your own pirate crew members and allies. The game also features original voice acting from the Japanese cast of the anime.</p>
|
53 |
-
<h4>Various game modes and challenges</h4>
|
54 |
-
<p>The game offers a variety of game modes and challenges that will keep you entertained for hours. You can play the main story mode, where you will follow the plot of the game and complete missions. You can also play the side story mode, where you will explore different islands and scenarios that are not related to the main story. You can also play the arena mode, where you will compete against other players in real-time battles. You can also play the guild mode, where you will join forces with other players in co-op missions.</p>
|
55 |
-
<h4>Real-time battles and co-op missions</h4>
|
56 |
-
<p>The game features real-time battles that are fast-paced and exciting. You can control up to four characters at once, each with their own skills and abilities. You can switch between them at any time during combat, depending on the situation. You can also use haki skills to unleash powerful attacks or defend yourself from enemy attacks. The game also supports co-op missions, where you can team up with other players online to take down bosses or complete <h4>Customize your own pirate crew</h4>
|
57 |
-
<p>The game allows you to customize your own pirate crew, with up to 12 members. You can choose from a variety of characters, each with their own personality, appearance, and skills. You can also equip them with different weapons, accessories, and costumes. You can also name your crew and design your own pirate flag. You can also interact with your crew members and increase their friendship level, which will unlock more dialogue and events.</p>
|
58 |
-
<h2>How to download and install One Piece Haki Legend APK?</h2>
|
59 |
-
<p>If you want to play One Piece Haki Legend APK, you will need to download and install it on your Android device. Here are the steps you need to follow:</p>
|
60 |
-
<h3>Step 1: Download the APK file from the official website</h3>
|
61 |
-
<p>The first thing you need to do is to download the APK file from the official website. The file size is about 1.5 GB, so make sure you have enough storage space and a stable internet connection. You can also scan the QR code on the website to download the file directly to your device.</p>
|
62 |
-
<h3>Step 2: Enable unknown sources on your device</h3>
|
63 |
-
<p>The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.</p>
|
64 |
-
<h3>Step 3: Install the APK file and launch the game</h3>
|
65 |
-
<p>The final thing you need to do is to install the APK file and launch the game. To do this, locate the downloaded file in your file manager and tap on it. You may also need to grant some permissions to the app by tapping Install or Accept. Once the installation is complete, you can tap Open or Launch to start the game. You may also need to download some additional data before you can play the game.</p>
|
66 |
-
<h2>Tips and tricks for playing One Piece Haki Legend APK</h2>
|
67 |
-
<p>Now that you have downloaded and installed One Piece Haki Legend APK, you may want some tips and tricks to help you play the game better. Here are some of them:</p>
|
68 |
-
<h3>Learn the basics of combat and haki skills</h3>
|
69 |
-
<p>One of the most important aspects of the game is combat. You will need to learn how to use your characters' skills and haki skills effectively. Each character has four skills: a normal attack, a special attack, a passive skill, and a haki skill. You can tap on the skill icons to activate them, or swipe on the screen to move or dodge. You can also use haki skills by tapping on the haki icon at the bottom of the screen. There are three types of haki: observation haki, armament haki, and conqueror's haki. Each type has different effects and uses, such as increasing your attack power, defense power, or stunning enemies.</p>
|
70 |
-
<h3>Upgrade your characters and equipment</h3>
|
71 |
-
<p>Another important aspect of the game is upgrading your characters and equipment. You will need to collect resources such as gold, gems, materials, and fragments to do this. You can use gold and gems to level up your characters' skills and haki skills. You can use materials to enhance your equipment's attributes and quality. You can use fragments to unlock new characters or upgrade their stars. You can obtain these resources by completing missions, events, or quests.</p>
|
72 |
-
<h3>Join a guild and participate in events</h3>
|
73 |
-
<p>A fun way to enjoy the game is to join a guild and participate in events. A guild is a group of players who share a common interest in One Piece. You can join an existing guild or create your own guild with your friends. By joining a guild, you can chat with other members, exchange gifts, request help, or donate resources. You can also participate in guild events such as guild wars, guild raids, or guild missions. These events will reward you with exclusive items and benefits.</p>
|
74 |
-
<h3>Explore the map and collect resources</h3>
|
75 |
-
<p>A great way to experience the game is to explore the map and collect resources. The map is divided into different regions, each with its own theme and scenery. You can travel between regions by using your ship or fast travel points. You can also find various resources on the map, such as chests, treasure maps, fruits, fish, or animals. These resources will help you upgrade your characters and equipment.</p>
|
76 |
-
<h2>Conclusion</h2>
|
77 |
-
<p>One Piece Haki Legend APK is a fantastic game for One Piece fans and RPG lovers alike. It has stunning graphics and animations, original story and characters, various game modes and challenges, real-time battles and co-op missions, and a lot of customization options. You can download and install the game easily by following the steps in this article. You can also use the tips and tricks in this article to improve your gameplay and have more fun. If you are looking for a new adventure in the Grand Line, One Piece Haki Legend APK is the game for you.</p>
|
78 |
-
<h2>FAQs</h2>
|
79 |
-
<p>Here are some frequently asked questions about One Piece Haki Legend APK:</p>
|
80 |
-
<table>
|
81 |
-
<tr>
|
82 |
-
<th>Question</th>
|
83 |
-
<th>Answer</th>
|
84 |
-
</tr>
|
85 |
-
<tr>
|
86 |
-
<td>Is One Piece Haki Legend APK free to play?</td>
|
87 |
-
<td>Yes, One Piece Haki Legend APK is free to play, but it also has some optional in-app purchases that can enhance your gaming experience.</td>
|
88 |
-
</tr>
|
89 |
-
<tr>
|
90 |
-
<td>Is One Piece Haki Legend APK safe to download and install?</td>
|
91 |
-
<td>Yes, One Piece Haki Legend APK is safe to download and install, as long as you use the official website or a trusted source. You should also scan the file with an antivirus program before installing it.</td>
|
92 |
-
</tr>
|
93 |
-
<tr>
|
94 |
-
<td>Is One Piece Haki Legend APK compatible with my device?</td>
|
95 |
-
<td>One Piece Haki Legend APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not run the game smoothly or have some bugs. You can check the compatibility of your device on the official website or contact the customer service for more information.</td>
|
96 |
-
</tr>
|
97 |
-
<tr>
|
98 |
-
<td>How can I contact the customer service of One Piece Haki Legend APK?</td>
|
99 |
-
<td>You can contact the customer service of One Piece Haki Legend APK by using the in-game feedback system or by sending an email to [email protected]. You can also follow the official Facebook page or join the official Discord server for more updates and support.</td>
|
100 |
-
</tr>
|
101 |
-
<tr>
|
102 |
-
<td>How can I support the development of One Piece Haki Legend APK?</td>
|
103 |
-
<td>You can support the development of One Piece Haki Legend APK by giving it a positive rating and review on the official website or on other platforms. You can also share the game with your friends and family, or make a donation through the in-game store.</td>
|
104 |
-
</tr>
|
105 |
-
</table>
|
106 |
-
: https://www.onepiecehakilegend.com/ : https://www.facebook.com/OnePieceHakiLegend : https://discord.gg/onepiecehakilegend</p> 197e85843d<br />
|
107 |
-
<br />
|
108 |
-
<br />
|
|
|
|
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_new.py
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
from . import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class Encoder(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
31 |
-
super(Encoder, self).__init__()
|
32 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
|
33 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
|
34 |
-
|
35 |
-
def __call__(self, x):
|
36 |
-
h = self.conv1(x)
|
37 |
-
h = self.conv2(h)
|
38 |
-
|
39 |
-
return h
|
40 |
-
|
41 |
-
|
42 |
-
class Decoder(nn.Module):
|
43 |
-
def __init__(
|
44 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
45 |
-
):
|
46 |
-
super(Decoder, self).__init__()
|
47 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
48 |
-
# self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
|
49 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
50 |
-
|
51 |
-
def __call__(self, x, skip=None):
|
52 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
53 |
-
|
54 |
-
if skip is not None:
|
55 |
-
skip = spec_utils.crop_center(skip, x)
|
56 |
-
x = torch.cat([x, skip], dim=1)
|
57 |
-
|
58 |
-
h = self.conv1(x)
|
59 |
-
# h = self.conv2(h)
|
60 |
-
|
61 |
-
if self.dropout is not None:
|
62 |
-
h = self.dropout(h)
|
63 |
-
|
64 |
-
return h
|
65 |
-
|
66 |
-
|
67 |
-
class ASPPModule(nn.Module):
|
68 |
-
def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
|
69 |
-
super(ASPPModule, self).__init__()
|
70 |
-
self.conv1 = nn.Sequential(
|
71 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
72 |
-
Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
|
73 |
-
)
|
74 |
-
self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
|
75 |
-
self.conv3 = Conv2DBNActiv(
|
76 |
-
nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
|
77 |
-
)
|
78 |
-
self.conv4 = Conv2DBNActiv(
|
79 |
-
nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
|
80 |
-
)
|
81 |
-
self.conv5 = Conv2DBNActiv(
|
82 |
-
nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
|
83 |
-
)
|
84 |
-
self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
|
85 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
86 |
-
|
87 |
-
def forward(self, x):
|
88 |
-
_, _, h, w = x.size()
|
89 |
-
feat1 = F.interpolate(
|
90 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
91 |
-
)
|
92 |
-
feat2 = self.conv2(x)
|
93 |
-
feat3 = self.conv3(x)
|
94 |
-
feat4 = self.conv4(x)
|
95 |
-
feat5 = self.conv5(x)
|
96 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
|
97 |
-
out = self.bottleneck(out)
|
98 |
-
|
99 |
-
if self.dropout is not None:
|
100 |
-
out = self.dropout(out)
|
101 |
-
|
102 |
-
return out
|
103 |
-
|
104 |
-
|
105 |
-
class LSTMModule(nn.Module):
|
106 |
-
def __init__(self, nin_conv, nin_lstm, nout_lstm):
|
107 |
-
super(LSTMModule, self).__init__()
|
108 |
-
self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
|
109 |
-
self.lstm = nn.LSTM(
|
110 |
-
input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
|
111 |
-
)
|
112 |
-
self.dense = nn.Sequential(
|
113 |
-
nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
|
114 |
-
)
|
115 |
-
|
116 |
-
def forward(self, x):
|
117 |
-
N, _, nbins, nframes = x.size()
|
118 |
-
h = self.conv(x)[:, 0] # N, nbins, nframes
|
119 |
-
h = h.permute(2, 0, 1) # nframes, N, nbins
|
120 |
-
h, _ = self.lstm(h)
|
121 |
-
h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins
|
122 |
-
h = h.reshape(nframes, N, 1, nbins)
|
123 |
-
h = h.permute(1, 2, 3, 0)
|
124 |
-
|
125 |
-
return h
|
|
|
|
|
|
|
|
spaces/A666sxr/Genshin_TTS/preprocess.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import text
|
3 |
-
from utils import load_filepaths_and_text
|
4 |
-
|
5 |
-
if __name__ == '__main__':
|
6 |
-
parser = argparse.ArgumentParser()
|
7 |
-
parser.add_argument("--out_extension", default="cleaned")
|
8 |
-
parser.add_argument("--text_index", default=1, type=int)
|
9 |
-
parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
|
10 |
-
parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])
|
11 |
-
|
12 |
-
args = parser.parse_args()
|
13 |
-
|
14 |
-
|
15 |
-
for filelist in args.filelists:
|
16 |
-
print("START:", filelist)
|
17 |
-
filepaths_and_text = load_filepaths_and_text(filelist)
|
18 |
-
for i in range(len(filepaths_and_text)):
|
19 |
-
original_text = filepaths_and_text[i][args.text_index]
|
20 |
-
cleaned_text = text._clean_text(original_text, args.text_cleaners)
|
21 |
-
filepaths_and_text[i][args.text_index] = cleaned_text
|
22 |
-
|
23 |
-
new_filelist = filelist + "." + args.out_extension
|
24 |
-
with open(new_filelist, "w", encoding="utf-8") as f:
|
25 |
-
f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/optim/__init__.py
DELETED
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Optimization stuff. In particular, optimizers (DAdaptAdam), schedulers
-and Exponential Moving Average.
-"""
-
-# flake8: noqa
-from .cosine_lr_scheduler import CosineLRScheduler
-from .dadam import DAdaptAdam
-from .inverse_sqrt_lr_scheduler import InverseSquareRootLRScheduler
-from .linear_warmup_lr_scheduler import LinearWarmupLRScheduler
-from .polynomial_decay_lr_scheduler import PolynomialDecayLRScheduler
-from .ema import ModuleDictEMA
spaces/AIFILMS/riffusion-playground/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Riffusion Playground
-emoji: 📚
-colorFrom: red
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: riffusion/riffusion-playground
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from torch.optim import * # NOQA
-from .radam import * # NOQA
spaces/AchyuthGamer/OpenGPT/g4f/Provider/GPTalk.py
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import secrets, time, json
|
4 |
-
from aiohttp import ClientSession
|
5 |
-
from typing import AsyncGenerator
|
6 |
-
|
7 |
-
from .base_provider import AsyncGeneratorProvider
|
8 |
-
from .helper import format_prompt
|
9 |
-
|
10 |
-
|
11 |
-
class GPTalk(AsyncGeneratorProvider):
|
12 |
-
url = "https://gptalk.net"
|
13 |
-
supports_gpt_35_turbo = True
|
14 |
-
working = True
|
15 |
-
_auth = None
|
16 |
-
|
17 |
-
@classmethod
|
18 |
-
async def create_async_generator(
|
19 |
-
cls,
|
20 |
-
model: str,
|
21 |
-
messages: list[dict[str, str]],
|
22 |
-
**kwargs
|
23 |
-
) -> AsyncGenerator:
|
24 |
-
if not model:
|
25 |
-
model = "gpt-3.5-turbo"
|
26 |
-
timestamp = int(time.time())
|
27 |
-
headers = {
|
28 |
-
'authority': 'gptalk.net',
|
29 |
-
'accept': '*/*',
|
30 |
-
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
|
31 |
-
'content-type': 'application/json',
|
32 |
-
'origin': 'https://gptalk.net',
|
33 |
-
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
34 |
-
'sec-ch-ua-mobile': '?0',
|
35 |
-
'sec-ch-ua-platform': '"Linux"',
|
36 |
-
'sec-fetch-dest': 'empty',
|
37 |
-
'sec-fetch-mode': 'cors',
|
38 |
-
'sec-fetch-site': 'same-origin',
|
39 |
-
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
|
40 |
-
'x-auth-appid': '2229',
|
41 |
-
'x-auth-openid': '',
|
42 |
-
'x-auth-platform': '',
|
43 |
-
'x-auth-timestamp': f"{timestamp}",
|
44 |
-
}
|
45 |
-
async with ClientSession(headers=headers) as session:
|
46 |
-
if not cls._auth or cls._auth["expires_at"] < timestamp:
|
47 |
-
data = {
|
48 |
-
"fingerprint": secrets.token_hex(16).zfill(32),
|
49 |
-
"platform": "fingerprint"
|
50 |
-
}
|
51 |
-
async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
|
52 |
-
response.raise_for_status()
|
53 |
-
cls._auth = (await response.json())["data"]
|
54 |
-
data = {
|
55 |
-
"content": format_prompt(messages),
|
56 |
-
"accept": "stream",
|
57 |
-
"from": 1,
|
58 |
-
"model": model,
|
59 |
-
"is_mobile": 0,
|
60 |
-
"user_agent": headers["user-agent"],
|
61 |
-
"is_open_ctx": 0,
|
62 |
-
"prompt": "",
|
63 |
-
"roid": 111,
|
64 |
-
"temperature": 0,
|
65 |
-
"ctx_msg_count": 3,
|
66 |
-
"created_at": timestamp
|
67 |
-
}
|
68 |
-
headers = {
|
69 |
-
'authorization': f'Bearer {cls._auth["token"]}',
|
70 |
-
}
|
71 |
-
async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
|
72 |
-
response.raise_for_status()
|
73 |
-
token = (await response.json())["data"]["token"]
|
74 |
-
last_message = ""
|
75 |
-
async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
|
76 |
-
response.raise_for_status()
|
77 |
-
async for line in response.content:
|
78 |
-
if line.startswith(b"data: "):
|
79 |
-
if line.startswith(b"data: [DONE]"):
|
80 |
-
break
|
81 |
-
message = json.loads(line[6:-1])["content"]
|
82 |
-
yield message[len(last_message):]
|
83 |
-
last_message = message
|
|
|
|
|
|
spaces/Aditya9790/yolo7-object-tracking/deploy/triton-inference-server/labels.py
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
from enum import Enum
|
2 |
-
|
3 |
-
class COCOLabels(Enum):
|
4 |
-
PERSON = 0
|
5 |
-
BICYCLE = 1
|
6 |
-
CAR = 2
|
7 |
-
MOTORBIKE = 3
|
8 |
-
AEROPLANE = 4
|
9 |
-
BUS = 5
|
10 |
-
TRAIN = 6
|
11 |
-
TRUCK = 7
|
12 |
-
BOAT = 8
|
13 |
-
TRAFFIC_LIGHT = 9
|
14 |
-
FIRE_HYDRANT = 10
|
15 |
-
STOP_SIGN = 11
|
16 |
-
PARKING_METER = 12
|
17 |
-
BENCH = 13
|
18 |
-
BIRD = 14
|
19 |
-
CAT = 15
|
20 |
-
DOG = 16
|
21 |
-
HORSE = 17
|
22 |
-
SHEEP = 18
|
23 |
-
COW = 19
|
24 |
-
ELEPHANT = 20
|
25 |
-
BEAR = 21
|
26 |
-
ZEBRA = 22
|
27 |
-
GIRAFFE = 23
|
28 |
-
BACKPACK = 24
|
29 |
-
UMBRELLA = 25
|
30 |
-
HANDBAG = 26
|
31 |
-
TIE = 27
|
32 |
-
SUITCASE = 28
|
33 |
-
FRISBEE = 29
|
34 |
-
SKIS = 30
|
35 |
-
SNOWBOARD = 31
|
36 |
-
SPORTS_BALL = 32
|
37 |
-
KITE = 33
|
38 |
-
BASEBALL_BAT = 34
|
39 |
-
BASEBALL_GLOVE = 35
|
40 |
-
SKATEBOARD = 36
|
41 |
-
SURFBOARD = 37
|
42 |
-
TENNIS_RACKET = 38
|
43 |
-
BOTTLE = 39
|
44 |
-
WINE_GLASS = 40
|
45 |
-
CUP = 41
|
46 |
-
FORK = 42
|
47 |
-
KNIFE = 43
|
48 |
-
SPOON = 44
|
49 |
-
BOWL = 45
|
50 |
-
BANANA = 46
|
51 |
-
APPLE = 47
|
52 |
-
SANDWICH = 48
|
53 |
-
ORANGE = 49
|
54 |
-
BROCCOLI = 50
|
55 |
-
CARROT = 51
|
56 |
-
HOT_DOG = 52
|
57 |
-
PIZZA = 53
|
58 |
-
DONUT = 54
|
59 |
-
CAKE = 55
|
60 |
-
CHAIR = 56
|
61 |
-
SOFA = 57
|
62 |
-
POTTEDPLANT = 58
|
63 |
-
BED = 59
|
64 |
-
DININGTABLE = 60
|
65 |
-
TOILET = 61
|
66 |
-
TVMONITOR = 62
|
67 |
-
LAPTOP = 63
|
68 |
-
MOUSE = 64
|
69 |
-
REMOTE = 65
|
70 |
-
KEYBOARD = 66
|
71 |
-
CELL_PHONE = 67
|
72 |
-
MICROWAVE = 68
|
73 |
-
OVEN = 69
|
74 |
-
TOASTER = 70
|
75 |
-
SINK = 71
|
76 |
-
REFRIGERATOR = 72
|
77 |
-
BOOK = 73
|
78 |
-
CLOCK = 74
|
79 |
-
VASE = 75
|
80 |
-
SCISSORS = 76
|
81 |
-
TEDDY_BEAR = 77
|
82 |
-
HAIR_DRIER = 78
|
83 |
-
TOOTHBRUSH = 79
|
|
|
|
|
|
spaces/AlanMars/QYL-AI-Space/app.py
DELETED
@@ -1,631 +0,0 @@
|
|
1 |
-
# -*- coding:utf-8 -*-
|
2 |
-
import os
|
3 |
-
import logging
|
4 |
-
import sys
|
5 |
-
|
6 |
-
import gradio as gr
|
7 |
-
|
8 |
-
from modules import config
|
9 |
-
from modules.config import *
|
10 |
-
from modules.utils import *
|
11 |
-
from modules.presets import *
|
12 |
-
from modules.overwrites import *
|
13 |
-
from modules.models.models import get_model
|
14 |
-
|
15 |
-
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
|
16 |
-
gr.Chatbot.postprocess = postprocess
|
17 |
-
|
18 |
-
PromptHelper.compact_text_chunks = compact_text_chunks
|
19 |
-
|
20 |
-
with open("assets/custom.css", "r", encoding="utf-8") as f:
|
21 |
-
customCSS = f.read()
|
22 |
-
|
23 |
-
|
24 |
-
def create_new_model(current_system_prompt="You are a helpful assistant."):
|
25 |
-
return get_model(model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, system_prompt=current_system_prompt)[0]
|
26 |
-
|
27 |
-
|
28 |
-
def get_system_prompt_content_by_role_name(role_name: str) -> str:
|
29 |
-
template_name_list = get_template_names(plain=True)
|
30 |
-
template_role_prompt_dict = load_template(template_name_list[0], mode=2) # [act:prompt]
|
31 |
-
prompt_content = template_role_prompt_dict[role_name]
|
32 |
-
return prompt_content
|
33 |
-
|
34 |
-
|
35 |
-
def get_role_name_by_id(prompt_id) -> str:
|
36 |
-
template_name_list = get_template_names(plain=True)
|
37 |
-
template_id_role_dict = load_template(template_name_list[0], mode=3) # [id:act])
|
38 |
-
role_name = template_id_role_dict[prompt_id]
|
39 |
-
return role_name
|
40 |
-
|
41 |
-
|
42 |
-
def get_user_key_by_user_name(user_login_name: str) -> str:
|
43 |
-
user_key_pairs_dict = {row[0]: row[1] for row in user_key_pairs_list}
|
44 |
-
|
45 |
-
return user_key_pairs_dict[user_login_name]
|
46 |
-
|
47 |
-
|
48 |
-
with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
49 |
-
# Session State
|
50 |
-
user_name = gr.State("")
|
51 |
-
user_question = gr.State("")
|
52 |
-
user_topic = gr.State(i18n("未命名对话历史记录"))
|
53 |
-
|
54 |
-
assert type(my_api_key) == str
|
55 |
-
user_api_key = gr.State(my_api_key)
|
56 |
-
current_model = gr.State(create_new_model())
|
57 |
-
current_prompt_template = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
|
58 |
-
|
59 |
-
# Header
|
60 |
-
with gr.Row():
|
61 |
-
gr.HTML(CHUANHU_TITLE, elem_id="app_title")
|
62 |
-
status_display = gr.Markdown(get_geoip(), elem_id="status_display")
|
63 |
-
with gr.Row(elem_id="float_display", visible=True):
|
64 |
-
user_info = gr.Markdown(value="getting user info...", elem_id="user_info")
|
65 |
-
|
66 |
-
# Body
|
67 |
-
with gr.Row().style(equal_height=True):
|
68 |
-
# Left Panel
|
69 |
-
with gr.Column(scale=5):
|
70 |
-
with gr.Row():
|
71 |
-
chatbot = gr.Chatbot(label="QYL Chat", elem_id="chuanhu_chatbot").style(height="100%")
|
72 |
-
with gr.Row():
|
73 |
-
with gr.Column(min_width=225, scale=12):
|
74 |
-
user_input = gr.Textbox(
|
75 |
-
elem_id="user_input_tb",
|
76 |
-
show_label=False, placeholder=i18n("在这里输入")
|
77 |
-
).style(container=False)
|
78 |
-
with gr.Column(min_width=42, scale=1):
|
79 |
-
submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn")
|
80 |
-
cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn")
|
81 |
-
with gr.Row():
|
82 |
-
emptyBtn = gr.Button(
|
83 |
-
i18n("🧹 新的对话"), elem_id="empty_btn"
|
84 |
-
)
|
85 |
-
retryBtn = gr.Button(i18n("🔄 重新生成"))
|
86 |
-
delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"), visible=False)
|
87 |
-
delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"), visible=False)
|
88 |
-
with gr.Row(visible=False) as like_dislike_area:
|
89 |
-
with gr.Column(min_width=20, scale=1):
|
90 |
-
likeBtn = gr.Button(i18n("👍"))
|
91 |
-
with gr.Column(min_width=20, scale=1):
|
92 |
-
dislikeBtn = gr.Button(i18n("👎"))
|
93 |
-
# Right Panel
|
94 |
-
with gr.Column():
|
95 |
-
with gr.Column(min_width=50, scale=1):
|
96 |
-
with gr.Tab(label=i18n("对话")):
|
97 |
-
# with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True):
|
98 |
-
with gr.Accordion(label=i18n("加载对话历史记录"), open=True):
|
99 |
-
with gr.Column():
|
100 |
-
with gr.Row():
|
101 |
-
with gr.Column(scale=6):
|
102 |
-
historyFileSelectDropdown = gr.Dropdown(
|
103 |
-
label=i18n("从列表中加载对话"),
|
104 |
-
choices=get_history_names(plain=True),
|
105 |
-
multiselect=False
|
106 |
-
).style(container=False)
|
107 |
-
with gr.Column(scale=1):
|
108 |
-
historyRefreshBtn = gr.Button(i18n("🔄 刷新"))
|
109 |
-
with gr.Row(visible=False):
|
110 |
-
with gr.Column(scale=6):
|
111 |
-
saveFileName = gr.Textbox(
|
112 |
-
show_label=True,
|
113 |
-
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
|
114 |
-
label=i18n("设置保存文件名"),
|
115 |
-
value=i18n("对话历史记录"),
|
116 |
-
).style(container=True)
|
117 |
-
with gr.Column(scale=1):
|
118 |
-
saveHistoryBtn = gr.Button(i18n("💾 保存对话"))
|
119 |
-
exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown"))
|
120 |
-
gr.Markdown(i18n("默认保存于history文件夹"))
|
121 |
-
with gr.Row(visible=False):
|
122 |
-
with gr.Column():
|
123 |
-
downloadFile = gr.File(interactive=True)
|
124 |
-
|
125 |
-
with gr.Tab(label=i18n("智人")):
|
126 |
-
systemPromptTxt = gr.Textbox(
|
127 |
-
show_label=True,
|
128 |
-
placeholder=i18n(f"在这里输入System Prompt...\n\n Current system prompt: {INITIAL_SYSTEM_PROMPT}"),
|
129 |
-
label="System prompt",
|
130 |
-
value=INITIAL_SYSTEM_PROMPT,
|
131 |
-
lines=12,
|
132 |
-
visible=False,
|
133 |
-
).style(container=False)
|
134 |
-
with gr.Accordion(label=i18n("植入角色"), open=True):
|
135 |
-
with gr.Column():
|
136 |
-
with gr.Row():
|
137 |
-
with gr.Column(scale=6):
|
138 |
-
templateFileSelectDropdown = gr.Dropdown(
|
139 |
-
label=i18n("选择分类"),
|
140 |
-
choices=[os.path.splitext(f)[0] for f in get_template_names(plain=True)],
|
141 |
-
multiselect=False,
|
142 |
-
value=os.path.splitext(get_template_names(plain=True)[0])[0]
|
143 |
-
).style(container=False)
|
144 |
-
with gr.Column(scale=1):
|
145 |
-
templateRefreshBtn = gr.Button(i18n("🔄 刷新"))
|
146 |
-
with gr.Row():
|
147 |
-
with gr.Column():
|
148 |
-
templateSelectDropdown = gr.Dropdown(
|
149 |
-
label=i18n("选择职能"),
|
150 |
-
choices=load_template(get_template_names(plain=True)[0], mode=1),
|
151 |
-
multiselect=False,
|
152 |
-
value=load_template(get_template_names(plain=True)[0], mode=1)[0]
|
153 |
-
).style(container=False)
|
154 |
-
model_select_dropdown = gr.Dropdown(
|
155 |
-
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL],
|
156 |
-
interactive=True, visible=False
|
157 |
-
)
|
158 |
-
lora_select_dropdown = gr.Dropdown(
|
159 |
-
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
|
160 |
-
)
|
161 |
-
with gr.Row():
|
162 |
-
use_streaming_checkbox = gr.Checkbox(label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION, interactive=False)
|
163 |
-
single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False, visible=False)
|
164 |
-
use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False, visible=False)
|
165 |
-
# render_latex_checkbox = gr.Checkbox(label=i18n("渲染LaTeX公式"), value=render_latex, interactive=True, elem_id="render_latex_checkbox")
|
166 |
-
language_select_dropdown = gr.Dropdown(
|
167 |
-
label=i18n("选择回复语言(针对搜索&索引功能)"),
|
168 |
-
choices=REPLY_LANGUAGES,
|
169 |
-
multiselect=False,
|
170 |
-
value=REPLY_LANGUAGES[0],
|
171 |
-
visible=False
|
172 |
-
)
|
173 |
-
index_files = gr.Files(label=i18n("上传"), type="file", visible=False)
|
174 |
-
two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False),
|
175 |
-
visible=False)
|
176 |
-
# TODO: 公式ocr
|
177 |
-
# formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False))
|
178 |
-
|
179 |
-
with gr.Tab(label=i18n("高级")):
|
180 |
-
# gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置"))
|
181 |
-
# gr.HTML(APPEARANCE_SWITCHER, elem_classes="insert_block")
|
182 |
-
keyTxt = gr.Textbox(
|
183 |
-
show_label=True,
|
184 |
-
placeholder=f"Your API-key...",
|
185 |
-
value=hide_middle_chars(user_api_key.value),
|
186 |
-
type="password",
|
187 |
-
visible=not HIDE_MY_KEY,
|
188 |
-
label="API-Key",
|
189 |
-
)
|
190 |
-
if multi_api_key:
|
191 |
-
usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display",
|
192 |
-
elem_classes="insert_block")
|
193 |
-
else:
|
194 |
-
usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display",
|
195 |
-
elem_classes="insert_block")
|
196 |
-
with gr.Accordion(i18n("参数"), open=True):
|
197 |
-
temperature_slider = gr.Slider(
|
198 |
-
minimum=-0,
|
199 |
-
maximum=2.0,
|
200 |
-
value=1.0,
|
201 |
-
step=0.1,
|
202 |
-
interactive=True,
|
203 |
-
label=i18n("创意度")
|
204 |
-
)
|
205 |
-
top_p_slider = gr.Slider(
|
206 |
-
minimum=-0,
|
207 |
-
maximum=1.0,
|
208 |
-
value=1.0,
|
209 |
-
step=0.05,
|
210 |
-
interactive=True,
|
211 |
-
label=i18n("top-p"),
|
212 |
-
visible=False
|
213 |
-
)
|
214 |
-
n_choices_slider = gr.Slider(
|
215 |
-
minimum=1,
|
216 |
-
maximum=10,
|
217 |
-
value=1,
|
218 |
-
step=1,
|
219 |
-
interactive=True,
|
220 |
-
label=i18n("n choices"),
|
221 |
-
visible=False
|
222 |
-
)
|
223 |
-
stop_sequence_txt = gr.Textbox(
|
224 |
-
show_label=True,
|
225 |
-
placeholder=i18n("在这里输入停止符,用英文逗号隔开..."),
|
226 |
-
label="stop",
|
227 |
-
value="",
|
228 |
-
lines=1,
|
229 |
-
visible=False,
|
230 |
-
)
|
231 |
-
max_context_length_slider = gr.Slider(
|
232 |
-
minimum=1,
|
233 |
-
maximum=32768,
|
234 |
-
value=2000,
|
235 |
-
step=1,
|
236 |
-
interactive=True,
|
237 |
-
label=i18n("max context"),
|
238 |
-
visible=False
|
239 |
-
)
|
240 |
-
max_generation_slider = gr.Slider(
|
241 |
-
minimum=1,
|
242 |
-
maximum=32768,
|
243 |
-
value=1000,
|
244 |
-
step=1,
|
245 |
-
interactive=True,
|
246 |
-
label=i18n("max generations"),
|
247 |
-
visible=False
|
248 |
-
)
|
249 |
-
presence_penalty_slider = gr.Slider(
|
250 |
-
minimum=-2.0,
|
251 |
-
maximum=2.0,
|
252 |
-
value=0.0,
|
253 |
-
step=0.01,
|
254 |
-
interactive=True,
|
255 |
-
label=i18n("presence penalty"),
|
256 |
-
visible=False
|
257 |
-
)
|
258 |
-
frequency_penalty_slider = gr.Slider(
|
259 |
-
minimum=-2.0,
|
260 |
-
maximum=2.0,
|
261 |
-
value=0.0,
|
262 |
-
step=0.01,
|
263 |
-
interactive=True,
|
264 |
-
label=i18n("frequency penalty"),
|
265 |
-
visible=False
|
266 |
-
)
|
267 |
-
logit_bias_txt = gr.Textbox(
|
268 |
-
show_label=True,
|
269 |
-
placeholder=f"word:likelihood",
|
270 |
-
label="logit bias",
|
271 |
-
value="",
|
272 |
-
lines=1,
|
273 |
-
visible=False
|
274 |
-
)
|
275 |
-
user_identifier_txt = gr.Textbox(
|
276 |
-
show_label=True,
|
277 |
-
placeholder=i18n("用于定位滥用行为"),
|
278 |
-
label=i18n("用户名"),
|
279 |
-
value=user_name.value,
|
280 |
-
lines=1,
|
281 |
-
visible=False
|
282 |
-
)
|
283 |
-
|
284 |
-
with gr.Accordion(i18n("网络设置"), open=False, visible=False):
|
285 |
-
# 优先展示自定义的api_host
|
286 |
-
apihostTxt = gr.Textbox(
|
287 |
-
show_label=True,
|
288 |
-
placeholder=i18n("在这里输入API-Host..."),
|
289 |
-
label="API-Host",
|
290 |
-
value=config.api_host or shared.API_HOST,
|
291 |
-
lines=1
|
292 |
-
)
|
293 |
-
changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
|
294 |
-
proxyTxt = gr.Textbox(
|
295 |
-
show_label=True,
|
296 |
-
placeholder=i18n("在这里输入代理地址..."),
|
297 |
-
label=i18n("代理地址(示例:http://127.0.0.1:10809)"),
|
298 |
-
value="",
|
299 |
-
lines=2
|
300 |
-
)
|
301 |
-
changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
|
302 |
-
default_btn = gr.Button(i18n("🔙 恢复默认设置"))
|
303 |
-
|
304 |
-
# Footer
|
305 |
-
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
|
306 |
-
gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")
|
307 |
-
|
308 |
-
|
309 |
-
def create_greeting(request: gr.Request):
|
310 |
-
"""
|
311 |
-
Access username from gr.Request class. Be able to access current username from application #3259
|
312 |
-
https://github.com/gradio-app/gradio/pull/3296
|
313 |
-
"""
|
314 |
-
# Update System Prompt
|
315 |
-
show_system_prompt = False
|
316 |
-
current_user_prompts = []
|
317 |
-
current_user_api_key = ""
|
318 |
-
|
319 |
-
# Update User Profile
|
320 |
-
user_request_name = ANONYMOUS_USER # using anonymous
|
321 |
-
if hasattr(request, "username") and request.username: # is not None or is not ""
|
322 |
-
logging.info(f"Get user name from gr.Request: {request.username}")
|
323 |
-
if request.username == 'admin001' or request.username == 'admin002' or request.username == 'admin003':
|
324 |
-
show_system_prompt = True
|
325 |
-
logging.info(f"show_system_prompt: {show_system_prompt}")
|
326 |
-
|
327 |
-
# Update user prompt
|
328 |
-
current_user_prompts = load_user_prompts(request.username)
|
329 |
-
|
330 |
-
# Update user API KEY
|
331 |
-
current_user_api_key = get_user_key_by_user_name(request.username)
|
332 |
-
logging.debug(f"Current user and key pairs: {request.username}:{current_user_api_key}")
|
333 |
-
|
334 |
-
user_request_name = hide_username(request.username) # just show the last N character
|
335 |
-
else:
|
336 |
-
logging.info(f"Failed to get user name from gr.Request. ANONYMOUS_USER: {user_request_name}")
|
337 |
-
current_user_prompts = load_user_prompts(user_request_name)
|
338 |
-
logging.info(f"current_user_prompts: {current_user_prompts}")
|
339 |
-
|
340 |
-
if show_system_prompt:
|
341 |
-
user_info_string = gr.Markdown.update(value=i18n(f"Admin: {user_request_name}"))
|
342 |
-
else:
|
343 |
-
user_info_string = gr.Markdown.update(value=i18n(f"User: {user_request_name}"))
|
344 |
-
|
345 |
-
# Update current model
|
346 |
-
first_system_prompt = get_system_prompt_content_by_role_name(current_user_prompts[0])
|
347 |
-
current_model = create_new_model(first_system_prompt)
|
348 |
-
current_model.set_user_identifier(user_request_name)
|
349 |
-
|
350 |
-
# Update chatBot
|
351 |
-
chatbot = gr.Chatbot.update(label=MODELS[DEFAULT_MODEL])
|
352 |
-
|
353 |
-
return user_info_string, user_request_name, \
|
354 |
-
current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *current_model.auto_load(), \
|
355 |
-
get_history_names(False, user_request_name), chatbot, gr.update(visible=show_system_prompt), \
|
356 |
-
gr.update(choices=current_user_prompts, value=current_user_prompts[0]), \
|
357 |
-
gr.update(value=current_user_api_key), gr.update(value=current_user_api_key)
|
358 |
-
|
359 |
-
|
360 |
-
demo.load(fn=create_greeting,
|
361 |
-
inputs=None,
|
362 |
-
outputs=[user_info, user_name, current_model, like_dislike_area, systemPromptTxt, chatbot,
|
363 |
-
historyFileSelectDropdown, chatbot, systemPromptTxt, templateSelectDropdown, user_api_key, keyTxt],
|
364 |
-
api_name="load")
|
365 |
-
|
366 |
-
# Debugging
|
367 |
-
'''
|
368 |
-
logging.info(
|
369 |
-
colorama.Back.GREEN
|
370 |
-
+ f"\nAfter demo.load() gr.systemPromptTxt: {systemPromptTxt.value}"
|
371 |
-
+ f"\nAfter demo.load() gr.State.current_prompt_template: {current_prompt_template.value}"
|
372 |
-
+ f"\nAfter demo.load() gr.State.current_model.system_prompt: {current_model.value.system_prompt}"
|
373 |
-
+ colorama.Style.RESET_ALL
|
374 |
-
)
|
375 |
-
'''
|
376 |
-
|
377 |
-
chatgpt_predict_args = dict(
|
378 |
-
fn=predict,
|
379 |
-
inputs=[
|
380 |
-
current_model,
|
381 |
-
user_question,
|
382 |
-
chatbot,
|
383 |
-
use_streaming_checkbox,
|
384 |
-
use_websearch_checkbox,
|
385 |
-
index_files,
|
386 |
-
language_select_dropdown,
|
387 |
-
],
|
388 |
-
outputs=[chatbot, status_display],
|
389 |
-
show_progress=True,
|
390 |
-
)
|
391 |
-
|
392 |
-
start_outputing_args = dict(
|
393 |
-
fn=start_outputing,
|
394 |
-
inputs=[],
|
395 |
-
outputs=[submitBtn, cancelBtn],
|
396 |
-
show_progress=True,
|
397 |
-
)
|
398 |
-
|
399 |
-
end_outputing_args = dict(
|
400 |
-
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
|
401 |
-
)
|
402 |
-
|
403 |
-
reset_textbox_args = dict(
|
404 |
-
fn=reset_textbox, inputs=[], outputs=[user_input]
|
405 |
-
)
|
406 |
-
|
407 |
-
transfer_input_args = dict(
|
408 |
-
fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn],
|
409 |
-
show_progress=True
|
410 |
-
)
|
411 |
-
|
412 |
-
get_usage_args = dict(
|
413 |
-
fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False
|
414 |
-
)
|
415 |
-
|
416 |
-
load_history_from_file_args = dict(
|
417 |
-
fn=load_chat_history,
|
418 |
-
inputs=[current_model, historyFileSelectDropdown, user_name],
|
419 |
-
outputs=[saveFileName, systemPromptTxt, chatbot]
|
420 |
-
)
|
421 |
-
|
422 |
-
# Chatbot
|
423 |
-
cancelBtn.click(interrupt, [current_model], [])
|
424 |
-
''' Running Events Consecutively
|
425 |
-
run events consecutively by using the then method of an event listener. This will run an event after the previous
|
426 |
-
event has finished running. This is useful for running events that update components in multiple steps.
|
427 |
-
The .then() method of an event listener executes the subsequent event regardless of whether the previous event
|
428 |
-
raised any errors. If you'd like to only run subsequent events if the previous event executed successfully,
|
429 |
-
use the .success() method, which takes the same arguments as .then().
|
430 |
-
'''
|
431 |
-
user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
|
432 |
-
user_input.submit(**get_usage_args)
|
433 |
-
|
434 |
-
submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args, api_name="predict").then(**end_outputing_args)
|
435 |
-
submitBtn.click(**get_usage_args)
|
436 |
-
|
437 |
-
index_files.change(handle_file_upload, [current_model, index_files, chatbot],
|
438 |
-
[index_files, chatbot, status_display])
|
439 |
-
|
440 |
-
emptyBtn.click(
|
441 |
-
reset,
|
442 |
-
inputs=[current_model],
|
443 |
-
outputs=[chatbot, status_display],
|
444 |
-
show_progress=True,
|
445 |
-
)
|
446 |
-
|
447 |
-
retryBtn.click(**start_outputing_args).then(
|
448 |
-
retry,
|
449 |
-
[
|
450 |
-
current_model,
|
451 |
-
chatbot,
|
452 |
-
use_streaming_checkbox,
|
453 |
-
use_websearch_checkbox,
|
454 |
-
index_files,
|
455 |
-
language_select_dropdown,
|
456 |
-
],
|
457 |
-
[chatbot, status_display],
|
458 |
-
show_progress=True,
|
459 |
-
).then(**end_outputing_args)
|
460 |
-
retryBtn.click(**get_usage_args)
|
461 |
-
|
462 |
-
delFirstBtn.click(
|
463 |
-
delete_first_conversation,
|
464 |
-
[current_model],
|
465 |
-
[status_display],
|
466 |
-
)
|
467 |
-
|
468 |
-
delLastBtn.click(
|
469 |
-
delete_last_conversation,
|
470 |
-
[current_model, chatbot],
|
471 |
-
[chatbot, status_display],
|
472 |
-
show_progress=False
|
473 |
-
)
|
474 |
-
|
475 |
-
likeBtn.click(
|
476 |
-
like,
|
477 |
-
[current_model],
|
478 |
-
[status_display],
|
479 |
-
show_progress=False
|
480 |
-
)
|
481 |
-
|
482 |
-
dislikeBtn.click(
|
483 |
-
dislike,
|
484 |
-
[current_model],
|
485 |
-
[status_display],
|
486 |
-
show_progress=False
|
487 |
-
)
|
488 |
-
|
489 |
-
two_column.change(update_doc_config, [two_column], None)
|
490 |
-
|
491 |
-
# LLM Models
|
492 |
-
keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(
|
493 |
-
**get_usage_args)
|
494 |
-
keyTxt.submit(**get_usage_args)
|
495 |
-
single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
|
496 |
-
model_select_dropdown.change(get_model,
|
497 |
-
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
|
498 |
-
top_p_slider, systemPromptTxt, user_name],
|
499 |
-
[current_model, status_display, chatbot, lora_select_dropdown], show_progress=True,
|
500 |
-
api_name="get_model")
|
501 |
-
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area],
|
502 |
-
show_progress=False)
|
503 |
-
lora_select_dropdown.change(get_model,
|
504 |
-
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
|
505 |
-
top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot],
|
506 |
-
show_progress=True)
|
507 |
-
|
508 |
-
# Template
|
509 |
-
systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None)
|
510 |
-
templateRefreshBtn.click(get_template_names_without_extension, None, [templateFileSelectDropdown])
|
511 |
-
templateFileSelectDropdown.change(
|
512 |
-
load_template,
|
513 |
-
[templateFileSelectDropdown],
|
514 |
-
[current_prompt_template, templateSelectDropdown],
|
515 |
-
show_progress=True,
|
516 |
-
)
|
517 |
-
templateSelectDropdown.change(
|
518 |
-
get_template_content,
|
519 |
-
[current_prompt_template, templateSelectDropdown, systemPromptTxt],
|
520 |
-
[systemPromptTxt],
|
521 |
-
show_progress=True,
|
522 |
-
)
|
523 |
-
|
524 |
-
# S&L
|
525 |
-
saveHistoryBtn.click(
|
526 |
-
save_chat_history,
|
527 |
-
[current_model, saveFileName, chatbot, user_name],
|
528 |
-
downloadFile,
|
529 |
-
show_progress=True,
|
530 |
-
)
|
531 |
-
saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
|
532 |
-
exportMarkdownBtn.click(
|
533 |
-
export_markdown,
|
534 |
-
[current_model, saveFileName, chatbot, user_name],
|
535 |
-
downloadFile,
|
536 |
-
show_progress=True,
|
537 |
-
)
|
538 |
-
historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
|
539 |
-
historyFileSelectDropdown.change(**load_history_from_file_args)
|
540 |
-
downloadFile.change(upload_chat_history, [current_model, downloadFile, user_name],
|
541 |
-
[saveFileName, systemPromptTxt, chatbot])
|
542 |
-
|
543 |
-
# Advanced
|
544 |
-
max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None)
|
545 |
-
temperature_slider.change(set_temperature, [current_model, temperature_slider], None)
|
546 |
-
top_p_slider.change(set_top_p, [current_model, top_p_slider], None)
|
547 |
-
n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None)
|
548 |
-
stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None)
|
549 |
-
max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None)
|
550 |
-
presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None)
|
551 |
-
frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None)
|
552 |
-
logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None)
|
553 |
-
user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None)
|
554 |
-
|
555 |
-
default_btn.click(
|
556 |
-
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
|
557 |
-
)
|
558 |
-
changeAPIURLBtn.click(
|
559 |
-
change_api_host,
|
560 |
-
[apihostTxt],
|
561 |
-
[status_display],
|
562 |
-
show_progress=True,
|
563 |
-
)
|
564 |
-
changeProxyBtn.click(
|
565 |
-
change_proxy,
|
566 |
-
[proxyTxt],
|
567 |
-
[status_display],
|
568 |
-
show_progress=True,
|
569 |
-
)
|
570 |
-
|
571 |
-
logging.info(
|
572 |
-
colorama.Back.GREEN
|
573 |
-
+ f"\n温馨提示:访问 http://{server_name}:{server_port} 查看界面"
|
574 |
-
+ colorama.Style.RESET_ALL
|
575 |
-
)
|
576 |
-
# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
|
577 |
-
demo.title = i18n("启源力 AI")
|
578 |
-
|
579 |
-
'''Control the rate of processed requests by creating a queue. This will allow you to set the number of requests to
|
580 |
-
be processed at one time, and will let users know their position in the queue. Number of worker threads that will be
|
581 |
-
processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are
|
582 |
-
processed, but will also increase the memory usage of the queue. '''
|
583 |
-
demo.queue(concurrency_count=CONCURRENT_COUNT)
|
584 |
-
|
585 |
-
if __name__ == "__main__":
|
586 |
-
reload_javascript()
|
587 |
-
|
588 |
-
# HF deploy
|
589 |
-
demo.launch(
|
590 |
-
share=False,
|
591 |
-
auth=auth_list if authflag else None,
|
592 |
-
auth_message=i18n("启源力智人AI助理"),
|
593 |
-
favicon_path="./assets/favicon-64.png"
|
594 |
-
)
|
595 |
-
|
596 |
-
'''
|
597 |
-
# Cloud deploy
|
598 |
-
demo.launch(
|
599 |
-
server_name=server_name,
|
600 |
-
server_port=server_port,
|
601 |
-
share=False,
|
602 |
-
auth=auth_list if authflag else None,
|
603 |
-
auth_message=i18n("启源力智人AI助理"),
|
604 |
-
favicon_path="./assets/favicon-64.png"
|
605 |
-
)
|
606 |
-
'''
|
607 |
-
|
608 |
-
'''
|
609 |
-
# Local deploy
|
610 |
-
demo.launch(
|
611 |
-
server_name=server_name,
|
612 |
-
server_port=server_port,
|
613 |
-
share=True,
|
614 |
-
auth=auth_list if authflag else None,
|
615 |
-
auth_message=i18n("启源力智人AI助理"),
|
616 |
-
favicon_path="./assets/favicon-64.png",
|
617 |
-
inbrowser=not dockerflag # 禁止在docker下开启inbrowser
|
618 |
-
)
|
619 |
-
'''
|
620 |
-
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
621 |
-
# server_name=server_name,
|
622 |
-
# server_port=server_port,
|
623 |
-
# share=share,
|
624 |
-
# auth=auth_list if authflag else None,
|
625 |
-
# favicon_path="./assets/favicon-64.png",
|
626 |
-
# inbrowser=not dockerflag, # 禁止在docker下开启inbrowser
|
627 |
-
# )
|
628 |
-
|
629 |
-
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
|
630 |
-
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
|
631 |
-
# demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理
|
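The removed app.py above relies on Gradio's consecutive event execution: `.click(...).then(...)` runs the second handler only after the first finishes, which is how it sequences transfer_input, predict, and end_outputing. A minimal self-contained sketch of that chaining pattern (handler names and components here are illustrative, assuming Gradio 3.x):

import gradio as gr

def transfer_input(text):
    # Freeze the question in a State and clear the textbox before generation.
    return text, ""

def predict(question, history):
    # Placeholder "model": echo the question back as the answer.
    return history + [(question, f"echo: {question}")]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    question_state = gr.State("")
    user_input = gr.Textbox()
    submit = gr.Button("Submit")

    # .then() fires only after the previous event has finished, so the steps
    # run consecutively rather than in parallel.
    submit.click(
        transfer_input, [user_input], [question_state, user_input]
    ).then(
        predict, [question_state, chatbot], [chatbot]
    )

demo.launch()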
spaces/Alpaca233/ChatPDF-GUI/app.py
DELETED
@@ -1,51 +0,0 @@
-import gradio as gr
-
-from gpt_reader.pdf_reader import PaperReader
-from gpt_reader.prompt import BASE_POINTS
-
-
-class GUI:
-    def __init__(self):
-        self.api_key = ""
-        self.session = ""
-
-    def analyse(self, api_key, pdf_file):
-        self.session = PaperReader(api_key, points_to_focus=BASE_POINTS)
-        return self.session.read_pdf_and_summarize(pdf_file)
-
-    def ask_question(self, question):
-        if self.session == "":
-            return "Please upload PDF file first!"
-        return self.session.question(question)
-
-
-with gr.Blocks() as demo:
-    gr.Markdown(
-        """
-        # CHATGPT-PAPER-READER
-        """)
-
-    with gr.Tab("Upload PDF File"):
-        pdf_input = gr.File(label="PDF File")
-        api_input = gr.Textbox(label="OpenAI API Key")
-        result = gr.Textbox(label="PDF Summary")
-        upload_button = gr.Button("Start Analyse")
-    with gr.Tab("Ask question about your PDF"):
-        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
-        answer = gr.Textbox(label="Answer")
-        ask_button = gr.Button("Ask")
-    with gr.Accordion("About this project"):
-        gr.Markdown(
-            """## CHATGPT-PAPER-READER📝
-            This repository provides a simple interface that utilizes the gpt-3.5-turbo
-            model to read academic papers in PDF format locally. You can use it to help you summarize papers,
-            create presentation slides, or simply fulfill tasks assigned by your supervisor.\n
-            [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")
-
-    app = GUI()
-    upload_button.click(fn=app.analyse, inputs=[api_input, pdf_input], outputs=result)
-    ask_button.click(app.ask_question, inputs=question_input, outputs=answer)
-
-if __name__ == "__main__":
-    demo.title = "CHATGPT-PAPER-READER"
-    demo.launch()  # add "share=True" to share CHATGPT-PAPER-READER app on Internet.
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/img2img.md
DELETED
@@ -1,55 +0,0 @@
-<!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-specific language governing permissions and limitations under the License.
--->
-
-# Image-to-image
-
-The Stable Diffusion model can also be applied to image-to-image generation by passing a text prompt and an initial image to condition the generation of new images.
-
-The [`StableDiffusionImg2ImgPipeline`] uses the diffusion-denoising mechanism proposed in [SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations](https://huggingface.co/papers/2108.01073) by Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, Stefano Ermon.
-
-The abstract from the paper is:
-
-*Guided image synthesis enables everyday users to create and edit photo-realistic images with minimum effort. The key challenge is balancing faithfulness to the user input (e.g., hand-drawn colored strokes) and realism of the synthesized image. Existing GAN-based methods attempt to achieve such balance using either conditional GANs or GAN inversions, which are challenging and often require additional training data or loss functions for individual applications. To address these issues, we introduce a new image synthesis and editing method, Stochastic Differential Editing (SDEdit), based on a diffusion model generative prior, which synthesizes realistic images by iteratively denoising through a stochastic differential equation (SDE). Given an input image with user guide of any type, SDEdit first adds noise to the input, then subsequently denoises the resulting image through the SDE prior to increase its realism. SDEdit does not require task-specific training or inversions and can naturally achieve the balance between realism and faithfulness. SDEdit significantly outperforms state-of-the-art GAN-based methods by up to 98.09% on realism and 91.72% on overall satisfaction scores, according to a human perception study, on multiple tasks, including stroke-based image synthesis and editing as well as image compositing.*
-
-<Tip>
-
-Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!
-
-</Tip>
-
-## StableDiffusionImg2ImgPipeline
-
-[[autodoc]] StableDiffusionImg2ImgPipeline
-    - all
-    - __call__
-    - enable_attention_slicing
-    - disable_attention_slicing
-    - enable_xformers_memory_efficient_attention
-    - disable_xformers_memory_efficient_attention
-    - load_textual_inversion
-    - from_single_file
-    - load_lora_weights
-    - save_lora_weights
-
-## StableDiffusionPipelineOutput
-
-[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
-
-## FlaxStableDiffusionImg2ImgPipeline
-
-[[autodoc]] FlaxStableDiffusionImg2ImgPipeline
-    - all
-    - __call__
-
-## FlaxStableDiffusionPipelineOutput
-
-[[autodoc]] pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput
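For context, the pipeline documented in the removed page is used roughly as follows; the checkpoint id and input image URL below are illustrative examples, not taken from this page:

import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

# Load the pipeline and condition generation on an initial image plus a prompt.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,       # how far from the input image the result may drift
    guidance_scale=7.5,
).images[0]
image.save("fantasy_landscape.png")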
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py
DELETED
@@ -1,667 +0,0 @@
|
|
1 |
-
# Copyright 2022 The Music Spectrogram Diffusion Authors.
|
2 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import dataclasses
|
17 |
-
import math
|
18 |
-
import os
|
19 |
-
from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
|
20 |
-
|
21 |
-
import numpy as np
|
22 |
-
import torch
|
23 |
-
import torch.nn.functional as F
|
24 |
-
|
25 |
-
from ...utils import is_note_seq_available
|
26 |
-
from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH
|
27 |
-
|
28 |
-
|
29 |
-
if is_note_seq_available():
|
30 |
-
import note_seq
|
31 |
-
else:
|
32 |
-
raise ImportError("Please install note-seq via `pip install note-seq`")
|
33 |
-
|
34 |
-
|
35 |
-
INPUT_FEATURE_LENGTH = 2048
|
36 |
-
|
37 |
-
SAMPLE_RATE = 16000
|
38 |
-
HOP_SIZE = 320
|
39 |
-
FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE)
|
40 |
-
|
41 |
-
DEFAULT_STEPS_PER_SECOND = 100
|
42 |
-
DEFAULT_MAX_SHIFT_SECONDS = 10
|
43 |
-
DEFAULT_NUM_VELOCITY_BINS = 1
|
44 |
-
|
45 |
-
SLAKH_CLASS_PROGRAMS = {
|
46 |
-
"Acoustic Piano": 0,
|
47 |
-
"Electric Piano": 4,
|
48 |
-
"Chromatic Percussion": 8,
|
49 |
-
"Organ": 16,
|
50 |
-
"Acoustic Guitar": 24,
|
51 |
-
"Clean Electric Guitar": 26,
|
52 |
-
"Distorted Electric Guitar": 29,
|
53 |
-
"Acoustic Bass": 32,
|
54 |
-
"Electric Bass": 33,
|
55 |
-
"Violin": 40,
|
56 |
-
"Viola": 41,
|
57 |
-
"Cello": 42,
|
58 |
-
"Contrabass": 43,
|
59 |
-
"Orchestral Harp": 46,
|
60 |
-
"Timpani": 47,
|
61 |
-
"String Ensemble": 48,
|
62 |
-
"Synth Strings": 50,
|
63 |
-
"Choir and Voice": 52,
|
64 |
-
"Orchestral Hit": 55,
|
65 |
-
"Trumpet": 56,
|
66 |
-
"Trombone": 57,
|
67 |
-
"Tuba": 58,
|
68 |
-
"French Horn": 60,
|
69 |
-
"Brass Section": 61,
|
70 |
-
"Soprano/Alto Sax": 64,
|
71 |
-
"Tenor Sax": 66,
|
72 |
-
"Baritone Sax": 67,
|
73 |
-
"Oboe": 68,
|
74 |
-
"English Horn": 69,
|
75 |
-
"Bassoon": 70,
|
76 |
-
"Clarinet": 71,
|
77 |
-
"Pipe": 73,
|
78 |
-
"Synth Lead": 80,
|
79 |
-
"Synth Pad": 88,
|
80 |
-
}
|
81 |
-
|
82 |
-
|
83 |
-
@dataclasses.dataclass
|
84 |
-
class NoteRepresentationConfig:
|
85 |
-
"""Configuration note representations."""
|
86 |
-
|
87 |
-
onsets_only: bool
|
88 |
-
include_ties: bool
|
89 |
-
|
90 |
-
|
91 |
-
@dataclasses.dataclass
|
92 |
-
class NoteEventData:
|
93 |
-
pitch: int
|
94 |
-
velocity: Optional[int] = None
|
95 |
-
program: Optional[int] = None
|
96 |
-
is_drum: Optional[bool] = None
|
97 |
-
instrument: Optional[int] = None
|
98 |
-
|
99 |
-
|
100 |
-
@dataclasses.dataclass
|
101 |
-
class NoteEncodingState:
|
102 |
-
"""Encoding state for note transcription, keeping track of active pitches."""
|
103 |
-
|
104 |
-
# velocity bin for active pitches and programs
|
105 |
-
active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict)
|
106 |
-
|
107 |
-
|
108 |
-
@dataclasses.dataclass
|
109 |
-
class EventRange:
|
110 |
-
type: str
|
111 |
-
min_value: int
|
112 |
-
max_value: int
|
113 |
-
|
114 |
-
|
115 |
-
@dataclasses.dataclass
|
116 |
-
class Event:
|
117 |
-
type: str
|
118 |
-
value: int
|
119 |
-
|
120 |
-
|
121 |
-
class Tokenizer:
|
122 |
-
def __init__(self, regular_ids: int):
|
123 |
-
# The special tokens: 0=PAD, 1=EOS, and 2=UNK
|
124 |
-
self._num_special_tokens = 3
|
125 |
-
self._num_regular_tokens = regular_ids
|
126 |
-
|
127 |
-
def encode(self, token_ids):
|
128 |
-
encoded = []
|
129 |
-
for token_id in token_ids:
|
130 |
-
if not 0 <= token_id < self._num_regular_tokens:
|
131 |
-
raise ValueError(
|
132 |
-
f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})"
|
133 |
-
)
|
134 |
-
encoded.append(token_id + self._num_special_tokens)
|
135 |
-
|
136 |
-
# Add EOS token
|
137 |
-
encoded.append(1)
|
138 |
-
|
139 |
-
# Pad to till INPUT_FEATURE_LENGTH
|
140 |
-
encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded))
|
141 |
-
|
142 |
-
return encoded
|
143 |
-
|
144 |
-
|
145 |
-
class Codec:
|
146 |
-
"""Encode and decode events.
|
147 |
-
|
148 |
-
Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from
|
149 |
-
Python before encoding or after decoding with GenericTokenVocabulary. This class is more lightweight and does not
|
150 |
-
include things like EOS or UNK token handling.
|
151 |
-
|
152 |
-
To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required
|
153 |
-
and specified separately.
|
154 |
-
"""
|
155 |
-
|
156 |
-
def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]):
|
157 |
-
"""Define Codec.
|
158 |
-
|
159 |
-
Args:
|
160 |
-
max_shift_steps: Maximum number of shift steps that can be encoded.
|
161 |
-
steps_per_second: Shift steps will be interpreted as having a duration of
|
162 |
-
1 / steps_per_second.
|
163 |
-
event_ranges: Other supported event types and their ranges.
|
164 |
-
"""
|
165 |
-
self.steps_per_second = steps_per_second
|
166 |
-
self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps)
|
167 |
-
self._event_ranges = [self._shift_range] + event_ranges
|
168 |
-
# Ensure all event types have unique names.
|
169 |
-
assert len(self._event_ranges) == len({er.type for er in self._event_ranges})
|
170 |
-
|
171 |
-
@property
|
172 |
-
def num_classes(self) -> int:
|
173 |
-
return sum(er.max_value - er.min_value + 1 for er in self._event_ranges)
|
174 |
-
|
175 |
-
# The next couple methods are simplified special case methods just for shift
|
176 |
-
# events that are intended to be used from within autograph functions.
|
177 |
-
|
178 |
-
def is_shift_event_index(self, index: int) -> bool:
|
179 |
-
return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value)
|
180 |
-
|
181 |
-
@property
|
182 |
-
def max_shift_steps(self) -> int:
|
183 |
-
return self._shift_range.max_value
|
184 |
-
|
185 |
-
def encode_event(self, event: Event) -> int:
|
186 |
-
"""Encode an event to an index."""
|
187 |
-
offset = 0
|
188 |
-
for er in self._event_ranges:
|
189 |
-
if event.type == er.type:
|
190 |
-
if not er.min_value <= event.value <= er.max_value:
|
191 |
-
raise ValueError(
|
192 |
-
f"Event value {event.value} is not within valid range "
|
193 |
-
f"[{er.min_value}, {er.max_value}] for type {event.type}"
|
194 |
-
)
|
195 |
-
return offset + event.value - er.min_value
|
196 |
-
offset += er.max_value - er.min_value + 1
|
197 |
-
|
198 |
-
raise ValueError(f"Unknown event type: {event.type}")
|
199 |
-
|
200 |
-
def event_type_range(self, event_type: str) -> Tuple[int, int]:
|
201 |
-
"""Return [min_id, max_id] for an event type."""
|
202 |
-
offset = 0
|
203 |
-
for er in self._event_ranges:
|
204 |
-
if event_type == er.type:
|
205 |
-
return offset, offset + (er.max_value - er.min_value)
|
206 |
-
offset += er.max_value - er.min_value + 1
|
207 |
-
|
208 |
-
raise ValueError(f"Unknown event type: {event_type}")
|
209 |
-
|
210 |
-
def decode_event_index(self, index: int) -> Event:
|
211 |
-
"""Decode an event index to an Event."""
|
212 |
-
offset = 0
|
213 |
-
for er in self._event_ranges:
|
214 |
-
if offset <= index <= offset + er.max_value - er.min_value:
|
215 |
-
return Event(type=er.type, value=er.min_value + index - offset)
|
216 |
-
offset += er.max_value - er.min_value + 1
|
217 |
-
|
218 |
-
raise ValueError(f"Unknown event index: {index}")
|
219 |
-
|
220 |
-
|
221 |
-
@dataclasses.dataclass
|
222 |
-
class ProgramGranularity:
|
223 |
-
# both tokens_map_fn and program_map_fn should be idempotent
|
224 |
-
tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]]
|
225 |
-
program_map_fn: Callable[[int], int]
|
226 |
-
|
227 |
-
|
228 |
-
def drop_programs(tokens, codec: Codec):
|
229 |
-
"""Drops program change events from a token sequence."""
|
230 |
-
min_program_id, max_program_id = codec.event_type_range("program")
|
231 |
-
return tokens[(tokens < min_program_id) | (tokens > max_program_id)]
|
232 |
-
|
233 |
-
|
234 |
-
def programs_to_midi_classes(tokens, codec):
|
235 |
-
"""Modifies program events to be the first program in the MIDI class."""
|
236 |
-
min_program_id, max_program_id = codec.event_type_range("program")
|
237 |
-
is_program = (tokens >= min_program_id) & (tokens <= max_program_id)
|
238 |
-
return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens)
|
239 |
-
|
240 |
-
|
241 |
-
PROGRAM_GRANULARITIES = {
|
242 |
-
# "flat" granularity; drop program change tokens and set NoteSequence
|
243 |
-
# programs to zero
|
244 |
-
"flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0),
|
245 |
-
# map each program to the first program in its MIDI class
|
246 |
-
"midi_class": ProgramGranularity(
|
247 |
-
tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8)
|
248 |
-
),
|
249 |
-
# leave programs as is
|
250 |
-
"full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, program_map_fn=lambda program: program),
|
251 |
-
}
|
252 |
-
|
253 |
-
|
254 |
-
def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1):
|
255 |
-
"""
|
256 |
-
equivalent of tf.signal.frame
|
257 |
-
"""
|
258 |
-
signal_length = signal.shape[axis]
|
259 |
-
if pad_end:
|
260 |
-
frames_overlap = frame_length - frame_step
|
261 |
-
rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap)
|
262 |
-
pad_size = int(frame_length - rest_samples)
|
263 |
-
|
264 |
-
if pad_size != 0:
|
265 |
-
pad_axis = [0] * signal.ndim
|
266 |
-
pad_axis[axis] = pad_size
|
267 |
-
signal = F.pad(signal, pad_axis, "constant", pad_value)
|
268 |
-
frames = signal.unfold(axis, frame_length, frame_step)
|
269 |
-
return frames
|
270 |
-
|
271 |
-
|
272 |
-
def program_to_slakh_program(program):
|
273 |
-
# this is done very hackily, probably should use a custom mapping
|
274 |
-
for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True):
|
275 |
-
if program >= slakh_program:
|
276 |
-
return slakh_program
|
277 |
-
|
278 |
-
|
279 |
-
def audio_to_frames(
|
280 |
-
samples,
|
281 |
-
hop_size: int,
|
282 |
-
frame_rate: int,
|
283 |
-
) -> Tuple[Sequence[Sequence[int]], torch.Tensor]:
|
284 |
-
"""Convert audio samples to non-overlapping frames and frame times."""
|
285 |
-
frame_size = hop_size
|
286 |
-
samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant")
|
287 |
-
|
288 |
-
# Split audio into frames.
|
289 |
-
frames = frame(
|
290 |
-
torch.Tensor(samples).unsqueeze(0),
|
291 |
-
frame_length=frame_size,
|
292 |
-
frame_step=frame_size,
|
293 |
-
pad_end=False, # TODO check why its off by 1 here when True
|
294 |
-
)
|
295 |
-
|
296 |
-
num_frames = len(samples) // frame_size
|
297 |
-
|
298 |
-
times = np.arange(num_frames) / frame_rate
|
299 |
-
return frames, times
|
300 |
-
|
301 |
-
|
302 |
-
def note_sequence_to_onsets_and_offsets_and_programs(
|
303 |
-
ns: note_seq.NoteSequence,
|
304 |
-
) -> Tuple[Sequence[float], Sequence[NoteEventData]]:
|
305 |
-
"""Extract onset & offset times and pitches & programs from a NoteSequence.
|
306 |
-
|
307 |
-
The onset & offset times will not necessarily be in sorted order.
|
308 |
-
|
309 |
-
Args:
|
310 |
-
ns: NoteSequence from which to extract onsets and offsets.
|
311 |
-
|
312 |
-
Returns:
|
313 |
-
times: A list of note onset and offset times. values: A list of NoteEventData objects where velocity is zero for
|
314 |
-
note
|
315 |
-
offsets.
|
316 |
-
"""
|
317 |
-
# Sort by program and pitch and put offsets before onsets as a tiebreaker for
|
318 |
-
# subsequent stable sort.
|
319 |
-
notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch))
|
320 |
-
times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes]
|
321 |
-
values = [
|
322 |
-
NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False)
|
323 |
-
for note in notes
|
324 |
-
if not note.is_drum
|
325 |
-
] + [
|
326 |
-
NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum)
|
327 |
-
for note in notes
|
328 |
-
]
|
329 |
-
return times, values
|
330 |
-
|
331 |
-
|
332 |
-
def num_velocity_bins_from_codec(codec: Codec):
|
333 |
-
"""Get number of velocity bins from event codec."""
|
334 |
-
lo, hi = codec.event_type_range("velocity")
|
335 |
-
return hi - lo
|
336 |
-
|
337 |
-
|
338 |
-
# segment an array into segments of length n
|
339 |
-
def segment(a, n):
|
340 |
-
return [a[i : i + n] for i in range(0, len(a), n)]
|
341 |
-
|
342 |
-
|
343 |
-
def velocity_to_bin(velocity, num_velocity_bins):
|
344 |
-
if velocity == 0:
|
345 |
-
return 0
|
346 |
-
else:
|
347 |
-
return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY)
|
348 |
-
|
349 |
-
|
350 |
-
def note_event_data_to_events(
|
351 |
-
state: Optional[NoteEncodingState],
|
352 |
-
value: NoteEventData,
|
353 |
-
codec: Codec,
|
354 |
-
) -> Sequence[Event]:
|
355 |
-
"""Convert note event data to a sequence of events."""
|
356 |
-
if value.velocity is None:
|
357 |
-
# onsets only, no program or velocity
|
358 |
-
return [Event("pitch", value.pitch)]
|
359 |
-
else:
|
360 |
-
num_velocity_bins = num_velocity_bins_from_codec(codec)
|
361 |
-
velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins)
|
362 |
-
if value.program is None:
|
363 |
-
# onsets + offsets + velocities only, no programs
|
364 |
-
if state is not None:
|
365 |
-
state.active_pitches[(value.pitch, 0)] = velocity_bin
|
366 |
-
return [Event("velocity", velocity_bin), Event("pitch", value.pitch)]
|
367 |
-
else:
|
368 |
-
if value.is_drum:
|
369 |
-
# drum events use a separate vocabulary
|
370 |
-
return [Event("velocity", velocity_bin), Event("drum", value.pitch)]
|
371 |
-
else:
|
372 |
-
# program + velocity + pitch
|
373 |
-
if state is not None:
|
374 |
-
state.active_pitches[(value.pitch, value.program)] = velocity_bin
|
375 |
-
return [
|
376 |
-
Event("program", value.program),
|
377 |
-
Event("velocity", velocity_bin),
|
378 |
-
Event("pitch", value.pitch),
|
379 |
-
]
|
380 |
-
|
381 |
-
|
382 |
-
def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]:
|
383 |
-
"""Output program and pitch events for active notes plus a final tie event."""
|
384 |
-
events = []
|
385 |
-
for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]):
|
386 |
-
if state.active_pitches[(pitch, program)]:
|
387 |
-
events += [Event("program", program), Event("pitch", pitch)]
|
388 |
-
events.append(Event("tie", 0))
|
389 |
-
return events
|
390 |
-
|
391 |
-
|
392 |
-
def encode_and_index_events(
|
393 |
-
state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None
|
394 |
-
):
|
395 |
-
"""Encode a sequence of timed events and index to audio frame times.
|
396 |
-
|
397 |
-
Encodes time shifts as repeated single step shifts for later run length encoding.
|
398 |
-
|
399 |
-
Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio
|
400 |
-
frame. This can be used e.g. to prepend events representing the current state to a targets segment.
|
401 |
-
|
402 |
-
Args:
|
403 |
-
state: Initial event encoding state.
|
404 |
-
event_times: Sequence of event times.
|
405 |
-
event_values: Sequence of event values.
|
406 |
-
encode_event_fn: Function that transforms event value into a sequence of one
|
407 |
-
or more Event objects.
|
408 |
-
codec: An Codec object that maps Event objects to indices.
|
409 |
-
frame_times: Time for every audio frame.
|
410 |
-
encoding_state_to_events_fn: Function that transforms encoding state into a
|
411 |
-
sequence of one or more Event objects.
|
412 |
-
|
413 |
-
Returns:
|
414 |
-
events: Encoded events and shifts. event_start_indices: Corresponding start event index for every audio frame.
|
415 |
-
Note: one event can correspond to multiple audio indices due to sampling rate differences. This makes
|
416 |
-
splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of
|
417 |
-
another.
|
418 |
-
event_end_indices: Corresponding end event index for every audio frame. Used
|
419 |
-
to ensure when slicing that one chunk ends where the next begins. Should always be true that
|
420 |
-
event_end_indices[i] = event_start_indices[i + 1].
|
421 |
-
state_events: Encoded "state" events representing the encoding state before
|
422 |
-
each event.
|
423 |
-
state_event_indices: Corresponding state event index for every audio frame.
|
424 |
-
"""
|
425 |
-
indices = np.argsort(event_times, kind="stable")
|
426 |
-
event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices]
|
427 |
-
event_values = [event_values[i] for i in indices]
|
428 |
-
|
429 |
-
events = []
|
430 |
-
state_events = []
|
431 |
-
event_start_indices = []
|
432 |
-
state_event_indices = []
|
433 |
-
|
434 |
-
cur_step = 0
|
435 |
-
cur_event_idx = 0
|
436 |
-
cur_state_event_idx = 0
|
437 |
-
|
438 |
-
def fill_event_start_indices_to_cur_step():
|
439 |
-
while (
|
440 |
-
len(event_start_indices) < len(frame_times)
|
441 |
-
and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second
|
442 |
-
):
|
443 |
-
event_start_indices.append(cur_event_idx)
|
444 |
-
state_event_indices.append(cur_state_event_idx)
|
445 |
-
|
446 |
-
for event_step, event_value in zip(event_steps, event_values):
|
447 |
-
while event_step > cur_step:
|
448 |
-
events.append(codec.encode_event(Event(type="shift", value=1)))
|
449 |
-
cur_step += 1
|
450 |
-
fill_event_start_indices_to_cur_step()
|
451 |
-
cur_event_idx = len(events)
|
452 |
-
cur_state_event_idx = len(state_events)
|
453 |
-
if encoding_state_to_events_fn:
|
454 |
-
# Dump state to state events *before* processing the next event, because
|
455 |
-
# we want to capture the state prior to the occurrence of the event.
|
456 |
-
for e in encoding_state_to_events_fn(state):
|
457 |
-
state_events.append(codec.encode_event(e))
|
458 |
-
|
459 |
-
for e in encode_event_fn(state, event_value, codec):
|
460 |
-
events.append(codec.encode_event(e))
|
461 |
-
|
462 |
-
# After the last event, continue filling out the event_start_indices array.
|
463 |
-
# The inequality is not strict because if our current step lines up exactly
|
464 |
-
# with (the start of) an audio frame, we need to add an additional shift event
|
465 |
-
# to "cover" that frame.
|
466 |
-
while cur_step / codec.steps_per_second <= frame_times[-1]:
|
467 |
-
events.append(codec.encode_event(Event(type="shift", value=1)))
|
-        cur_step += 1
-        fill_event_start_indices_to_cur_step()
-        cur_event_idx = len(events)
-
-    # Now fill in event_end_indices. We need this extra array to make sure that
-    # when we slice events, each slice ends exactly where the subsequent slice
-    # begins.
-    event_end_indices = event_start_indices[1:] + [len(events)]
-
-    events = np.array(events).astype(np.int32)
-    state_events = np.array(state_events).astype(np.int32)
-    event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH)
-    event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH)
-    state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH)
-
-    outputs = []
-    for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices):
-        outputs.append(
-            {
-                "inputs": events,
-                "event_start_indices": start_indices,
-                "event_end_indices": end_indices,
-                "state_events": state_events,
-                "state_event_indices": event_indices,
-            }
-        )
-
-    return outputs
-
-
-def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"):
-    """Extract target sequence corresponding to audio token segment."""
-    features = features.copy()
-    start_idx = features["event_start_indices"][0]
-    end_idx = features["event_end_indices"][-1]
-
-    features[feature_key] = features[feature_key][start_idx:end_idx]
-
-    if state_events_end_token is not None:
-        # Extract the state events corresponding to the audio start token, and
-        # prepend them to the targets array.
-        state_event_start_idx = features["state_event_indices"][0]
-        state_event_end_idx = state_event_start_idx + 1
-        while features["state_events"][state_event_end_idx - 1] != state_events_end_token:
-            state_event_end_idx += 1
-        features[feature_key] = np.concatenate(
-            [
-                features["state_events"][state_event_start_idx:state_event_end_idx],
-                features[feature_key],
-            ],
-            axis=0,
-        )
-
-    return features
-
-
-def map_midi_programs(
-    feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs"
-) -> Mapping[str, Any]:
-    """Apply MIDI program map to token sequences."""
-    granularity = PROGRAM_GRANULARITIES[granularity_type]
-
-    feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec)
-    return feature
-
-
-def run_length_encode_shifts_fn(
-    features,
-    codec: Codec,
-    feature_key: str = "inputs",
-    state_change_event_types: Sequence[str] = (),
-) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]:
-    """Return a function that run-length encodes shifts for a given codec.
-
-    Args:
-      codec: The Codec to use for shift events.
-      feature_key: The feature key for which to run-length encode shifts.
-      state_change_event_types: A list of event types that represent state
-        changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones
-        will be removed.
-
-    Returns:
-      A preprocessing function that run-length encodes single-step shifts.
-    """
-    state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types]
-
-    def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]:
-        """Combine leading/interior shifts, trim trailing shifts.
-
-        Args:
-          features: Dict of features to process.
-
-        Returns:
-          A dict of features.
-        """
-        events = features[feature_key]
-
-        shift_steps = 0
-        total_shift_steps = 0
-        output = np.array([], dtype=np.int32)
-
-        current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32)
-
-        for event in events:
-            if codec.is_shift_event_index(event):
-                shift_steps += 1
-                total_shift_steps += 1
-
-            else:
-                # If this event is a state change and has the same value as the current
-                # state, we can skip it entirely.
-                is_redundant = False
-                for i, (min_index, max_index) in enumerate(state_change_event_ranges):
-                    if (min_index <= event) and (event <= max_index):
-                        if current_state[i] == event:
-                            is_redundant = True
-                        current_state[i] = event
-                if is_redundant:
-                    continue
-
-                # Once we've reached a non-shift event, RLE all previous shift events
-                # before outputting the non-shift event.
-                if shift_steps > 0:
-                    shift_steps = total_shift_steps
-                    while shift_steps > 0:
-                        output_steps = np.minimum(codec.max_shift_steps, shift_steps)
-                        output = np.concatenate([output, [output_steps]], axis=0)
-                        shift_steps -= output_steps
-                output = np.concatenate([output, [event]], axis=0)
-
-        features[feature_key] = output
-        return features
-
-    return run_length_encode_shifts(features)
-
-
-def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig):
-    tie_token = codec.encode_event(Event("tie", 0))
-    state_events_end_token = tie_token if note_representation_config.include_ties else None
-
-    features = extract_sequence_with_indices(
-        features, state_events_end_token=state_events_end_token, feature_key="inputs"
-    )
-
-    features = map_midi_programs(features, codec)
-
-    features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"])
-
-    return features
-
-
-class MidiProcessor:
-    def __init__(self):
-        self.codec = Codec(
-            max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND,
-            steps_per_second=DEFAULT_STEPS_PER_SECOND,
-            event_ranges=[
-                EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH),
-                EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS),
-                EventRange("tie", 0, 0),
-                EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM),
-                EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH),
-            ],
-        )
-        self.tokenizer = Tokenizer(self.codec.num_classes)
-        self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True)
-
-    def __call__(self, midi: Union[bytes, os.PathLike, str]):
-        if not isinstance(midi, bytes):
-            with open(midi, "rb") as f:
-                midi = f.read()
-
-        ns = note_seq.midi_to_note_sequence(midi)
-        ns_sus = note_seq.apply_sustain_control_changes(ns)
-
-        for note in ns_sus.notes:
-            if not note.is_drum:
-                note.program = program_to_slakh_program(note.program)
-
-        samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE))
-
-        _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE)
-        times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus)
-
-        events = encode_and_index_events(
-            state=NoteEncodingState(),
-            event_times=times,
-            event_values=values,
-            frame_times=frame_times,
-            codec=self.codec,
-            encode_event_fn=note_event_data_to_events,
-            encoding_state_to_events_fn=note_encoding_state_to_events,
-        )
-
-        events = [
-            note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events
-        ]
-        input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events]
-
-        return input_tokens
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/pil_utils.py
DELETED
@@ -1,48 +0,0 @@
-import PIL.Image
-import PIL.ImageOps
-from packaging import version
-from PIL import Image
-
-
-if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
-    PIL_INTERPOLATION = {
-        "linear": PIL.Image.Resampling.BILINEAR,
-        "bilinear": PIL.Image.Resampling.BILINEAR,
-        "bicubic": PIL.Image.Resampling.BICUBIC,
-        "lanczos": PIL.Image.Resampling.LANCZOS,
-        "nearest": PIL.Image.Resampling.NEAREST,
-    }
-else:
-    PIL_INTERPOLATION = {
-        "linear": PIL.Image.LINEAR,
-        "bilinear": PIL.Image.BILINEAR,
-        "bicubic": PIL.Image.BICUBIC,
-        "lanczos": PIL.Image.LANCZOS,
-        "nearest": PIL.Image.NEAREST,
-    }
-
-
-def pt_to_pil(images):
-    """
-    Convert a torch image to a PIL image.
-    """
-    images = (images / 2 + 0.5).clamp(0, 1)
-    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
-    images = numpy_to_pil(images)
-    return images
-
-
-def numpy_to_pil(images):
-    """
-    Convert a numpy image or a batch of images to a PIL image.
-    """
-    if images.ndim == 3:
-        images = images[None, ...]
-    images = (images * 255).round().astype("uint8")
-    if images.shape[-1] == 1:
-        # special case for grayscale (single channel) images
-        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
-    else:
-        pil_images = [Image.fromarray(image) for image in images]
-
-    return pil_images
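
A small usage sketch of the helpers above; it assumes `torch` and Pillow are installed, and the tensor shape is illustrative.

```python
# Sketch: convert a fake decoder output in [-1, 1] to PIL images with the helpers above.
import torch

images = torch.randn(2, 3, 64, 64)   # batch of 2 RGB images, values roughly in [-1, 1]
pil_images = pt_to_pil(images)       # rescale to [0, 1], move channels last, convert to PIL
pil_images[0].save("sample_0.png")
```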
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_swin_fpn.py
DELETED
@@ -1,127 +0,0 @@
-# model settings
-model = dict(
-    type='MaskRCNN',
-    pretrained=None,
-    backbone=dict(
-        type='SwinTransformer',
-        embed_dim=96,
-        depths=[2, 2, 6, 2],
-        num_heads=[3, 6, 12, 24],
-        window_size=7,
-        mlp_ratio=4.,
-        qkv_bias=True,
-        qk_scale=None,
-        drop_rate=0.,
-        attn_drop_rate=0.,
-        drop_path_rate=0.2,
-        ape=False,
-        patch_norm=True,
-        out_indices=(0, 1, 2, 3),
-        use_checkpoint=False),
-    neck=dict(
-        type='FPN',
-        in_channels=[96, 192, 384, 768],
-        out_channels=256,
-        num_outs=5),
-    rpn_head=dict(
-        type='RPNHead',
-        in_channels=256,
-        feat_channels=256,
-        anchor_generator=dict(
-            type='AnchorGenerator',
-            scales=[8],
-            ratios=[0.5, 1.0, 2.0],
-            strides=[4, 8, 16, 32, 64]),
-        bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[1.0, 1.0, 1.0, 1.0]),
-        loss_cls=dict(
-            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-    roi_head=dict(
-        type='StandardRoIHead',
-        bbox_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
-        bbox_head=dict(
-            type='Shared2FCBBoxHead',
-            in_channels=256,
-            fc_out_channels=1024,
-            roi_feat_size=7,
-            num_classes=80,
-            bbox_coder=dict(
-                type='DeltaXYWHBBoxCoder',
-                target_means=[0., 0., 0., 0.],
-                target_stds=[0.1, 0.1, 0.2, 0.2]),
-            reg_class_agnostic=False,
-            loss_cls=dict(
-                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-        mask_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-            out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
-        mask_head=dict(
-            type='FCNMaskHead',
-            num_convs=4,
-            in_channels=256,
-            conv_out_channels=256,
-            num_classes=80,
-            loss_mask=dict(
-                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
-    # model training and testing settings
-    train_cfg=dict(
-        rpn=dict(
-            assigner=dict(
-                type='MaxIoUAssigner',
-                pos_iou_thr=0.7,
-                neg_iou_thr=0.3,
-                min_pos_iou=0.3,
-                match_low_quality=True,
-                ignore_iof_thr=-1),
-            sampler=dict(
-                type='RandomSampler',
-                num=256,
-                pos_fraction=0.5,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=False),
-            allowed_border=-1,
-            pos_weight=-1,
-            debug=False),
-        rpn_proposal=dict(
-            nms_pre=2000,
-            max_per_img=1000,
-            nms=dict(type='nms', iou_threshold=0.7),
-            min_bbox_size=0),
-        rcnn=dict(
-            assigner=dict(
-                type='MaxIoUAssigner',
-                pos_iou_thr=0.5,
-                neg_iou_thr=0.5,
-                min_pos_iou=0.5,
-                match_low_quality=True,
-                ignore_iof_thr=-1),
-            sampler=dict(
-                type='RandomSampler',
-                num=512,
-                pos_fraction=0.25,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=True),
-            mask_size=28,
-            pos_weight=-1,
-            debug=False)),
-    test_cfg=dict(
-        rpn=dict(
-            nms_pre=1000,
-            max_per_img=1000,
-            nms=dict(type='nms', iou_threshold=0.7),
-            min_bbox_size=0),
-        rcnn=dict(
-            score_thr=0.05,
-            nms=dict(type='nms', iou_threshold=0.5),
-            max_per_img=100,
-            mask_thr_binary=0.5)))
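
For context, base model files like this one are consumed through mmdetection-style config inheritance: a downstream config lists the file in `_base_` and overrides individual fields. The sketch below is illustrative; the dataset and schedule paths are assumptions, not part of the deleted file.

```python
# Illustrative child config (paths are assumptions): inherit the base model and
# override a single backbone field.
_base_ = [
    '../_base_/models/mask_rcnn_swin_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py',
]
model = dict(backbone=dict(drop_path_rate=0.1))
```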
spaces/Andy1621/uniformer_image_detection/configs/pisa/README.md
DELETED
@@ -1,40 +0,0 @@
-# Prime Sample Attention in Object Detection
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@inproceedings{cao2019prime,
-  title={Prime sample attention in object detection},
-  author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua},
-  booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-  year={2020}
-}
-```
-
-## Results and models
-
-| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download |
-|:----:|:-------:|:-------------------:|:-------:|:------:|:-------:|:------:|:--------:|
-| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - |
-| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) |
-| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - |
-| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) |
-| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - |
-| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) |
-| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - |
-| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | |
-| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - |
-| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) |
-| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - |
-| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) |
-| × | SSD300 | VGG16 | 1x | 25.6 | | - |
-| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) |
-| × | SSD300 | VGG16 | 1x | 29.3 | | - |
-| √ | SSD300 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) |
-
-**Notes:**
-
-- In the original paper, all models are trained and tested on mmdet v1.x, thus results may not be exactly the same with this release on v2.0.
-- It is noted PISA only modifies the training pipeline so the inference time remains the same with the baseline.
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py
DELETED
@@ -1,7 +0,0 @@
-_base_ = [
-    '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
-    decode_head=dict(mask_size=(66, 66), num_classes=150),
-    auxiliary_head=dict(num_classes=150))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
DELETED
@@ -1,39 +0,0 @@
-_base_ = [
-    '../../configs/_base_/models/upernet_uniformer.py',
-    '../../configs/_base_/datasets/ade20k.py',
-    '../../configs/_base_/default_runtime.py',
-    '../../configs/_base_/schedules/schedule_160k.py'
-]
-model = dict(
-    backbone=dict(
-        type='UniFormer',
-        embed_dim=[64, 128, 320, 512],
-        layers=[3, 4, 8, 3],
-        head_dim=64,
-        drop_path_rate=0.25,
-        windows=True,
-        hybrid=False,
-        window_size=32
-    ),
-    decode_head=dict(
-        in_channels=[64, 128, 320, 512],
-        num_classes=150
-    ),
-    auxiliary_head=dict(
-        in_channels=320,
-        num_classes=150
-    ))
-
-# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
-optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
-                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                 'relative_position_bias_table': dict(decay_mult=0.),
-                                                 'norm': dict(decay_mult=0.)}))
-
-lr_config = dict(_delete_=True, policy='poly',
-                 warmup='linear',
-                 warmup_iters=1500,
-                 warmup_ratio=1e-6,
-                 power=1.0, min_lr=0.0, by_epoch=False)
-
-data=dict(samples_per_gpu=2)
spaces/Apex-X/ROOPOK/roop/processors/frame/__init__.py
DELETED
File without changes
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/loss.py
DELETED
@@ -1,136 +0,0 @@
-from fastai import *
-from fastai.core import *
-from fastai.torch_core import *
-from fastai.callbacks import hook_outputs
-import torchvision.models as models
-
-
-class FeatureLoss(nn.Module):
-    def __init__(self, layer_wgts=[20, 70, 10]):
-        super().__init__()
-
-        self.m_feat = models.vgg16_bn(True).features.cuda().eval()
-        requires_grad(self.m_feat, False)
-        blocks = [
-            i - 1
-            for i, o in enumerate(children(self.m_feat))
-            if isinstance(o, nn.MaxPool2d)
-        ]
-        layer_ids = blocks[2:5]
-        self.loss_features = [self.m_feat[i] for i in layer_ids]
-        self.hooks = hook_outputs(self.loss_features, detach=False)
-        self.wgts = layer_wgts
-        self.metric_names = ['pixel'] + [f'feat_{i}' for i in range(len(layer_ids))]
-        self.base_loss = F.l1_loss
-
-    def _make_features(self, x, clone=False):
-        self.m_feat(x)
-        return [(o.clone() if clone else o) for o in self.hooks.stored]
-
-    def forward(self, input, target):
-        out_feat = self._make_features(target, clone=True)
-        in_feat = self._make_features(input)
-        self.feat_losses = [self.base_loss(input, target)]
-        self.feat_losses += [
-            self.base_loss(f_in, f_out) * w
-            for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)
-        ]
-
-        self.metrics = dict(zip(self.metric_names, self.feat_losses))
-        return sum(self.feat_losses)
-
-    def __del__(self):
-        self.hooks.remove()
-
-
-# Refactored code, originally from https://github.com/VinceMarron/style_transfer
-class WassFeatureLoss(nn.Module):
-    def __init__(self, layer_wgts=[5, 15, 2], wass_wgts=[3.0, 0.7, 0.01]):
-        super().__init__()
-        self.m_feat = models.vgg16_bn(True).features.cuda().eval()
-        requires_grad(self.m_feat, False)
-        blocks = [
-            i - 1
-            for i, o in enumerate(children(self.m_feat))
-            if isinstance(o, nn.MaxPool2d)
-        ]
-        layer_ids = blocks[2:5]
-        self.loss_features = [self.m_feat[i] for i in layer_ids]
-        self.hooks = hook_outputs(self.loss_features, detach=False)
-        self.wgts = layer_wgts
-        self.wass_wgts = wass_wgts
-        self.metric_names = (
-            ['pixel']
-            + [f'feat_{i}' for i in range(len(layer_ids))]
-            + [f'wass_{i}' for i in range(len(layer_ids))]
-        )
-        self.base_loss = F.l1_loss
-
-    def _make_features(self, x, clone=False):
-        self.m_feat(x)
-        return [(o.clone() if clone else o) for o in self.hooks.stored]
-
-    def _calc_2_moments(self, tensor):
-        chans = tensor.shape[1]
-        tensor = tensor.view(1, chans, -1)
-        n = tensor.shape[2]
-        mu = tensor.mean(2)
-        tensor = (tensor - mu[:, :, None]).squeeze(0)
-        # Prevents nasty bug that happens very occassionally- divide by zero. Why such things happen?
-        if n == 0:
-            return None, None
-        cov = torch.mm(tensor, tensor.t()) / float(n)
-        return mu, cov
-
-    def _get_style_vals(self, tensor):
-        mean, cov = self._calc_2_moments(tensor)
-        if mean is None:
-            return None, None, None
-        eigvals, eigvects = torch.symeig(cov, eigenvectors=True)
-        eigroot_mat = torch.diag(torch.sqrt(eigvals.clamp(min=0)))
-        root_cov = torch.mm(torch.mm(eigvects, eigroot_mat), eigvects.t())
-        tr_cov = eigvals.clamp(min=0).sum()
-        return mean, tr_cov, root_cov
-
-    def _calc_l2wass_dist(
-        self, mean_stl, tr_cov_stl, root_cov_stl, mean_synth, cov_synth
-    ):
-        tr_cov_synth = torch.symeig(cov_synth, eigenvectors=True)[0].clamp(min=0).sum()
-        mean_diff_squared = (mean_stl - mean_synth).pow(2).sum()
-        cov_prod = torch.mm(torch.mm(root_cov_stl, cov_synth), root_cov_stl)
-        var_overlap = torch.sqrt(
-            torch.symeig(cov_prod, eigenvectors=True)[0].clamp(min=0) + 1e-8
-        ).sum()
-        dist = mean_diff_squared + tr_cov_stl + tr_cov_synth - 2 * var_overlap
-        return dist
-
-    def _single_wass_loss(self, pred, targ):
-        mean_test, tr_cov_test, root_cov_test = targ
-        mean_synth, cov_synth = self._calc_2_moments(pred)
-        loss = self._calc_l2wass_dist(
-            mean_test, tr_cov_test, root_cov_test, mean_synth, cov_synth
-        )
-        return loss
-
-    def forward(self, input, target):
-        out_feat = self._make_features(target, clone=True)
-        in_feat = self._make_features(input)
-        self.feat_losses = [self.base_loss(input, target)]
-        self.feat_losses += [
-            self.base_loss(f_in, f_out) * w
-            for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)
-        ]
-
-        styles = [self._get_style_vals(i) for i in out_feat]
-
-        if styles[0][0] is not None:
-            self.feat_losses += [
-                self._single_wass_loss(f_pred, f_targ) * w
-                for f_pred, f_targ, w in zip(in_feat, styles, self.wass_wgts)
-            ]
-
-        self.metrics = dict(zip(self.metric_names, self.feat_losses))
-        return sum(self.feat_losses)
-
-    def __del__(self):
-        self.hooks.remove()
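
A minimal usage sketch of `FeatureLoss`; it assumes a CUDA device, fastai v1 and torchvision are available (the constructor moves VGG16-BN to the GPU), and the tensor shapes are illustrative.

```python
# Sketch: perceptual loss between a prediction and a target batch (requires a GPU).
import torch

criterion = FeatureLoss()                        # VGG16-BN feature loss with default weights
pred = torch.rand(1, 3, 224, 224, device="cuda")
target = torch.rand(1, 3, 224, 224, device="cuda")
loss = criterion(pred, target)                   # pixel L1 + weighted per-layer feature L1
print(loss.item(), list(criterion.metrics.keys()))
```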
spaces/AriaMei/TTSdemo/train_ms.py
DELETED
@@ -1,296 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import commons
-import utils
-from data_utils import (
-    TextAudioSpeakerLoader,
-    TextAudioSpeakerCollate,
-    DistributedBucketSampler
-)
-from models import (
-    SynthesizerTrn,
-    MultiPeriodDiscriminator,
-)
-from losses import (
-    generator_loss,
-    discriminator_loss,
-    feature_loss,
-    kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-
-
-def main():
-    """Assume Single Node Multi GPUs Training Only"""
-    assert torch.cuda.is_available(), "CPU training is not allowed."
-
-    n_gpus = torch.cuda.device_count()
-    os.environ['MASTER_ADDR'] = 'localhost'
-    os.environ['MASTER_PORT'] = '8899'
-
-    hps = utils.get_hparams()
-    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
-    global global_step
-    if rank == 0:
-        logger = utils.get_logger(hps.model_dir)
-        print(hps) or logger.info(hps)
-        utils.check_git_hash(hps.model_dir)
-        writer = SummaryWriter(log_dir=hps.model_dir)
-        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-    dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
-    torch.manual_seed(hps.train.seed)
-    torch.cuda.set_device(rank)
-
-    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
-    train_sampler = DistributedBucketSampler(
-        train_dataset,
-        hps.train.batch_size,
-        [32,300,400,500,600,700,800,900,1000],
-        num_replicas=n_gpus,
-        rank=rank,
-        shuffle=True)
-    collate_fn = TextAudioSpeakerCollate()
-    train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
-                              collate_fn=collate_fn, batch_sampler=train_sampler)
-    if rank == 0:
-        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
-        eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
-                                 batch_size=hps.train.batch_size, pin_memory=True,
-                                 drop_last=False, collate_fn=collate_fn)
-
-    net_g = SynthesizerTrn(
-        len(symbols),
-        hps.data.filter_length // 2 + 1,
-        hps.train.segment_size // hps.data.hop_length,
-        n_speakers=hps.data.n_speakers,
-        **hps.model).cuda(rank)
-    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
-    optim_g = torch.optim.AdamW(
-        net_g.parameters(),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    optim_d = torch.optim.AdamW(
-        net_d.parameters(),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    net_g = DDP(net_g, device_ids=[rank])
-    net_d = DDP(net_d, device_ids=[rank])
-
-    try:
-        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
-        _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
-        global_step = (epoch_str - 1) * len(train_loader)
-    except:
-        epoch_str = 1
-        global_step = 0
-
-    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-
-    scaler = GradScaler(enabled=hps.train.fp16_run)
-
-    for epoch in range(epoch_str, hps.train.epochs + 1):
-        if rank==0:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
-        else:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
-        scheduler_g.step()
-        scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
-    net_g, net_d = nets
-    optim_g, optim_d = optims
-    scheduler_g, scheduler_d = schedulers
-    train_loader, eval_loader = loaders
-    if writers is not None:
-        writer, writer_eval = writers
-
-    train_loader.batch_sampler.set_epoch(epoch)
-    global global_step
-
-    net_g.train()
-    net_d.train()
-    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, emo) in enumerate(train_loader):
-        x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
-        spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-        y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-        speakers = speakers.cuda(rank, non_blocking=True)
-        emo = emo.cuda(rank, non_blocking=True)
-
-        with autocast(enabled=hps.train.fp16_run):
-            y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
-            (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers, emo)
-
-            mel = spec_to_mel_torch(
-                spec,
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax)
-            y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
-            y_hat_mel = mel_spectrogram_torch(
-                y_hat.squeeze(1),
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.hop_length,
-                hps.data.win_length,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax
-            )
-
-            y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
-            # Discriminator
-            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-            with autocast(enabled=False):
-                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
-                loss_disc_all = loss_disc
-        optim_d.zero_grad()
-        scaler.scale(loss_disc_all).backward()
-        scaler.unscale_(optim_d)
-        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
-        scaler.step(optim_d)
-
-        with autocast(enabled=hps.train.fp16_run):
-            # Generator
-            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
-            with autocast(enabled=False):
-                loss_dur = torch.sum(l_length.float())
-                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
-                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
-                loss_fm = feature_loss(fmap_r, fmap_g)
-                loss_gen, losses_gen = generator_loss(y_d_hat_g)
-                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
-        optim_g.zero_grad()
-        scaler.scale(loss_gen_all.float()).backward()
-        scaler.unscale_(optim_g)
-        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
-        scaler.step(optim_g)
-        scaler.update()
-
-        if rank==0:
-            if global_step % hps.train.log_interval == 0:
-                lr = optim_g.param_groups[0]['lr']
-                losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
-                logger.info('Train Epoch: {} [{:.0f}%]'.format(
-                    epoch,
-                    100. * batch_idx / len(train_loader)))
-                print([x.item() for x in losses] + [global_step, lr]) or logger.info([x.item() for x in losses] + [global_step, lr])
-
-                scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
-                scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-
-                scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
-                scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
-                scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-                image_dict = {
-                    "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
-                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
-                    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
-                    "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
-                }
-                utils.summarize(
-                    writer=writer,
-                    global_step=global_step,
-                    images=image_dict,
-                    scalars=scalar_dict)
-
-            if global_step % hps.train.eval_interval == 0:
-                evaluate(hps, net_g, eval_loader, writer_eval)
-                utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
-                utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
-        global_step += 1
-
-    if rank == 0:
-        print('====> Epoch: {}'.format(epoch)) or logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
-    generator.eval()
-    with torch.no_grad():
-        for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, emo) in enumerate(eval_loader):
-            x, x_lengths = x.cuda(0), x_lengths.cuda(0)
-            spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
-            y, y_lengths = y.cuda(0), y_lengths.cuda(0)
-            speakers = speakers.cuda(0)
-            emo = emo.cuda(0)
-            # remove else
-            x = x[:1]
-            x_lengths = x_lengths[:1]
-            spec = spec[:1]
-            spec_lengths = spec_lengths[:1]
-            y = y[:1]
-            y_lengths = y_lengths[:1]
-            speakers = speakers[:1]
-            emo = emo[:1]
-            break
-        y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers,emo, max_len=1000)
-        y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
-
-        mel = spec_to_mel_torch(
-            spec,
-            hps.data.filter_length,
-            hps.data.n_mel_channels,
-            hps.data.sampling_rate,
-            hps.data.mel_fmin,
-            hps.data.mel_fmax)
-        y_hat_mel = mel_spectrogram_torch(
-            y_hat.squeeze(1).float(),
-            hps.data.filter_length,
-            hps.data.n_mel_channels,
-            hps.data.sampling_rate,
-            hps.data.hop_length,
-            hps.data.win_length,
-            hps.data.mel_fmin,
-            hps.data.mel_fmax
-        )
-    image_dict = {
-        "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
-    }
-    audio_dict = {
-        "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
-    }
-    if global_step == 0:
-        image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
-        audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
-
-    utils.summarize(
-        writer=writer_eval,
-        global_step=global_step,
-        images=image_dict,
-        audios=audio_dict,
-        audio_sampling_rate=hps.data.sampling_rate
-    )
-    generator.train()
-
-
-if __name__ == "__main__":
-    main()
spaces/Armandoliv/cars-parts-segmentation-resnet18/app.py
DELETED
@@ -1,184 +0,0 @@
-import segmentation_models as sm
-import numpy as np
-import os
-import cv2
-import keras
-import matplotlib.colors as colorsHTML
-from PIL import Image
-import gradio as gr
-
-import os
-os.system('wget https://huggingface.co/Armandoliv/cars-parts-segmentation-unet-resnet18/resolve/main/best_model.h5')
-os.system('pip -qq install pycocotools @ git+https://github.com/philferriere/cocoapi.git@2929bd2ef6b451054755dfd7ceb09278f935f7ad#subdirectory=PythonAPI')
-
-c= ['_background_', 'back_bumper', 'back_glass', 'back_left_door','back_left_light',
-    'back_right_door', 'back_right_light', 'front_bumper','front_glass',
-    'front_left_door', 'front_left_light', 'front_right_door', 'front_right_light', 'hood', 'left_mirror',
-    'right_mirror', 'tailgate', 'trunk', 'wheel']
-
-colors = [ (245,255,250), (75,0,130), (0,255,0), (32,178,170),(0,0,255), (0,255,255), (255,0,255), (128,0,128), (255,140,0),
-           (85,107,47), (102,205,170), (0,191,255), (255,0,0), (255,228,196), (205,133,63),
-           (220,20,60), (255,69,0), (143,188,143), (255,255,0)]
-
-
-sm.set_framework('tf.keras')
-
-sm.framework()
-
-BACKBONE = 'resnet18'
-n_classes = 19
-activation = 'softmax'
-
-#create model
-model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
-
-# load best weights
-model.load_weights('best_model.h5')
-
-def get_colored_segmentation_image(seg_arr, n_classes, colors=colors):
-    output_height = seg_arr.shape[0]
-    output_width = seg_arr.shape[1]
-
-    seg_img = np.zeros((output_height, output_width, 3))
-
-    for c in range(n_classes):
-        seg_arr_c = seg_arr[:, :] == c
-        # print(sum(sum(seg_arr_c)), colors[c] )
-        seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
-        seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
-        seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
-
-    return seg_img/255
-
-def get_legends(class_names, colors, tags):
-
-    n_classes = len(class_names)
-    legend = np.zeros(((len(class_names) * 25) + 25, 125, 3),
-                      dtype="uint8") + 255
-
-    class_names_colors = enumerate(zip(class_names[:n_classes],
-                                       colors[:n_classes]))
-    j = 0
-    for (i, (class_name, color)) in class_names_colors:
-        if i in tags:
-            color = [int(c) for c in color]
-            cv2.putText(legend, class_name, (5, (j * 25) + 17),
-                        cv2.FONT_HERSHEY_COMPLEX, 0.35, (0, 0, 0), 1)
-            cv2.rectangle(legend, (100, (j* 25)), (125, (j * 25) + 25),
-                          tuple(color), -1)
-            j +=1
-    return legend
-
-
-
-def preprocess_image(path_img):
-    img = Image.open(path_img)
-    ww = 512
-    hh = 512
-    img.thumbnail((hh, ww))
-    i = np.array(img)
-    ht, wd, cc= i.shape
-
-    # create new image of desired size and color (blue) for padding
-    color = (0,0,0)
-    result = np.full((hh,ww,cc), color, dtype=np.uint8)
-
-    # copy img image into center of result image
-    result[:ht, :wd] = img
-    return result, ht, wd
-
-def concat_lengends(seg_img, legend_img):
-
-    new_h = np.maximum(seg_img.shape[0], legend_img.shape[0])
-    new_w = seg_img.shape[1] + legend_img.shape[1]
-
-    out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0]
-
-    out_img[:legend_img.shape[0], : legend_img.shape[1]] = np.copy(legend_img)
-    out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img)
-
-    return out_img
-
-def main_convert(filename):
-
-    print(filename)
-    #load the image
-    img_path = filename
-    img = Image.open(img_path).convert("RGB")
-    tags = []
-
-    #preprocess the image
-    img_scaled_arr = preprocess_image(img_path)
-    image = np.expand_dims(img_scaled_arr[0], axis=0)
-
-    #make the predictions
-    pr_mask = model.predict(image).squeeze()
-    pr_mask_int = np.zeros((pr_mask.shape[0],pr_mask.shape[1]))
-
-    #filter the smallest noisy segments
-    kernel = np.ones((5, 5), 'uint8')
-
-    for i in range(1,19):
-        array_one = np.round(pr_mask[:,:,i])
-        op = cv2.morphologyEx(array_one, cv2.MORPH_OPEN, kernel)
-        if sum(sum(op ==1)) > 100:
-            tags.append(i)
-            pr_mask_int[op ==1] = i
-
-    img_segmented = np.array(Image.fromarray(pr_mask_int[:img_scaled_arr[1], :img_scaled_arr[2]]).resize(img.size))
-
-    seg = get_colored_segmentation_image(img_segmented,19, colors=colors)
-
-    fused_img = ((np.array(img)/255)/2 + seg/2).astype('float32')
-
-    seg = Image.fromarray((seg*255).astype(np.uint8))
-    fused_img = Image.fromarray((fused_img *255).astype(np.uint8))
-
-    #get the legends
-    legend_predicted = get_legends(c, colors, tags)
-
-    final_img = concat_lengends(np.array(fused_img), np.array(legend_predicted))
-
-    return final_img, seg
-
-
-
-inputs = [gr.Image(type="filepath", label="Car Image")]
-outputs = [gr.Image(type="PIL.Image", label="Detected Segments Image"),gr.Image(type="PIL.Image", label="Segment Image")]
-
-
-title = "Car Parts Segmentation APP"
-description = """This demo uses AI Models to detect 18 parts of cars: \n
-1: background,
-2: back bumper,
-3: back glass,
-4: back left door,
-5: back left light,
-6: back right door,
-7: back right light,
-8: front bumper,
-9: front glass,
-10: front left door,
-11: front left light,
-12: front right door,
-13: front right light,
-14: hood,
-15: left mirror,
-16: right mirror,
-17: tailgate,
-18: trunk,
-19: wheel"""
-
-examples = [['test_image.jpeg']]
-io = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples,
-                  css= """.gr-button-primary { background: -webkit-linear-gradient(
-                90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764;
-                background: linear-gradient(
-                90deg, #355764 0%, #55a8a1 100% ) !important;
-                background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important;
-                background: -webkit-linear-gradient(
-                90deg, #355764 0%, #55a8a1 100% ) !important;
-                color:white !important}"""
-                  )
-
-io.launch()
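
For reference, the core pipeline can be exercised without the Gradio UI; this sketch assumes the weights were downloaded by the `os.system` call at the top of the file and that the example image from the `examples` list is present locally.

```python
# Sketch: run the deleted app's pipeline on a single image (path reuses the bundled example).
annotated, mask = main_convert("test_image.jpeg")
Image.fromarray(annotated).save("annotated.png")   # fused image with the per-part legend
mask.save("mask.png")                              # colour-coded segmentation only
```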
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_windows.py
DELETED
@@ -1,72 +0,0 @@
-import sys
-from dataclasses import dataclass
-
-
-@dataclass
-class WindowsConsoleFeatures:
-    """Windows features available."""
-
-    vt: bool = False
-    """The console supports VT codes."""
-    truecolor: bool = False
-    """The console supports truecolor."""
-
-
-try:
-    import ctypes
-    from ctypes import LibraryLoader
-
-    if sys.platform == "win32":
-        windll = LibraryLoader(ctypes.WinDLL)
-    else:
-        windll = None
-        raise ImportError("Not windows")
-
-    from pip._vendor.rich._win32_console import (
-        ENABLE_VIRTUAL_TERMINAL_PROCESSING,
-        GetConsoleMode,
-        GetStdHandle,
-        LegacyWindowsError,
-    )
-
-except (AttributeError, ImportError, ValueError):
-
-    # Fallback if we can't load the Windows DLL
-    def get_windows_console_features() -> WindowsConsoleFeatures:
-        features = WindowsConsoleFeatures()
-        return features
-
-else:
-
-    def get_windows_console_features() -> WindowsConsoleFeatures:
-        """Get windows console features.
-
-        Returns:
-            WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
-        """
-        handle = GetStdHandle()
-        try:
-            console_mode = GetConsoleMode(handle)
-            success = True
-        except LegacyWindowsError:
-            console_mode = 0
-            success = False
-        vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
-        truecolor = False
-        if vt:
-            win_version = sys.getwindowsversion()
-            truecolor = win_version.major > 10 or (
-                win_version.major == 10 and win_version.build >= 15063
-            )
-        features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
-        return features
-
-
-if __name__ == "__main__":
-    import platform
-
-    features = get_windows_console_features()
-    from pip._vendor.rich import print
-
-    print(f'platform="{platform.system()}"')
-    print(repr(features))
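
A short usage sketch of the module above; on non-Windows platforms the fallback branch simply returns a features object with both flags False.

```python
# Sketch: query console capabilities through pip's vendored rich (import path matches
# the deleted file's location).
from pip._vendor.rich._windows import get_windows_console_features

features = get_windows_console_features()
print(features.vt, features.truecolor)
```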
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/__init__.py
DELETED
@@ -1,247 +0,0 @@
-"""Extensions to the 'distutils' for large or complex distributions"""
-
-import functools
-import os
-import re
-import warnings
-
-import _distutils_hack.override  # noqa: F401
-
-import distutils.core
-from distutils.errors import DistutilsOptionError
-from distutils.util import convert_path as _convert_path
-
-from ._deprecation_warning import SetuptoolsDeprecationWarning
-
-import setuptools.version
-from setuptools.extension import Extension
-from setuptools.dist import Distribution
-from setuptools.depends import Require
-from setuptools.discovery import PackageFinder, PEP420PackageFinder
-from . import monkey
-from . import logging
-
-
-__all__ = [
-    'setup',
-    'Distribution',
-    'Command',
-    'Extension',
-    'Require',
-    'SetuptoolsDeprecationWarning',
-    'find_packages',
-    'find_namespace_packages',
-]
-
-__version__ = setuptools.version.__version__
-
-bootstrap_install_from = None
-
-
-find_packages = PackageFinder.find
-find_namespace_packages = PEP420PackageFinder.find
-
-
-def _install_setup_requires(attrs):
-    # Note: do not use `setuptools.Distribution` directly, as
-    # our PEP 517 backend patch `distutils.core.Distribution`.
-    class MinimalDistribution(distutils.core.Distribution):
-        """
-        A minimal version of a distribution for supporting the
-        fetch_build_eggs interface.
-        """
-
-        def __init__(self, attrs):
-            _incl = 'dependency_links', 'setup_requires'
-            filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
-            super().__init__(filtered)
-            # Prevent accidentally triggering discovery with incomplete set of attrs
-            self.set_defaults._disable()
-
-        def _get_project_config_files(self, filenames=None):
-            """Ignore ``pyproject.toml``, they are not related to setup_requires"""
-            try:
-                cfg, toml = super()._split_standard_project_metadata(filenames)
-                return cfg, ()
-            except Exception:
-                return filenames, ()
-
-        def finalize_options(self):
-            """
-            Disable finalize_options to avoid building the working set.
-            Ref #2158.
-            """
-
-    dist = MinimalDistribution(attrs)
-
-    # Honor setup.cfg's options.
-    dist.parse_config_files(ignore_option_errors=True)
-    if dist.setup_requires:
-        dist.fetch_build_eggs(dist.setup_requires)
-
-
-def setup(**attrs):
-    # Make sure we have any requirements needed to interpret 'attrs'.
-    logging.configure()
-    _install_setup_requires(attrs)
-    return distutils.core.setup(**attrs)
-
-
-setup.__doc__ = distutils.core.setup.__doc__
-
-
-_Command = monkey.get_unpatched(distutils.core.Command)
-
-
-class Command(_Command):
-    """
-    Setuptools internal actions are organized using a *command design pattern*.
-    This means that each action (or group of closely related actions) executed during
-    the build should be implemented as a ``Command`` subclass.
-
-    These commands are abstractions and do not necessarily correspond to a command that
-    can (or should) be executed via a terminal, in a CLI fashion (although historically
-    they would).
-
-    When creating a new command from scratch, custom defined classes **SHOULD** inherit
-    from ``setuptools.Command`` and implement a few mandatory methods.
-    Between these mandatory methods, are listed:
-
-    .. method:: initialize_options(self)
-
-        Set or (reset) all options/attributes/caches used by the command
-        to their default values. Note that these values may be overwritten during
-        the build.
-
-    .. method:: finalize_options(self)
-
-        Set final values for all options/attributes used by the command.
-        Most of the time, each option/attribute/cache should only be set if it does not
-        have any value yet (e.g. ``if self.attr is None: self.attr = val``).
-
-    .. method:: run(self)
-
-        Execute the actions intended by the command.
-        (Side effects **SHOULD** only take place when ``run`` is executed,
-        for example, creating new files or writing to the terminal output).
-
-    A useful analogy for command classes is to think of them as subroutines with local
-    variables called "options". The options are "declared" in ``initialize_options()``
-    and "defined" (given their final values, aka "finalized") in ``finalize_options()``,
-    both of which must be defined by every command class. The "body" of the subroutine,
-    (where it does all the work) is the ``run()`` method.
-    Between ``initialize_options()`` and ``finalize_options()``, ``setuptools`` may set
-    the values for options/attributes based on user's input (or circumstance),
-    which means that the implementation should be careful to not overwrite values in
-    ``finalize_options`` unless necessary.
-
-    Please note that other commands (or other parts of setuptools) may also overwrite
-    the values of the command's options/attributes multiple times during the build
-    process.
-    Therefore it is important to consistently implement ``initialize_options()`` and
-    ``finalize_options()``. For example, all derived attributes (or attributes that
-    depend on the value of other attributes) **SHOULD** be recomputed in
-    ``finalize_options``.
-
-    When overwriting existing commands, custom defined classes **MUST** abide by the
-    same APIs implemented by the original class. They also **SHOULD** inherit from the
-    original class.
-    """
-
-    command_consumes_arguments = False
-
-    def __init__(self, dist, **kw):
-        """
-        Construct the command for dist, updating
-        vars(self) with any keyword parameters.
-        """
-        super().__init__(dist)
-        vars(self).update(kw)
-
-    def _ensure_stringlike(self, option, what, default=None):
-        val = getattr(self, option)
-        if val is None:
-            setattr(self, option, default)
-            return default
-        elif not isinstance(val, str):
-            raise DistutilsOptionError(
-                "'%s' must be a %s (got `%s`)" % (option, what, val)
-            )
-        return val
-
-    def ensure_string_list(self, option):
-        r"""Ensure that 'option' is a list of strings.  If 'option' is
-        currently a string, we split it either on /,\s*/ or /\s+/, so
-        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
-        ["foo", "bar", "baz"].
-
-        ..
-           TODO: This method seems to be similar to the one in ``distutils.cmd``
-           Probably it is just here for backward compatibility with old Python versions?
-
-        :meta private:
-        """
-        val = getattr(self, option)
-        if val is None:
-            return
-        elif isinstance(val, str):
-            setattr(self, option, re.split(r',\s*|\s+', val))
-        else:
-            if isinstance(val, list):
-                ok = all(isinstance(v, str) for v in val)
-            else:
-                ok = False
-            if not ok:
-                raise DistutilsOptionError(
-                    "'%s' must be a list of strings (got %r)" % (option, val)
-                )
-
-    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
-        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
-        vars(cmd).update(kw)
-        return cmd
-
-
-def _find_all_simple(path):
-    """
-    Find all files under 'path'
-    """
-    results = (
-        os.path.join(base, file)
-        for base, dirs, files in os.walk(path, followlinks=True)
-        for file in files
-    )
-    return filter(os.path.isfile, results)
-
-
-def findall(dir=os.curdir):
-    """
-    Find all files under 'dir' and return the list of full filenames.
-    Unless dir is '.', return full filenames with dir prepended.
|
221 |
-
"""
|
222 |
-
files = _find_all_simple(dir)
|
223 |
-
if dir == os.curdir:
|
224 |
-
make_rel = functools.partial(os.path.relpath, start=dir)
|
225 |
-
files = map(make_rel, files)
|
226 |
-
return list(files)
|
227 |
-
|
228 |
-
|
229 |
-
@functools.wraps(_convert_path)
|
230 |
-
def convert_path(pathname):
|
231 |
-
from inspect import cleandoc
|
232 |
-
|
233 |
-
msg = """
|
234 |
-
The function `convert_path` is considered internal and not part of the public API.
|
235 |
-
Its direct usage by 3rd-party packages is considered deprecated and the function
|
236 |
-
may be removed in the future.
|
237 |
-
"""
|
238 |
-
warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)
|
239 |
-
return _convert_path(pathname)
|
240 |
-
|
241 |
-
|
242 |
-
class sic(str):
|
243 |
-
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
|
244 |
-
|
245 |
-
|
246 |
-
# Apply monkey patches
|
247 |
-
monkey.patch_all()
|
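The ``Command`` docstring in the deleted module above spells out the contract for custom build steps: declare options in ``initialize_options()``, finalize them in ``finalize_options()``, and do the work in ``run()``. As a hedged sketch only (this class is not part of the commit; the ``hello`` command name and its option are made up for illustration), a subclass following that contract could look like:

# Hypothetical example, not taken from the deleted file: a minimal setuptools
# command that declares its option, derives its final value, and performs its
# side effect only in run().
from setuptools import Command


class hello(Command):
    description = "print a configurable greeting"
    user_options = [("name=", None, "name to greet")]

    def initialize_options(self):
        # Declare every option with a default value.
        self.name = None

    def finalize_options(self):
        # Only derive a final value if the user did not supply one.
        if self.name is None:
            self.name = "world"

    def run(self):
        # Side effects belong here.
        print("hello, %s" % self.name)

Such a class would typically be registered through ``cmdclass={"hello": hello}`` when calling ``setup()``.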
spaces/BairaS/Tabular_ML/app.py
DELETED
@@ -1,87 +0,0 @@
-import streamlit as st
-import plotly.figure_factory as ff
-import matplotlib, matplotlib.pyplot as pl
-import shap
-import pandas as pd
-import os
-
-#import profiling capability
-import pandas_profiling
-from streamlit_pandas_profiling import st_profile_report
-
-#ML stuff
-from pycaret.classification import *
-#from pycaret.regression import setup, compare_models, pull, save_model, load_model
-
-if os.path.exists("sourcedata.csv"):
-    df = pd.read_csv("sourcedata.csv", index_col=None)
-
-with st.sidebar:
-    st.image("https://www.onepointltd.com/wp-content/uploads/2020/03/inno2.png")
-    st.title("Tabular_ML")
-    choice = st.radio("Navigation", ["Upload", "Profiling", "Modelling", "Analyse", "Download"])
-    st.info("This application allows you to build an automated ML pipeline using Streamlit, Panadas Profiling and PyCaret.")
-
-if choice == "Upload":
-    st.title("Upload Your Data for Modelling!")
-    file = st.file_uploader("Upload Your Dataset Here")
-    if file:
-        df = pd.read_csv(file, index_col=None)
-        df.to_csv("sourcedata.csv", index=None)
-        st.dataframe(df)
-
-if choice == "Profiling":
-    st.title("Automated Exploratory Data Analysis")
-    #profile_report = df.profile_report()
-    #st_profile_report(profile_report)
-    st_profile_report(pandas_profiling.ProfileReport(df))
-
-if choice == "Modelling":
-    chosen_target = st.selectbox('Choose the Target Column', df.columns)
-    if st.button('Run Modelling'):
-        setup(df, target=chosen_target, silent=True)
-        setup_df = pull()
-        #st.dataframe(setup_df)
-        best_model = compare_models(include = ['rf','et', 'dt', 'lightgbm'])
-        compare_df = pull()
-        st.dataframe(compare_df)
-        save_model(best_model, 'best_model')
-
-if choice == "Analyse":
-    st.title("Correlation Analysis")
-    best_model = load_model('best_model')
-    train_pipe = best_model[:-1].transform(df)
-    explainer = shap.TreeExplainer(best_model.named_steps["trained_model"])
-    shap_values = explainer.shap_values(train_pipe)
-    pl.title('Assessing feature importance based on Shap values')
-    shap.summary_plot(shap_values,df,plot_type="bar",show=False)
-    st.pyplot(bbox_inches='tight')
-    pl.clf()
-    #st.pyplot(shap.force_plot(explainer.expected_value[0], shap_values[0]))
-    #shap.force_plot(explainer.expected_value, shap_values, train_pipe)
-    #st.write(interpret_model(train_pipe, plot = 'correlation'))
-
-    #chosen_target = st.selectbox('Choose the Target Column', df.columns)
-    #if st.button('Run Modelling'):
-    # setup(df, target=chosen_target, silent=True)
-    # setup_df = pull()
-
-    # creating a model
-    # xgboost = create_model('xgboost')
-
-    # interpret model
-    # st.write(interpret_model(xgboost,, plot = 'correlation'))
-    #st.plotly_chart(interpret_model(xgboost), use_container_width=True)
-
-    #chosen_target = st.selectbox('Choose the Target Column', df.columns)
-    #if st.button('Run xgboost Modelling Analysis'):
-    #chosen_target = st.selectbox('Choose the Target Column', df.columns)
-    # creating a model
-    #setup(df, target=chosen_target, silent=True)
-    #xgboost = create_model('xgboost')
-    #interpret model
-
-
-
-if choice == "Download":
-    pass
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/custom.py
DELETED
@@ -1,38 +0,0 @@
-import os
-import numpy as np
-import albumentations
-from torch.utils.data import Dataset
-
-from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex
-
-
-class CustomBase(Dataset):
-    def __init__(self, *args, **kwargs):
-        super().__init__()
-        self.data = None
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, i):
-        example = self.data[i]
-        return example
-
-
-
-class CustomTrain(CustomBase):
-    def __init__(self, size, training_images_list_file):
-        super().__init__()
-        with open(training_images_list_file, "r") as f:
-            paths = f.read().splitlines()
-        self.data = ImagePaths(paths=paths, size=size, random_crop=False)
-
-
-class CustomTest(CustomBase):
-    def __init__(self, size, test_images_list_file):
-        super().__init__()
-        with open(test_images_list_file, "r") as f:
-            paths = f.read().splitlines()
-        self.data = ImagePaths(paths=paths, size=size, random_crop=False)
-
-
spaces/BetterAPI/BetterChat/src/hooks.server.ts
DELETED
@@ -1,37 +0,0 @@
-import { dev } from "$app/environment";
-import { COOKIE_NAME } from "$env/static/private";
-import type { Handle } from "@sveltejs/kit";
-import { PUBLIC_GOOGLE_ANALYTICS_ID } from "$env/static/public";
-import { addYears } from "date-fns";
-
-export const handle: Handle = async ({ event, resolve }) => {
-	const token = event.cookies.get(COOKIE_NAME);
-
-	event.locals.sessionId = token || crypto.randomUUID();
-
-	// Refresh cookie expiration date
-	event.cookies.set(COOKIE_NAME, event.locals.sessionId, {
-		path: "/",
-		// So that it works inside the space's iframe
-		sameSite: dev ? "lax" : "none",
-		secure: !dev,
-		httpOnly: true,
-		expires: addYears(new Date(), 1),
-	});
-
-	let replaced = false;
-
-	const response = await resolve(event, {
-		transformPageChunk: (chunk) => {
-			// For some reason, Sveltekit doesn't let us load env variables from .env in the app.html template
-			if (replaced || !chunk.html.includes("%gaId%")) {
-				return chunk.html;
-			}
-			replaced = true;
-
-			return chunk.html.replace("%gaId%", PUBLIC_GOOGLE_ANALYTICS_ID);
-		},
-	});
-
-	return response;
-};
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/containers.py
DELETED
@@ -1,167 +0,0 @@
|
|
1 |
-
from itertools import zip_longest
|
2 |
-
from typing import (
|
3 |
-
Iterator,
|
4 |
-
Iterable,
|
5 |
-
List,
|
6 |
-
Optional,
|
7 |
-
Union,
|
8 |
-
overload,
|
9 |
-
TypeVar,
|
10 |
-
TYPE_CHECKING,
|
11 |
-
)
|
12 |
-
|
13 |
-
if TYPE_CHECKING:
|
14 |
-
from .console import (
|
15 |
-
Console,
|
16 |
-
ConsoleOptions,
|
17 |
-
JustifyMethod,
|
18 |
-
OverflowMethod,
|
19 |
-
RenderResult,
|
20 |
-
RenderableType,
|
21 |
-
)
|
22 |
-
from .text import Text
|
23 |
-
|
24 |
-
from .cells import cell_len
|
25 |
-
from .measure import Measurement
|
26 |
-
|
27 |
-
T = TypeVar("T")
|
28 |
-
|
29 |
-
|
30 |
-
class Renderables:
|
31 |
-
"""A list subclass which renders its contents to the console."""
|
32 |
-
|
33 |
-
def __init__(
|
34 |
-
self, renderables: Optional[Iterable["RenderableType"]] = None
|
35 |
-
) -> None:
|
36 |
-
self._renderables: List["RenderableType"] = (
|
37 |
-
list(renderables) if renderables is not None else []
|
38 |
-
)
|
39 |
-
|
40 |
-
def __rich_console__(
|
41 |
-
self, console: "Console", options: "ConsoleOptions"
|
42 |
-
) -> "RenderResult":
|
43 |
-
"""Console render method to insert line-breaks."""
|
44 |
-
yield from self._renderables
|
45 |
-
|
46 |
-
def __rich_measure__(
|
47 |
-
self, console: "Console", options: "ConsoleOptions"
|
48 |
-
) -> "Measurement":
|
49 |
-
dimensions = [
|
50 |
-
Measurement.get(console, options, renderable)
|
51 |
-
for renderable in self._renderables
|
52 |
-
]
|
53 |
-
if not dimensions:
|
54 |
-
return Measurement(1, 1)
|
55 |
-
_min = max(dimension.minimum for dimension in dimensions)
|
56 |
-
_max = max(dimension.maximum for dimension in dimensions)
|
57 |
-
return Measurement(_min, _max)
|
58 |
-
|
59 |
-
def append(self, renderable: "RenderableType") -> None:
|
60 |
-
self._renderables.append(renderable)
|
61 |
-
|
62 |
-
def __iter__(self) -> Iterable["RenderableType"]:
|
63 |
-
return iter(self._renderables)
|
64 |
-
|
65 |
-
|
66 |
-
class Lines:
|
67 |
-
"""A list subclass which can render to the console."""
|
68 |
-
|
69 |
-
def __init__(self, lines: Iterable["Text"] = ()) -> None:
|
70 |
-
self._lines: List["Text"] = list(lines)
|
71 |
-
|
72 |
-
def __repr__(self) -> str:
|
73 |
-
return f"Lines({self._lines!r})"
|
74 |
-
|
75 |
-
def __iter__(self) -> Iterator["Text"]:
|
76 |
-
return iter(self._lines)
|
77 |
-
|
78 |
-
@overload
|
79 |
-
def __getitem__(self, index: int) -> "Text":
|
80 |
-
...
|
81 |
-
|
82 |
-
@overload
|
83 |
-
def __getitem__(self, index: slice) -> List["Text"]:
|
84 |
-
...
|
85 |
-
|
86 |
-
def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
|
87 |
-
return self._lines[index]
|
88 |
-
|
89 |
-
def __setitem__(self, index: int, value: "Text") -> "Lines":
|
90 |
-
self._lines[index] = value
|
91 |
-
return self
|
92 |
-
|
93 |
-
def __len__(self) -> int:
|
94 |
-
return self._lines.__len__()
|
95 |
-
|
96 |
-
def __rich_console__(
|
97 |
-
self, console: "Console", options: "ConsoleOptions"
|
98 |
-
) -> "RenderResult":
|
99 |
-
"""Console render method to insert line-breaks."""
|
100 |
-
yield from self._lines
|
101 |
-
|
102 |
-
def append(self, line: "Text") -> None:
|
103 |
-
self._lines.append(line)
|
104 |
-
|
105 |
-
def extend(self, lines: Iterable["Text"]) -> None:
|
106 |
-
self._lines.extend(lines)
|
107 |
-
|
108 |
-
def pop(self, index: int = -1) -> "Text":
|
109 |
-
return self._lines.pop(index)
|
110 |
-
|
111 |
-
def justify(
|
112 |
-
self,
|
113 |
-
console: "Console",
|
114 |
-
width: int,
|
115 |
-
justify: "JustifyMethod" = "left",
|
116 |
-
overflow: "OverflowMethod" = "fold",
|
117 |
-
) -> None:
|
118 |
-
"""Justify and overflow text to a given width.
|
119 |
-
|
120 |
-
Args:
|
121 |
-
console (Console): Console instance.
|
122 |
-
width (int): Number of characters per line.
|
123 |
-
justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
|
124 |
-
overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
|
125 |
-
|
126 |
-
"""
|
127 |
-
from .text import Text
|
128 |
-
|
129 |
-
if justify == "left":
|
130 |
-
for line in self._lines:
|
131 |
-
line.truncate(width, overflow=overflow, pad=True)
|
132 |
-
elif justify == "center":
|
133 |
-
for line in self._lines:
|
134 |
-
line.rstrip()
|
135 |
-
line.truncate(width, overflow=overflow)
|
136 |
-
line.pad_left((width - cell_len(line.plain)) // 2)
|
137 |
-
line.pad_right(width - cell_len(line.plain))
|
138 |
-
elif justify == "right":
|
139 |
-
for line in self._lines:
|
140 |
-
line.rstrip()
|
141 |
-
line.truncate(width, overflow=overflow)
|
142 |
-
line.pad_left(width - cell_len(line.plain))
|
143 |
-
elif justify == "full":
|
144 |
-
for line_index, line in enumerate(self._lines):
|
145 |
-
if line_index == len(self._lines) - 1:
|
146 |
-
break
|
147 |
-
words = line.split(" ")
|
148 |
-
words_size = sum(cell_len(word.plain) for word in words)
|
149 |
-
num_spaces = len(words) - 1
|
150 |
-
spaces = [1 for _ in range(num_spaces)]
|
151 |
-
index = 0
|
152 |
-
if spaces:
|
153 |
-
while words_size + num_spaces < width:
|
154 |
-
spaces[len(spaces) - index - 1] += 1
|
155 |
-
num_spaces += 1
|
156 |
-
index = (index + 1) % len(spaces)
|
157 |
-
tokens: List[Text] = []
|
158 |
-
for index, (word, next_word) in enumerate(
|
159 |
-
zip_longest(words, words[1:])
|
160 |
-
):
|
161 |
-
tokens.append(word)
|
162 |
-
if index < len(spaces):
|
163 |
-
style = word.get_style_at_offset(console, -1)
|
164 |
-
next_style = next_word.get_style_at_offset(console, 0)
|
165 |
-
space_style = style if style == next_style else line.style
|
166 |
-
tokens.append(Text(" " * spaces[index], style=space_style))
|
167 |
-
self[line_index] = Text("").join(tokens)
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/jaraco/text/__init__.py
DELETED
@@ -1,599 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
import itertools
|
3 |
-
import textwrap
|
4 |
-
import functools
|
5 |
-
|
6 |
-
try:
|
7 |
-
from importlib.resources import files # type: ignore
|
8 |
-
except ImportError: # pragma: nocover
|
9 |
-
from setuptools.extern.importlib_resources import files # type: ignore
|
10 |
-
|
11 |
-
from setuptools.extern.jaraco.functools import compose, method_cache
|
12 |
-
from setuptools.extern.jaraco.context import ExceptionTrap
|
13 |
-
|
14 |
-
|
15 |
-
def substitution(old, new):
|
16 |
-
"""
|
17 |
-
Return a function that will perform a substitution on a string
|
18 |
-
"""
|
19 |
-
return lambda s: s.replace(old, new)
|
20 |
-
|
21 |
-
|
22 |
-
def multi_substitution(*substitutions):
|
23 |
-
"""
|
24 |
-
Take a sequence of pairs specifying substitutions, and create
|
25 |
-
a function that performs those substitutions.
|
26 |
-
|
27 |
-
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
|
28 |
-
'baz'
|
29 |
-
"""
|
30 |
-
substitutions = itertools.starmap(substitution, substitutions)
|
31 |
-
# compose function applies last function first, so reverse the
|
32 |
-
# substitutions to get the expected order.
|
33 |
-
substitutions = reversed(tuple(substitutions))
|
34 |
-
return compose(*substitutions)
|
35 |
-
|
36 |
-
|
37 |
-
class FoldedCase(str):
|
38 |
-
"""
|
39 |
-
A case insensitive string class; behaves just like str
|
40 |
-
except compares equal when the only variation is case.
|
41 |
-
|
42 |
-
>>> s = FoldedCase('hello world')
|
43 |
-
|
44 |
-
>>> s == 'Hello World'
|
45 |
-
True
|
46 |
-
|
47 |
-
>>> 'Hello World' == s
|
48 |
-
True
|
49 |
-
|
50 |
-
>>> s != 'Hello World'
|
51 |
-
False
|
52 |
-
|
53 |
-
>>> s.index('O')
|
54 |
-
4
|
55 |
-
|
56 |
-
>>> s.split('O')
|
57 |
-
['hell', ' w', 'rld']
|
58 |
-
|
59 |
-
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
|
60 |
-
['alpha', 'Beta', 'GAMMA']
|
61 |
-
|
62 |
-
Sequence membership is straightforward.
|
63 |
-
|
64 |
-
>>> "Hello World" in [s]
|
65 |
-
True
|
66 |
-
>>> s in ["Hello World"]
|
67 |
-
True
|
68 |
-
|
69 |
-
You may test for set inclusion, but candidate and elements
|
70 |
-
must both be folded.
|
71 |
-
|
72 |
-
>>> FoldedCase("Hello World") in {s}
|
73 |
-
True
|
74 |
-
>>> s in {FoldedCase("Hello World")}
|
75 |
-
True
|
76 |
-
|
77 |
-
String inclusion works as long as the FoldedCase object
|
78 |
-
is on the right.
|
79 |
-
|
80 |
-
>>> "hello" in FoldedCase("Hello World")
|
81 |
-
True
|
82 |
-
|
83 |
-
But not if the FoldedCase object is on the left:
|
84 |
-
|
85 |
-
>>> FoldedCase('hello') in 'Hello World'
|
86 |
-
False
|
87 |
-
|
88 |
-
In that case, use ``in_``:
|
89 |
-
|
90 |
-
>>> FoldedCase('hello').in_('Hello World')
|
91 |
-
True
|
92 |
-
|
93 |
-
>>> FoldedCase('hello') > FoldedCase('Hello')
|
94 |
-
False
|
95 |
-
"""
|
96 |
-
|
97 |
-
def __lt__(self, other):
|
98 |
-
return self.lower() < other.lower()
|
99 |
-
|
100 |
-
def __gt__(self, other):
|
101 |
-
return self.lower() > other.lower()
|
102 |
-
|
103 |
-
def __eq__(self, other):
|
104 |
-
return self.lower() == other.lower()
|
105 |
-
|
106 |
-
def __ne__(self, other):
|
107 |
-
return self.lower() != other.lower()
|
108 |
-
|
109 |
-
def __hash__(self):
|
110 |
-
return hash(self.lower())
|
111 |
-
|
112 |
-
def __contains__(self, other):
|
113 |
-
return super().lower().__contains__(other.lower())
|
114 |
-
|
115 |
-
def in_(self, other):
|
116 |
-
"Does self appear in other?"
|
117 |
-
return self in FoldedCase(other)
|
118 |
-
|
119 |
-
# cache lower since it's likely to be called frequently.
|
120 |
-
@method_cache
|
121 |
-
def lower(self):
|
122 |
-
return super().lower()
|
123 |
-
|
124 |
-
def index(self, sub):
|
125 |
-
return self.lower().index(sub.lower())
|
126 |
-
|
127 |
-
def split(self, splitter=' ', maxsplit=0):
|
128 |
-
pattern = re.compile(re.escape(splitter), re.I)
|
129 |
-
return pattern.split(self, maxsplit)
|
130 |
-
|
131 |
-
|
132 |
-
# Python 3.8 compatibility
|
133 |
-
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
|
134 |
-
|
135 |
-
|
136 |
-
@_unicode_trap.passes
|
137 |
-
def is_decodable(value):
|
138 |
-
r"""
|
139 |
-
Return True if the supplied value is decodable (using the default
|
140 |
-
encoding).
|
141 |
-
|
142 |
-
>>> is_decodable(b'\xff')
|
143 |
-
False
|
144 |
-
>>> is_decodable(b'\x32')
|
145 |
-
True
|
146 |
-
"""
|
147 |
-
value.decode()
|
148 |
-
|
149 |
-
|
150 |
-
def is_binary(value):
|
151 |
-
r"""
|
152 |
-
Return True if the value appears to be binary (that is, it's a byte
|
153 |
-
string and isn't decodable).
|
154 |
-
|
155 |
-
>>> is_binary(b'\xff')
|
156 |
-
True
|
157 |
-
>>> is_binary('\xff')
|
158 |
-
False
|
159 |
-
"""
|
160 |
-
return isinstance(value, bytes) and not is_decodable(value)
|
161 |
-
|
162 |
-
|
163 |
-
def trim(s):
|
164 |
-
r"""
|
165 |
-
Trim something like a docstring to remove the whitespace that
|
166 |
-
is common due to indentation and formatting.
|
167 |
-
|
168 |
-
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
|
169 |
-
'foo = bar\n\tbar = baz'
|
170 |
-
"""
|
171 |
-
return textwrap.dedent(s).strip()
|
172 |
-
|
173 |
-
|
174 |
-
def wrap(s):
|
175 |
-
"""
|
176 |
-
Wrap lines of text, retaining existing newlines as
|
177 |
-
paragraph markers.
|
178 |
-
|
179 |
-
>>> print(wrap(lorem_ipsum))
|
180 |
-
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
|
181 |
-
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
|
182 |
-
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
|
183 |
-
aliquip ex ea commodo consequat. Duis aute irure dolor in
|
184 |
-
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
|
185 |
-
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
|
186 |
-
culpa qui officia deserunt mollit anim id est laborum.
|
187 |
-
<BLANKLINE>
|
188 |
-
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
|
189 |
-
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
|
190 |
-
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
|
191 |
-
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
|
192 |
-
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
|
193 |
-
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
|
194 |
-
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
|
195 |
-
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
|
196 |
-
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
|
197 |
-
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
|
198 |
-
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
|
199 |
-
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
|
200 |
-
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
|
201 |
-
"""
|
202 |
-
paragraphs = s.splitlines()
|
203 |
-
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
|
204 |
-
return '\n\n'.join(wrapped)
|
205 |
-
|
206 |
-
|
207 |
-
def unwrap(s):
|
208 |
-
r"""
|
209 |
-
Given a multi-line string, return an unwrapped version.
|
210 |
-
|
211 |
-
>>> wrapped = wrap(lorem_ipsum)
|
212 |
-
>>> wrapped.count('\n')
|
213 |
-
20
|
214 |
-
>>> unwrapped = unwrap(wrapped)
|
215 |
-
>>> unwrapped.count('\n')
|
216 |
-
1
|
217 |
-
>>> print(unwrapped)
|
218 |
-
Lorem ipsum dolor sit amet, consectetur adipiscing ...
|
219 |
-
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
|
220 |
-
|
221 |
-
"""
|
222 |
-
paragraphs = re.split(r'\n\n+', s)
|
223 |
-
cleaned = (para.replace('\n', ' ') for para in paragraphs)
|
224 |
-
return '\n'.join(cleaned)
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
class Splitter(object):
|
230 |
-
"""object that will split a string with the given arguments for each call
|
231 |
-
|
232 |
-
>>> s = Splitter(',')
|
233 |
-
>>> s('hello, world, this is your, master calling')
|
234 |
-
['hello', ' world', ' this is your', ' master calling']
|
235 |
-
"""
|
236 |
-
|
237 |
-
def __init__(self, *args):
|
238 |
-
self.args = args
|
239 |
-
|
240 |
-
def __call__(self, s):
|
241 |
-
return s.split(*self.args)
|
242 |
-
|
243 |
-
|
244 |
-
def indent(string, prefix=' ' * 4):
|
245 |
-
"""
|
246 |
-
>>> indent('foo')
|
247 |
-
' foo'
|
248 |
-
"""
|
249 |
-
return prefix + string
|
250 |
-
|
251 |
-
|
252 |
-
class WordSet(tuple):
|
253 |
-
"""
|
254 |
-
Given an identifier, return the words that identifier represents,
|
255 |
-
whether in camel case, underscore-separated, etc.
|
256 |
-
|
257 |
-
>>> WordSet.parse("camelCase")
|
258 |
-
('camel', 'Case')
|
259 |
-
|
260 |
-
>>> WordSet.parse("under_sep")
|
261 |
-
('under', 'sep')
|
262 |
-
|
263 |
-
Acronyms should be retained
|
264 |
-
|
265 |
-
>>> WordSet.parse("firstSNL")
|
266 |
-
('first', 'SNL')
|
267 |
-
|
268 |
-
>>> WordSet.parse("you_and_I")
|
269 |
-
('you', 'and', 'I')
|
270 |
-
|
271 |
-
>>> WordSet.parse("A simple test")
|
272 |
-
('A', 'simple', 'test')
|
273 |
-
|
274 |
-
Multiple caps should not interfere with the first cap of another word.
|
275 |
-
|
276 |
-
>>> WordSet.parse("myABCClass")
|
277 |
-
('my', 'ABC', 'Class')
|
278 |
-
|
279 |
-
The result is a WordSet, so you can get the form you need.
|
280 |
-
|
281 |
-
>>> WordSet.parse("myABCClass").underscore_separated()
|
282 |
-
'my_ABC_Class'
|
283 |
-
|
284 |
-
>>> WordSet.parse('a-command').camel_case()
|
285 |
-
'ACommand'
|
286 |
-
|
287 |
-
>>> WordSet.parse('someIdentifier').lowered().space_separated()
|
288 |
-
'some identifier'
|
289 |
-
|
290 |
-
Slices of the result should return another WordSet.
|
291 |
-
|
292 |
-
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
|
293 |
-
'out_of_context'
|
294 |
-
|
295 |
-
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
|
296 |
-
'word set'
|
297 |
-
|
298 |
-
>>> example = WordSet.parse('figured it out')
|
299 |
-
>>> example.headless_camel_case()
|
300 |
-
'figuredItOut'
|
301 |
-
>>> example.dash_separated()
|
302 |
-
'figured-it-out'
|
303 |
-
|
304 |
-
"""
|
305 |
-
|
306 |
-
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
|
307 |
-
|
308 |
-
def capitalized(self):
|
309 |
-
return WordSet(word.capitalize() for word in self)
|
310 |
-
|
311 |
-
def lowered(self):
|
312 |
-
return WordSet(word.lower() for word in self)
|
313 |
-
|
314 |
-
def camel_case(self):
|
315 |
-
return ''.join(self.capitalized())
|
316 |
-
|
317 |
-
def headless_camel_case(self):
|
318 |
-
words = iter(self)
|
319 |
-
first = next(words).lower()
|
320 |
-
new_words = itertools.chain((first,), WordSet(words).camel_case())
|
321 |
-
return ''.join(new_words)
|
322 |
-
|
323 |
-
def underscore_separated(self):
|
324 |
-
return '_'.join(self)
|
325 |
-
|
326 |
-
def dash_separated(self):
|
327 |
-
return '-'.join(self)
|
328 |
-
|
329 |
-
def space_separated(self):
|
330 |
-
return ' '.join(self)
|
331 |
-
|
332 |
-
def trim_right(self, item):
|
333 |
-
"""
|
334 |
-
Remove the item from the end of the set.
|
335 |
-
|
336 |
-
>>> WordSet.parse('foo bar').trim_right('foo')
|
337 |
-
('foo', 'bar')
|
338 |
-
>>> WordSet.parse('foo bar').trim_right('bar')
|
339 |
-
('foo',)
|
340 |
-
>>> WordSet.parse('').trim_right('bar')
|
341 |
-
()
|
342 |
-
"""
|
343 |
-
return self[:-1] if self and self[-1] == item else self
|
344 |
-
|
345 |
-
def trim_left(self, item):
|
346 |
-
"""
|
347 |
-
Remove the item from the beginning of the set.
|
348 |
-
|
349 |
-
>>> WordSet.parse('foo bar').trim_left('foo')
|
350 |
-
('bar',)
|
351 |
-
>>> WordSet.parse('foo bar').trim_left('bar')
|
352 |
-
('foo', 'bar')
|
353 |
-
>>> WordSet.parse('').trim_left('bar')
|
354 |
-
()
|
355 |
-
"""
|
356 |
-
return self[1:] if self and self[0] == item else self
|
357 |
-
|
358 |
-
def trim(self, item):
|
359 |
-
"""
|
360 |
-
>>> WordSet.parse('foo bar').trim('foo')
|
361 |
-
('bar',)
|
362 |
-
"""
|
363 |
-
return self.trim_left(item).trim_right(item)
|
364 |
-
|
365 |
-
def __getitem__(self, item):
|
366 |
-
result = super(WordSet, self).__getitem__(item)
|
367 |
-
if isinstance(item, slice):
|
368 |
-
result = WordSet(result)
|
369 |
-
return result
|
370 |
-
|
371 |
-
@classmethod
|
372 |
-
def parse(cls, identifier):
|
373 |
-
matches = cls._pattern.finditer(identifier)
|
374 |
-
return WordSet(match.group(0) for match in matches)
|
375 |
-
|
376 |
-
@classmethod
|
377 |
-
def from_class_name(cls, subject):
|
378 |
-
return cls.parse(subject.__class__.__name__)
|
379 |
-
|
380 |
-
|
381 |
-
# for backward compatibility
|
382 |
-
words = WordSet.parse
|
383 |
-
|
384 |
-
|
385 |
-
def simple_html_strip(s):
|
386 |
-
r"""
|
387 |
-
Remove HTML from the string `s`.
|
388 |
-
|
389 |
-
>>> str(simple_html_strip(''))
|
390 |
-
''
|
391 |
-
|
392 |
-
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
|
393 |
-
A stormy day in paradise
|
394 |
-
|
395 |
-
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
|
396 |
-
Somebody tell the truth.
|
397 |
-
|
398 |
-
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
|
399 |
-
What about
|
400 |
-
multiple lines?
|
401 |
-
"""
|
402 |
-
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
|
403 |
-
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
|
404 |
-
return ''.join(texts)
|
405 |
-
|
406 |
-
|
407 |
-
class SeparatedValues(str):
|
408 |
-
"""
|
409 |
-
A string separated by a separator. Overrides __iter__ for getting
|
410 |
-
the values.
|
411 |
-
|
412 |
-
>>> list(SeparatedValues('a,b,c'))
|
413 |
-
['a', 'b', 'c']
|
414 |
-
|
415 |
-
Whitespace is stripped and empty values are discarded.
|
416 |
-
|
417 |
-
>>> list(SeparatedValues(' a, b , c, '))
|
418 |
-
['a', 'b', 'c']
|
419 |
-
"""
|
420 |
-
|
421 |
-
separator = ','
|
422 |
-
|
423 |
-
def __iter__(self):
|
424 |
-
parts = self.split(self.separator)
|
425 |
-
return filter(None, (part.strip() for part in parts))
|
426 |
-
|
427 |
-
|
428 |
-
class Stripper:
|
429 |
-
r"""
|
430 |
-
Given a series of lines, find the common prefix and strip it from them.
|
431 |
-
|
432 |
-
>>> lines = [
|
433 |
-
... 'abcdefg\n',
|
434 |
-
... 'abc\n',
|
435 |
-
... 'abcde\n',
|
436 |
-
... ]
|
437 |
-
>>> res = Stripper.strip_prefix(lines)
|
438 |
-
>>> res.prefix
|
439 |
-
'abc'
|
440 |
-
>>> list(res.lines)
|
441 |
-
['defg\n', '\n', 'de\n']
|
442 |
-
|
443 |
-
If no prefix is common, nothing should be stripped.
|
444 |
-
|
445 |
-
>>> lines = [
|
446 |
-
... 'abcd\n',
|
447 |
-
... '1234\n',
|
448 |
-
... ]
|
449 |
-
>>> res = Stripper.strip_prefix(lines)
|
450 |
-
>>> res.prefix = ''
|
451 |
-
>>> list(res.lines)
|
452 |
-
['abcd\n', '1234\n']
|
453 |
-
"""
|
454 |
-
|
455 |
-
def __init__(self, prefix, lines):
|
456 |
-
self.prefix = prefix
|
457 |
-
self.lines = map(self, lines)
|
458 |
-
|
459 |
-
@classmethod
|
460 |
-
def strip_prefix(cls, lines):
|
461 |
-
prefix_lines, lines = itertools.tee(lines)
|
462 |
-
prefix = functools.reduce(cls.common_prefix, prefix_lines)
|
463 |
-
return cls(prefix, lines)
|
464 |
-
|
465 |
-
def __call__(self, line):
|
466 |
-
if not self.prefix:
|
467 |
-
return line
|
468 |
-
null, prefix, rest = line.partition(self.prefix)
|
469 |
-
return rest
|
470 |
-
|
471 |
-
@staticmethod
|
472 |
-
def common_prefix(s1, s2):
|
473 |
-
"""
|
474 |
-
Return the common prefix of two lines.
|
475 |
-
"""
|
476 |
-
index = min(len(s1), len(s2))
|
477 |
-
while s1[:index] != s2[:index]:
|
478 |
-
index -= 1
|
479 |
-
return s1[:index]
|
480 |
-
|
481 |
-
|
482 |
-
def remove_prefix(text, prefix):
|
483 |
-
"""
|
484 |
-
Remove the prefix from the text if it exists.
|
485 |
-
|
486 |
-
>>> remove_prefix('underwhelming performance', 'underwhelming ')
|
487 |
-
'performance'
|
488 |
-
|
489 |
-
>>> remove_prefix('something special', 'sample')
|
490 |
-
'something special'
|
491 |
-
"""
|
492 |
-
null, prefix, rest = text.rpartition(prefix)
|
493 |
-
return rest
|
494 |
-
|
495 |
-
|
496 |
-
def remove_suffix(text, suffix):
|
497 |
-
"""
|
498 |
-
Remove the suffix from the text if it exists.
|
499 |
-
|
500 |
-
>>> remove_suffix('name.git', '.git')
|
501 |
-
'name'
|
502 |
-
|
503 |
-
>>> remove_suffix('something special', 'sample')
|
504 |
-
'something special'
|
505 |
-
"""
|
506 |
-
rest, suffix, null = text.partition(suffix)
|
507 |
-
return rest
|
508 |
-
|
509 |
-
|
510 |
-
def normalize_newlines(text):
|
511 |
-
r"""
|
512 |
-
Replace alternate newlines with the canonical newline.
|
513 |
-
|
514 |
-
>>> normalize_newlines('Lorem Ipsum\u2029')
|
515 |
-
'Lorem Ipsum\n'
|
516 |
-
>>> normalize_newlines('Lorem Ipsum\r\n')
|
517 |
-
'Lorem Ipsum\n'
|
518 |
-
>>> normalize_newlines('Lorem Ipsum\x85')
|
519 |
-
'Lorem Ipsum\n'
|
520 |
-
"""
|
521 |
-
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
|
522 |
-
pattern = '|'.join(newlines)
|
523 |
-
return re.sub(pattern, '\n', text)
|
524 |
-
|
525 |
-
|
526 |
-
def _nonblank(str):
|
527 |
-
return str and not str.startswith('#')
|
528 |
-
|
529 |
-
|
530 |
-
@functools.singledispatch
|
531 |
-
def yield_lines(iterable):
|
532 |
-
r"""
|
533 |
-
Yield valid lines of a string or iterable.
|
534 |
-
|
535 |
-
>>> list(yield_lines(''))
|
536 |
-
[]
|
537 |
-
>>> list(yield_lines(['foo', 'bar']))
|
538 |
-
['foo', 'bar']
|
539 |
-
>>> list(yield_lines('foo\nbar'))
|
540 |
-
['foo', 'bar']
|
541 |
-
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
|
542 |
-
['foo', 'baz #comment']
|
543 |
-
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
|
544 |
-
['foo', 'bar', 'baz', 'bing']
|
545 |
-
"""
|
546 |
-
return itertools.chain.from_iterable(map(yield_lines, iterable))
|
547 |
-
|
548 |
-
|
549 |
-
@yield_lines.register(str)
|
550 |
-
def _(text):
|
551 |
-
return filter(_nonblank, map(str.strip, text.splitlines()))
|
552 |
-
|
553 |
-
|
554 |
-
def drop_comment(line):
|
555 |
-
"""
|
556 |
-
Drop comments.
|
557 |
-
|
558 |
-
>>> drop_comment('foo # bar')
|
559 |
-
'foo'
|
560 |
-
|
561 |
-
A hash without a space may be in a URL.
|
562 |
-
|
563 |
-
>>> drop_comment('http://example.com/foo#bar')
|
564 |
-
'http://example.com/foo#bar'
|
565 |
-
"""
|
566 |
-
return line.partition(' #')[0]
|
567 |
-
|
568 |
-
|
569 |
-
def join_continuation(lines):
|
570 |
-
r"""
|
571 |
-
Join lines continued by a trailing backslash.
|
572 |
-
|
573 |
-
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
|
574 |
-
['foobar', 'baz']
|
575 |
-
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
|
576 |
-
['foobar', 'baz']
|
577 |
-
>>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
|
578 |
-
['foobarbaz']
|
579 |
-
|
580 |
-
Not sure why, but...
|
581 |
-
The character preceeding the backslash is also elided.
|
582 |
-
|
583 |
-
>>> list(join_continuation(['goo\\', 'dly']))
|
584 |
-
['godly']
|
585 |
-
|
586 |
-
A terrible idea, but...
|
587 |
-
If no line is available to continue, suppress the lines.
|
588 |
-
|
589 |
-
>>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
|
590 |
-
['foo']
|
591 |
-
"""
|
592 |
-
lines = iter(lines)
|
593 |
-
for item in lines:
|
594 |
-
while item.endswith('\\'):
|
595 |
-
try:
|
596 |
-
item = item[:-2].strip() + next(lines)
|
597 |
-
except StopIteration:
|
598 |
-
return
|
599 |
-
yield item
|
spaces/BwayKC/prompthero-openjourney-v2/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/prompthero/openjourney-v2").launch()
spaces/CK42/sentiment-model-comparison/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Sentiment Model Comparison
-emoji: 📚
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.1.4
-python_version: 3.9.13
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/DualStyleGAN/dualstylegan.py
DELETED
@@ -1,206 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import argparse
|
4 |
-
import os
|
5 |
-
import pathlib
|
6 |
-
import subprocess
|
7 |
-
import sys
|
8 |
-
from typing import Callable
|
9 |
-
|
10 |
-
import dlib
|
11 |
-
import huggingface_hub
|
12 |
-
import numpy as np
|
13 |
-
import PIL.Image
|
14 |
-
import torch
|
15 |
-
import torch.nn as nn
|
16 |
-
import torchvision.transforms as T
|
17 |
-
|
18 |
-
if os.getenv('SYSTEM') == 'spaces':
|
19 |
-
os.system("sed -i '10,17d' DualStyleGAN/model/stylegan/op/fused_act.py")
|
20 |
-
os.system("sed -i '10,17d' DualStyleGAN/model/stylegan/op/upfirdn2d.py")
|
21 |
-
|
22 |
-
app_dir = pathlib.Path(__file__).parent
|
23 |
-
submodule_dir = app_dir / 'DualStyleGAN'
|
24 |
-
sys.path.insert(0, submodule_dir.as_posix())
|
25 |
-
|
26 |
-
from model.dualstylegan import DualStyleGAN
|
27 |
-
from model.encoder.align_all_parallel import align_face
|
28 |
-
from model.encoder.psp import pSp
|
29 |
-
|
30 |
-
MODEL_REPO = 'CVPR/DualStyleGAN'
|
31 |
-
|
32 |
-
|
33 |
-
class Model:
|
34 |
-
def __init__(self):
|
35 |
-
self.device = torch.device(
|
36 |
-
'cuda:0' if torch.cuda.is_available() else 'cpu')
|
37 |
-
self.landmark_model = self._create_dlib_landmark_model()
|
38 |
-
self.encoder_dict = self._load_encoder()
|
39 |
-
self.transform = self._create_transform()
|
40 |
-
self.encoder_type = 'z+'
|
41 |
-
|
42 |
-
self.style_types = [
|
43 |
-
'cartoon',
|
44 |
-
'caricature',
|
45 |
-
'anime',
|
46 |
-
'arcane',
|
47 |
-
'comic',
|
48 |
-
'pixar',
|
49 |
-
'slamdunk',
|
50 |
-
]
|
51 |
-
self.generator_dict = {
|
52 |
-
style_type: self._load_generator(style_type)
|
53 |
-
for style_type in self.style_types
|
54 |
-
}
|
55 |
-
self.exstyle_dict = {
|
56 |
-
style_type: self._load_exstylecode(style_type)
|
57 |
-
for style_type in self.style_types
|
58 |
-
}
|
59 |
-
|
60 |
-
@staticmethod
|
61 |
-
def _create_dlib_landmark_model():
|
62 |
-
url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
|
63 |
-
path = pathlib.Path('shape_predictor_68_face_landmarks.dat')
|
64 |
-
if not path.exists():
|
65 |
-
bz2_path = 'shape_predictor_68_face_landmarks.dat.bz2'
|
66 |
-
torch.hub.download_url_to_file(url, bz2_path)
|
67 |
-
subprocess.run(f'bunzip2 -d {bz2_path}'.split())
|
68 |
-
return dlib.shape_predictor(path.as_posix())
|
69 |
-
|
70 |
-
def _load_encoder(self) -> nn.Module:
|
71 |
-
ckpt_path = huggingface_hub.hf_hub_download(MODEL_REPO,
|
72 |
-
'models/encoder.pt')
|
73 |
-
ckpt = torch.load(ckpt_path, map_location='cpu')
|
74 |
-
opts = ckpt['opts']
|
75 |
-
opts['device'] = self.device.type
|
76 |
-
opts['checkpoint_path'] = ckpt_path
|
77 |
-
opts = argparse.Namespace(**opts)
|
78 |
-
model = pSp(opts)
|
79 |
-
model.to(self.device)
|
80 |
-
model.eval()
|
81 |
-
|
82 |
-
ckpt_path = huggingface_hub.hf_hub_download(MODEL_REPO,
|
83 |
-
'models/encoder_wplus.pt')
|
84 |
-
ckpt = torch.load(ckpt_path, map_location='cpu')
|
85 |
-
opts = ckpt['opts']
|
86 |
-
opts['device'] = self.device.type
|
87 |
-
opts['checkpoint_path'] = ckpt_path
|
88 |
-
opts['output_size'] = 1024
|
89 |
-
opts = argparse.Namespace(**opts)
|
90 |
-
model2 = pSp(opts)
|
91 |
-
model2.to(self.device)
|
92 |
-
model2.eval()
|
93 |
-
|
94 |
-
return {'z+': model, 'w+': model2}
|
95 |
-
|
96 |
-
@staticmethod
|
97 |
-
def _create_transform() -> Callable:
|
98 |
-
transform = T.Compose([
|
99 |
-
T.Resize(256),
|
100 |
-
T.CenterCrop(256),
|
101 |
-
T.ToTensor(),
|
102 |
-
T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
|
103 |
-
])
|
104 |
-
return transform
|
105 |
-
|
106 |
-
def _load_generator(self, style_type: str) -> nn.Module:
|
107 |
-
model = DualStyleGAN(1024, 512, 8, 2, res_index=6)
|
108 |
-
ckpt_path = huggingface_hub.hf_hub_download(
|
109 |
-
MODEL_REPO, f'models/{style_type}/generator.pt')
|
110 |
-
ckpt = torch.load(ckpt_path, map_location='cpu')
|
111 |
-
model.load_state_dict(ckpt['g_ema'])
|
112 |
-
model.to(self.device)
|
113 |
-
model.eval()
|
114 |
-
return model
|
115 |
-
|
116 |
-
@staticmethod
|
117 |
-
def _load_exstylecode(style_type: str) -> dict[str, np.ndarray]:
|
118 |
-
if style_type in ['cartoon', 'caricature', 'anime']:
|
119 |
-
filename = 'refined_exstyle_code.npy'
|
120 |
-
else:
|
121 |
-
filename = 'exstyle_code.npy'
|
122 |
-
path = huggingface_hub.hf_hub_download(
|
123 |
-
MODEL_REPO, f'models/{style_type}/{filename}')
|
124 |
-
exstyles = np.load(path, allow_pickle=True).item()
|
125 |
-
return exstyles
|
126 |
-
|
127 |
-
def detect_and_align_face(self, image_path) -> np.ndarray:
|
128 |
-
image = align_face(filepath=image_path, predictor=self.landmark_model)
|
129 |
-
x, y = np.random.randint(255), np.random.randint(255)
|
130 |
-
r, g, b = image.getpixel((x, y))
|
131 |
-
image.putpixel(
|
132 |
-
(x, y), (r, g + 1, b)
|
133 |
-
) # trick to make sure run reconstruct_face() once any input setting changes
|
134 |
-
return image
|
135 |
-
|
136 |
-
@staticmethod
|
137 |
-
def denormalize(tensor: torch.Tensor) -> torch.Tensor:
|
138 |
-
return torch.clamp((tensor + 1) / 2 * 255, 0, 255).to(torch.uint8)
|
139 |
-
|
140 |
-
def postprocess(self, tensor: torch.Tensor) -> np.ndarray:
|
141 |
-
tensor = self.denormalize(tensor)
|
142 |
-
return tensor.cpu().numpy().transpose(1, 2, 0)
|
143 |
-
|
144 |
-
@torch.inference_mode()
|
145 |
-
def reconstruct_face(self, image: np.ndarray,
|
146 |
-
encoder_type: str) -> tuple[np.ndarray, torch.Tensor]:
|
147 |
-
if encoder_type == 'Z+ encoder (better stylization)':
|
148 |
-
self.encoder_type = 'z+'
|
149 |
-
z_plus_latent = True
|
150 |
-
return_z_plus_latent = True
|
151 |
-
else:
|
152 |
-
self.encoder_type = 'w+'
|
153 |
-
z_plus_latent = False
|
154 |
-
return_z_plus_latent = False
|
155 |
-
image = PIL.Image.fromarray(image)
|
156 |
-
input_data = self.transform(image).unsqueeze(0).to(self.device)
|
157 |
-
img_rec, instyle = self.encoder_dict[self.encoder_type](
|
158 |
-
input_data,
|
159 |
-
randomize_noise=False,
|
160 |
-
return_latents=True,
|
161 |
-
z_plus_latent=z_plus_latent,
|
162 |
-
return_z_plus_latent=return_z_plus_latent,
|
163 |
-
resize=False)
|
164 |
-
img_rec = torch.clamp(img_rec.detach(), -1, 1)
|
165 |
-
img_rec = self.postprocess(img_rec[0])
|
166 |
-
return img_rec, instyle
|
167 |
-
|
168 |
-
@torch.inference_mode()
|
169 |
-
def generate(self, style_type: str, style_id: int, structure_weight: float,
|
170 |
-
color_weight: float, structure_only: bool,
|
171 |
-
instyle: torch.Tensor) -> np.ndarray:
|
172 |
-
|
173 |
-
if self.encoder_type == 'z+':
|
174 |
-
z_plus_latent = True
|
175 |
-
input_is_latent = False
|
176 |
-
else:
|
177 |
-
z_plus_latent = False
|
178 |
-
input_is_latent = True
|
179 |
-
|
180 |
-
generator = self.generator_dict[style_type]
|
181 |
-
exstyles = self.exstyle_dict[style_type]
|
182 |
-
|
183 |
-
style_id = int(style_id)
|
184 |
-
stylename = list(exstyles.keys())[style_id]
|
185 |
-
|
186 |
-
latent = torch.tensor(exstyles[stylename]).to(self.device)
|
187 |
-
if structure_only and self.encoder_type == 'z+':
|
188 |
-
latent[0, 7:18] = instyle[0, 7:18]
|
189 |
-
exstyle = generator.generator.style(
|
190 |
-
latent.reshape(latent.shape[0] * latent.shape[1],
|
191 |
-
latent.shape[2])).reshape(latent.shape)
|
192 |
-
if structure_only and self.encoder_type == 'w+':
|
193 |
-
exstyle[:, 7:18] = instyle[:, 7:18]
|
194 |
-
|
195 |
-
img_gen, _ = generator([instyle],
|
196 |
-
exstyle,
|
197 |
-
input_is_latent=input_is_latent,
|
198 |
-
z_plus_latent=z_plus_latent,
|
199 |
-
truncation=0.7,
|
200 |
-
truncation_latent=0,
|
201 |
-
use_res=True,
|
202 |
-
interp_weights=[structure_weight] * 7 +
|
203 |
-
[color_weight] * 11)
|
204 |
-
img_gen = torch.clamp(img_gen.detach(), -1, 1)
|
205 |
-
img_gen = self.postprocess(img_gen[0])
|
206 |
-
return img_gen
|
spaces/CVPR/LIVE/cmake/FindThrust.cmake
DELETED
@@ -1,40 +0,0 @@
-##=============================================================================
-##
-##  Copyright (c) Kitware, Inc.
-##  All rights reserved.
-##  See LICENSE.txt for details.
-##
-##  This software is distributed WITHOUT ANY WARRANTY; without even
-##  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-##  PURPOSE.  See the above copyright notice for more information.
-##
-##  Copyright 2012 Sandia Corporation.
-##  Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
-##  the U.S. Government retains certain rights in this software.
-##
-##=============================================================================
-
-#
-# FindThrust
-#
-# This module finds the Thrust header files and extrats their version. It
-# sets the following variables.
-#
-# THRUST_INCLUDE_DIR - Include directory for thrust header files.  (All header
-#                      files will actually be in the thrust subdirectory.)
-# THRUST_VERSION - Version of thrust in the form "major.minor.patch".
-#
-
-find_path(THRUST_INCLUDE_DIR
-  HINTS /usr/include/cuda
-        /usr/local/include
-        /usr/local/cuda/include
-        ${CUDA_INCLUDE_DIRS}
-        ./thrust
-        ../thrust
-  NAMES thrust/version.h
-)
-
-if (THRUST_INCLUDE_DIR)
-  set(THRUST_FOUND TRUE)
-endif ()
spaces/CVPR/LIVE/thrust/thrust/async/reduce.h
DELETED
@@ -1,441 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
/*! \file async/reduce.h
|
18 |
-
* \brief Functions for asynchronously reducing a range to a single value.
|
19 |
-
*/
|
20 |
-
|
21 |
-
#pragma once
|
22 |
-
|
23 |
-
#include <thrust/detail/config.h>
|
24 |
-
#include <thrust/detail/cpp14_required.h>
|
25 |
-
|
26 |
-
#if THRUST_CPP_DIALECT >= 2014
|
27 |
-
|
28 |
-
#include <thrust/detail/static_assert.h>
|
29 |
-
#include <thrust/detail/select_system.h>
|
30 |
-
#include <thrust/type_traits/logical_metafunctions.h>
|
31 |
-
#include <thrust/type_traits/remove_cvref.h>
|
32 |
-
#include <thrust/type_traits/is_execution_policy.h>
|
33 |
-
#include <thrust/system/detail/adl/async/reduce.h>
|
34 |
-
|
35 |
-
#include <thrust/future.h>
|
36 |
-
|
37 |
-
namespace thrust
|
38 |
-
{
|
39 |
-
|
40 |
-
namespace async
|
41 |
-
{
|
42 |
-
|
43 |
-
namespace unimplemented
|
44 |
-
{
|
45 |
-
|
46 |
-
template <
|
47 |
-
typename DerivedPolicy
|
48 |
-
, typename ForwardIt, typename Sentinel, typename T, typename BinaryOp
|
49 |
-
>
|
50 |
-
__host__
|
51 |
-
future<DerivedPolicy, T>
|
52 |
-
async_reduce(
|
53 |
-
thrust::execution_policy<DerivedPolicy>&, ForwardIt, Sentinel, T, BinaryOp
|
54 |
-
)
|
55 |
-
{
|
56 |
-
THRUST_STATIC_ASSERT_MSG(
|
57 |
-
(thrust::detail::depend_on_instantiation<ForwardIt, false>::value)
|
58 |
-
, "this algorithm is not implemented for the specified system"
|
59 |
-
);
|
60 |
-
return {};
|
61 |
-
}
|
62 |
-
|
63 |
-
} // namespace unimplemented
|
64 |
-
|
65 |
-
namespace reduce_detail
|
66 |
-
{
|
67 |
-
|
68 |
-
using thrust::async::unimplemented::async_reduce;
|
69 |
-
|
70 |
-
struct reduce_fn final
|
71 |
-
{
|
72 |
-
template <
|
73 |
-
typename DerivedPolicy
|
74 |
-
, typename ForwardIt, typename Sentinel, typename T, typename BinaryOp
|
75 |
-
>
|
76 |
-
__host__
|
77 |
-
static auto call(
|
78 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
79 |
-
, ForwardIt&& first, Sentinel&& last
|
80 |
-
, T&& init
|
81 |
-
, BinaryOp&& op
|
82 |
-
)
|
83 |
-
// ADL dispatch.
|
84 |
-
THRUST_RETURNS(
|
85 |
-
async_reduce(
|
86 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
87 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
88 |
-
, THRUST_FWD(init)
|
89 |
-
, THRUST_FWD(op)
|
90 |
-
)
|
91 |
-
)
|
92 |
-
|
93 |
-
template <
|
94 |
-
typename DerivedPolicy
|
95 |
-
, typename ForwardIt, typename Sentinel, typename T
|
96 |
-
>
|
97 |
-
__host__
|
98 |
-
static auto call4(
|
99 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
100 |
-
, ForwardIt&& first, Sentinel&& last
|
101 |
-
, T&& init
|
102 |
-
, thrust::true_type
|
103 |
-
)
|
104 |
-
// ADL dispatch.
|
105 |
-
THRUST_RETURNS(
|
106 |
-
async_reduce(
|
107 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
108 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
109 |
-
, THRUST_FWD(init)
|
110 |
-
, thrust::plus<remove_cvref_t<T>>{}
|
111 |
-
)
|
112 |
-
)
|
113 |
-
|
114 |
-
template <
|
115 |
-
typename DerivedPolicy
|
116 |
-
, typename ForwardIt, typename Sentinel
|
117 |
-
>
|
118 |
-
__host__
|
119 |
-
static auto
|
120 |
-
call3(
|
121 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
122 |
-
, ForwardIt&& first, Sentinel&& last
|
123 |
-
, thrust::true_type
|
124 |
-
)
|
125 |
-
// ADL dispatch.
|
126 |
-
THRUST_RETURNS(
|
127 |
-
async_reduce(
|
128 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
129 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
130 |
-
, typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type{}
|
131 |
-
, thrust::plus<
|
132 |
-
remove_cvref_t<
|
133 |
-
typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
|
134 |
-
>
|
135 |
-
>{}
|
136 |
-
)
|
137 |
-
)
|
138 |
-
|
139 |
-
template <typename ForwardIt, typename Sentinel, typename T, typename BinaryOp>
|
140 |
-
__host__
|
141 |
-
static auto call4(ForwardIt&& first, Sentinel&& last,
|
142 |
-
T&& init,
|
143 |
-
BinaryOp&& op,
|
144 |
-
thrust::false_type)
|
145 |
-
THRUST_RETURNS(
|
146 |
-
reduce_fn::call(
|
147 |
-
thrust::detail::select_system(
|
148 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
149 |
-
)
|
150 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
151 |
-
, THRUST_FWD(init)
|
152 |
-
, THRUST_FWD(op)
|
153 |
-
)
|
154 |
-
)
|
155 |
-
|
156 |
-
template <typename ForwardIt, typename Sentinel, typename T>
|
157 |
-
__host__
|
158 |
-
static auto call3(ForwardIt&& first, Sentinel&& last,
|
159 |
-
T&& init,
|
160 |
-
thrust::false_type)
|
161 |
-
THRUST_RETURNS(
|
162 |
-
reduce_fn::call(
|
163 |
-
thrust::detail::select_system(
|
164 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
165 |
-
)
|
166 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
167 |
-
, THRUST_FWD(init)
|
168 |
-
, thrust::plus<remove_cvref_t<T>>{}
|
169 |
-
)
|
170 |
-
)
|
171 |
-
|
172 |
-
// MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect
|
173 |
-
// if T1 is an execution_policy by using SFINAE. Switching to a static
|
174 |
-
// dispatch pattern to prevent this.
|
175 |
-
template <typename T1, typename T2, typename T3>
|
176 |
-
__host__
|
177 |
-
static auto call(T1&& t1, T2&& t2, T3&& t3)
|
178 |
-
THRUST_RETURNS(
|
179 |
-
reduce_fn::call3(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3),
|
180 |
-
thrust::is_execution_policy<thrust::remove_cvref_t<T1>>{})
|
181 |
-
)
|
182 |
-
|
183 |
-
template <typename T1, typename T2, typename T3, typename T4>
|
184 |
-
__host__
|
185 |
-
static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4)
|
186 |
-
THRUST_RETURNS(
|
187 |
-
reduce_fn::call4(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4),
|
188 |
-
thrust::is_execution_policy<thrust::remove_cvref_t<T1>>{})
|
189 |
-
)
|
190 |
-
|
191 |
-
template <typename ForwardIt, typename Sentinel>
|
192 |
-
__host__
|
193 |
-
static auto call(ForwardIt&& first, Sentinel&& last)
|
194 |
-
THRUST_RETURNS(
|
195 |
-
reduce_fn::call(
|
196 |
-
thrust::detail::select_system(
|
197 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
198 |
-
)
|
199 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
200 |
-
, typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type{}
|
201 |
-
, thrust::plus<
|
202 |
-
remove_cvref_t<
|
203 |
-
typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
|
204 |
-
>
|
205 |
-
>{}
|
206 |
-
)
|
207 |
-
)
|
208 |
-
|
209 |
-
template <typename... Args>
|
210 |
-
THRUST_NODISCARD __host__
|
211 |
-
auto operator()(Args&&... args) const
|
212 |
-
THRUST_RETURNS(
|
213 |
-
call(THRUST_FWD(args)...)
|
214 |
-
)
|
215 |
-
};
|
216 |
-
|
217 |
-
} // namespace reduce_detail
|
218 |
-
|
219 |
-
THRUST_INLINE_CONSTANT reduce_detail::reduce_fn reduce{};
|
220 |
-
|
221 |
-
///////////////////////////////////////////////////////////////////////////////
|
222 |
-
|
223 |
-
namespace unimplemented
|
224 |
-
{
|
225 |
-
|
226 |
-
template <
|
227 |
-
typename DerivedPolicy
|
228 |
-
, typename ForwardIt, typename Sentinel, typename OutputIt
|
229 |
-
, typename T, typename BinaryOp
|
230 |
-
>
|
231 |
-
__host__
|
232 |
-
event<DerivedPolicy>
|
233 |
-
async_reduce_into(
|
234 |
-
thrust::execution_policy<DerivedPolicy>&
|
235 |
-
, ForwardIt, Sentinel, OutputIt, T, BinaryOp
|
236 |
-
)
|
237 |
-
{
|
238 |
-
THRUST_STATIC_ASSERT_MSG(
|
239 |
-
(thrust::detail::depend_on_instantiation<ForwardIt, false>::value)
|
240 |
-
, "this algorithm is not implemented for the specified system"
|
241 |
-
);
|
242 |
-
return {};
|
243 |
-
}
|
244 |
-
|
245 |
-
} // namespace unimplemented
|
246 |
-
|
247 |
-
namespace reduce_into_detail
|
248 |
-
{
|
249 |
-
|
250 |
-
using thrust::async::unimplemented::async_reduce_into;
|
251 |
-
|
252 |
-
struct reduce_into_fn final
|
253 |
-
{
|
254 |
-
template <
|
255 |
-
typename DerivedPolicy
|
256 |
-
, typename ForwardIt, typename Sentinel, typename OutputIt
|
257 |
-
, typename T, typename BinaryOp
|
258 |
-
>
|
259 |
-
__host__
|
260 |
-
static auto call(
|
261 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
262 |
-
, ForwardIt&& first, Sentinel&& last
|
263 |
-
, OutputIt&& output
|
264 |
-
, T&& init
|
265 |
-
, BinaryOp&& op
|
266 |
-
)
|
267 |
-
// ADL dispatch.
|
268 |
-
THRUST_RETURNS(
|
269 |
-
async_reduce_into(
|
270 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
271 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
272 |
-
, THRUST_FWD(output)
|
273 |
-
, THRUST_FWD(init)
|
274 |
-
, THRUST_FWD(op)
|
275 |
-
)
|
276 |
-
)
|
277 |
-
|
278 |
-
template <
|
279 |
-
typename DerivedPolicy
|
280 |
-
, typename ForwardIt, typename Sentinel, typename OutputIt
|
281 |
-
, typename T
|
282 |
-
>
|
283 |
-
__host__
|
284 |
-
static auto call5(
|
285 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
286 |
-
, ForwardIt&& first, Sentinel&& last
|
287 |
-
, OutputIt&& output
|
288 |
-
, T&& init
|
289 |
-
, thrust::true_type
|
290 |
-
)
|
291 |
-
// ADL dispatch.
|
292 |
-
THRUST_RETURNS(
|
293 |
-
async_reduce_into(
|
294 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
295 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
296 |
-
, THRUST_FWD(output)
|
297 |
-
, THRUST_FWD(init)
|
298 |
-
, thrust::plus<remove_cvref_t<T>>{}
|
299 |
-
)
|
300 |
-
)
|
301 |
-
|
302 |
-
template <
|
303 |
-
typename DerivedPolicy
|
304 |
-
, typename ForwardIt, typename Sentinel, typename OutputIt
|
305 |
-
>
|
306 |
-
__host__
|
307 |
-
static auto
|
308 |
-
call4(
|
309 |
-
thrust::detail::execution_policy_base<DerivedPolicy> const& exec
|
310 |
-
, ForwardIt&& first, Sentinel&& last
|
311 |
-
, OutputIt&& output
|
312 |
-
, thrust::true_type
|
313 |
-
)
|
314 |
-
// ADL dispatch.
|
315 |
-
THRUST_RETURNS(
|
316 |
-
async_reduce_into(
|
317 |
-
thrust::detail::derived_cast(thrust::detail::strip_const(exec))
|
318 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
319 |
-
, THRUST_FWD(output)
|
320 |
-
, typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type{}
|
321 |
-
, thrust::plus<
|
322 |
-
remove_cvref_t<
|
323 |
-
typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
|
324 |
-
>
|
325 |
-
>{}
|
326 |
-
)
|
327 |
-
)
|
328 |
-
|
329 |
-
template <
|
330 |
-
typename ForwardIt, typename Sentinel, typename OutputIt
|
331 |
-
, typename T, typename BinaryOp
|
332 |
-
>
|
333 |
-
__host__
|
334 |
-
static auto call5(
|
335 |
-
ForwardIt&& first, Sentinel&& last
|
336 |
-
, OutputIt&& output
|
337 |
-
, T&& init
|
338 |
-
, BinaryOp&& op
|
339 |
-
, thrust::false_type
|
340 |
-
)
|
341 |
-
THRUST_RETURNS(
|
342 |
-
reduce_into_fn::call(
|
343 |
-
thrust::detail::select_system(
|
344 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
345 |
-
, typename iterator_system<remove_cvref_t<OutputIt>>::type{}
|
346 |
-
)
|
347 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
348 |
-
, THRUST_FWD(output)
|
349 |
-
, THRUST_FWD(init)
|
350 |
-
, THRUST_FWD(op)
|
351 |
-
)
|
352 |
-
)
|
353 |
-
|
354 |
-
template <
|
355 |
-
typename ForwardIt, typename Sentinel, typename OutputIt
|
356 |
-
, typename T
|
357 |
-
>
|
358 |
-
__host__
|
359 |
-
static auto call4(
|
360 |
-
ForwardIt&& first, Sentinel&& last
|
361 |
-
, OutputIt&& output
|
362 |
-
, T&& init
|
363 |
-
, thrust::false_type
|
364 |
-
)
|
365 |
-
THRUST_RETURNS(
|
366 |
-
reduce_into_fn::call(
|
367 |
-
thrust::detail::select_system(
|
368 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
369 |
-
, typename iterator_system<remove_cvref_t<OutputIt>>::type{}
|
370 |
-
)
|
371 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
372 |
-
, THRUST_FWD(output)
|
373 |
-
, THRUST_FWD(init)
|
374 |
-
, thrust::plus<remove_cvref_t<T>>{}
|
375 |
-
)
|
376 |
-
)
|
377 |
-
|
378 |
-
template <
|
379 |
-
typename ForwardIt, typename Sentinel, typename OutputIt
|
380 |
-
>
|
381 |
-
__host__
|
382 |
-
static auto call(
|
383 |
-
ForwardIt&& first, Sentinel&& last
|
384 |
-
, OutputIt&& output
|
385 |
-
)
|
386 |
-
THRUST_RETURNS(
|
387 |
-
reduce_into_fn::call(
|
388 |
-
thrust::detail::select_system(
|
389 |
-
typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
|
390 |
-
, typename iterator_system<remove_cvref_t<OutputIt>>::type{}
|
391 |
-
)
|
392 |
-
, THRUST_FWD(first), THRUST_FWD(last)
|
393 |
-
, THRUST_FWD(output)
|
394 |
-
, typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type{}
|
395 |
-
, thrust::plus<
|
396 |
-
remove_cvref_t<
|
397 |
-
typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
|
398 |
-
>
|
399 |
-
>{}
|
400 |
-
)
|
401 |
-
)
|
402 |
-
|
403 |
-
// MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect
|
404 |
-
// if T1 is an execution_policy by using SFINAE. Switching to a static
|
405 |
-
// dispatch pattern to prevent this.
|
406 |
-
template <typename T1, typename T2, typename T3, typename T4>
|
407 |
-
__host__
|
408 |
-
static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4)
|
409 |
-
THRUST_RETURNS(
|
410 |
-
reduce_into_fn::call4(
|
411 |
-
THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4),
|
412 |
-
thrust::is_execution_policy<thrust::remove_cvref_t<T1>>{})
|
413 |
-
)
|
414 |
-
|
415 |
-
template <typename T1, typename T2, typename T3, typename T4, typename T5>
|
416 |
-
__host__
|
417 |
-
static auto call(T1&& t1, T2&& t2, T3&& t3, T4&& t4, T5&& t5)
|
418 |
-
THRUST_RETURNS(
|
419 |
-
reduce_into_fn::call5(
|
420 |
-
THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3), THRUST_FWD(t4),
|
421 |
-
THRUST_FWD(t5), thrust::is_execution_policy<thrust::remove_cvref_t<T1>>{})
|
422 |
-
)
|
423 |
-
|
424 |
-
template <typename... Args>
|
425 |
-
THRUST_NODISCARD __host__
|
426 |
-
auto operator()(Args&&... args) const
|
427 |
-
THRUST_RETURNS(
|
428 |
-
call(THRUST_FWD(args)...)
|
429 |
-
)
|
430 |
-
};
|
431 |
-
|
432 |
-
} // namespace reduce_into_detail
|
433 |
-
|
434 |
-
THRUST_INLINE_CONSTANT reduce_into_detail::reduce_into_fn reduce_into{};
|
435 |
-
|
436 |
-
} // namespace async
|
437 |
-
|
438 |
-
} // end namespace thrust
|
439 |
-
|
440 |
-
#endif
|
441 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_with_system_and_traversal.h
DELETED
@@ -1,57 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-
-template<typename Category, typename System, typename Traversal>
-  struct iterator_category_with_system_and_traversal
-    : Category
-{
-}; // end iterator_category_with_system_and_traversal
-
-
-// specialize iterator_category_to_system for iterator_category_with_system_and_traversal
-template<typename Category> struct iterator_category_to_system;
-
-template<typename Category, typename System, typename Traversal>
-  struct iterator_category_to_system<iterator_category_with_system_and_traversal<Category,System,Traversal> >
-{
-  typedef System type;
-}; // end iterator_category_to_system
-
-
-// specialize iterator_category_to_traversal for iterator_category_with_system_and_traversal
-template<typename Category> struct iterator_category_to_traversal;
-
-template<typename Category, typename System, typename Traversal>
-  struct iterator_category_to_traversal<iterator_category_with_system_and_traversal<Category,System,Traversal> >
-{
-  typedef Traversal type;
-}; // end iterator_category_to_traversal
-
-
-
-} // end detail
-} // end thrust
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/copy_if.h
DELETED
@@ -1,857 +0,0 @@
|
|
1 |
-
/******************************************************************************
|
2 |
-
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
3 |
-
*
|
4 |
-
* Redistribution and use in source and binary forms, with or without
|
5 |
-
* modification, are permitted provided that the following conditions are met:
|
6 |
-
* * Redistributions of source code must retain the above copyright
|
7 |
-
* notice, this list of conditions and the following disclaimer.
|
8 |
-
* * Redistributions in binary form must reproduce the above copyright
|
9 |
-
* notice, this list of conditions and the following disclaimer in the
|
10 |
-
* documentation and/or other materials provided with the distribution.
|
11 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
12 |
-
* names of its contributors may be used to endorse or promote products
|
13 |
-
* derived from this software without specific prior written permission.
|
14 |
-
*
|
15 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
16 |
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
17 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
18 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
19 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
20 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
21 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
22 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
23 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
24 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
25 |
-
*
|
26 |
-
******************************************************************************/
|
27 |
-
#pragma once
|
28 |
-
|
29 |
-
|
30 |
-
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
31 |
-
#include <thrust/system/cuda/config.h>
|
32 |
-
|
33 |
-
#include <thrust/detail/cstdint.h>
|
34 |
-
#include <thrust/detail/temporary_array.h>
|
35 |
-
#include <thrust/system/cuda/detail/util.h>
|
36 |
-
#include <cub/device/device_select.cuh>
|
37 |
-
#include <thrust/system/cuda/detail/core/agent_launcher.h>
|
38 |
-
#include <thrust/system/cuda/detail/core/util.h>
|
39 |
-
#include <thrust/system/cuda/detail/par_to_seq.h>
|
40 |
-
#include <thrust/detail/function.h>
|
41 |
-
#include <thrust/distance.h>
|
42 |
-
#include <thrust/detail/alignment.h>
|
43 |
-
|
44 |
-
namespace thrust
|
45 |
-
{
|
46 |
-
// XXX declare generic copy_if interface
|
47 |
-
// to avoid circulular dependency from thrust/copy.h
|
48 |
-
template <typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename Predicate>
|
49 |
-
__host__ __device__
|
50 |
-
OutputIterator
|
51 |
-
copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
52 |
-
InputIterator first,
|
53 |
-
InputIterator last,
|
54 |
-
OutputIterator result,
|
55 |
-
Predicate pred);
|
56 |
-
|
57 |
-
template <typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Predicate>
|
58 |
-
__host__ __device__
|
59 |
-
OutputIterator
|
60 |
-
copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
61 |
-
InputIterator1 first,
|
62 |
-
InputIterator1 last,
|
63 |
-
InputIterator2 stencil,
|
64 |
-
OutputIterator result,
|
65 |
-
Predicate pred);
|
66 |
-
|
67 |
-
namespace cuda_cub {
|
68 |
-
|
69 |
-
namespace __copy_if {
|
70 |
-
|
71 |
-
template <int _BLOCK_THREADS,
|
72 |
-
int _ITEMS_PER_THREAD = 1,
|
73 |
-
cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
|
74 |
-
cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_LDG,
|
75 |
-
cub::BlockScanAlgorithm _SCAN_ALGORITHM = cub::BLOCK_SCAN_WARP_SCANS>
|
76 |
-
struct PtxPolicy
|
77 |
-
{
|
78 |
-
enum
|
79 |
-
{
|
80 |
-
BLOCK_THREADS = _BLOCK_THREADS,
|
81 |
-
ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
|
82 |
-
ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD,
|
83 |
-
};
|
84 |
-
static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
|
85 |
-
static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
|
86 |
-
static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
|
87 |
-
}; // struct PtxPolicy
|
88 |
-
|
89 |
-
template<class, class>
|
90 |
-
struct Tuning;
|
91 |
-
|
92 |
-
template<class T>
|
93 |
-
struct Tuning<sm52, T>
|
94 |
-
{
|
95 |
-
const static int INPUT_SIZE = sizeof(T);
|
96 |
-
|
97 |
-
enum
|
98 |
-
{
|
99 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 9,
|
100 |
-
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
|
101 |
-
};
|
102 |
-
|
103 |
-
typedef PtxPolicy<128,
|
104 |
-
ITEMS_PER_THREAD,
|
105 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
106 |
-
cub::LOAD_LDG,
|
107 |
-
cub::BLOCK_SCAN_WARP_SCANS>
|
108 |
-
type;
|
109 |
-
}; // Tuning<350>
|
110 |
-
|
111 |
-
|
112 |
-
template<class T>
|
113 |
-
struct Tuning<sm35, T>
|
114 |
-
{
|
115 |
-
const static int INPUT_SIZE = sizeof(T);
|
116 |
-
|
117 |
-
enum
|
118 |
-
{
|
119 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 10,
|
120 |
-
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
|
121 |
-
};
|
122 |
-
|
123 |
-
typedef PtxPolicy<128,
|
124 |
-
ITEMS_PER_THREAD,
|
125 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
126 |
-
cub::LOAD_LDG,
|
127 |
-
cub::BLOCK_SCAN_WARP_SCANS>
|
128 |
-
type;
|
129 |
-
}; // Tuning<350>
|
130 |
-
|
131 |
-
template<class T>
|
132 |
-
struct Tuning<sm30, T>
|
133 |
-
{
|
134 |
-
const static int INPUT_SIZE = sizeof(T);
|
135 |
-
|
136 |
-
enum
|
137 |
-
{
|
138 |
-
NOMINAL_4B_ITEMS_PER_THREAD = 7,
|
139 |
-
ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(3, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
|
140 |
-
};
|
141 |
-
|
142 |
-
typedef PtxPolicy<128,
|
143 |
-
ITEMS_PER_THREAD,
|
144 |
-
cub::BLOCK_LOAD_WARP_TRANSPOSE,
|
145 |
-
cub::LOAD_DEFAULT,
|
146 |
-
cub::BLOCK_SCAN_WARP_SCANS>
|
147 |
-
type;
|
148 |
-
}; // Tuning<300>
|
149 |
-
|
150 |
-
struct no_stencil_tag_ {};
|
151 |
-
typedef no_stencil_tag_* no_stencil_tag;
|
152 |
-
template <class ItemsIt,
|
153 |
-
class StencilIt,
|
154 |
-
class OutputIt,
|
155 |
-
class Predicate,
|
156 |
-
class Size,
|
157 |
-
class NumSelectedOutputIt>
|
158 |
-
struct CopyIfAgent
|
159 |
-
{
|
160 |
-
typedef typename iterator_traits<ItemsIt>::value_type item_type;
|
161 |
-
typedef typename iterator_traits<StencilIt>::value_type stencil_type;
|
162 |
-
|
163 |
-
typedef cub::ScanTileState<Size> ScanTileState;
|
164 |
-
|
165 |
-
template <class Arch>
|
166 |
-
struct PtxPlan : Tuning<Arch, item_type>::type
|
167 |
-
{
|
168 |
-
typedef Tuning<Arch,item_type> tuning;
|
169 |
-
|
170 |
-
typedef typename core::LoadIterator<PtxPlan, ItemsIt>::type ItemsLoadIt;
|
171 |
-
typedef typename core::LoadIterator<PtxPlan, StencilIt>::type StencilLoadIt;
|
172 |
-
|
173 |
-
typedef typename core::BlockLoad<PtxPlan, ItemsLoadIt>::type BlockLoadItems;
|
174 |
-
typedef typename core::BlockLoad<PtxPlan, StencilLoadIt>::type BlockLoadStencil;
|
175 |
-
|
176 |
-
typedef cub::TilePrefixCallbackOp<Size,
|
177 |
-
cub::Sum,
|
178 |
-
ScanTileState,
|
179 |
-
Arch::ver>
|
180 |
-
TilePrefixCallback;
|
181 |
-
|
182 |
-
typedef cub::BlockScan<Size,
|
183 |
-
PtxPlan::BLOCK_THREADS,
|
184 |
-
PtxPlan::SCAN_ALGORITHM,
|
185 |
-
1,
|
186 |
-
1,
|
187 |
-
Arch::ver>
|
188 |
-
BlockScan;
|
189 |
-
|
190 |
-
|
191 |
-
union TempStorage
|
192 |
-
{
|
193 |
-
struct
|
194 |
-
{
|
195 |
-
typename BlockScan::TempStorage scan;
|
196 |
-
typename TilePrefixCallback::TempStorage prefix;
|
197 |
-
};
|
198 |
-
|
199 |
-
typename BlockLoadItems::TempStorage load_items;
|
200 |
-
typename BlockLoadStencil::TempStorage load_stencil;
|
201 |
-
|
202 |
-
core::uninitialized_array<item_type, PtxPlan::ITEMS_PER_TILE> raw_exchange;
|
203 |
-
}; // union TempStorage
|
204 |
-
}; // struct PtxPlan
|
205 |
-
|
206 |
-
typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;
|
207 |
-
|
208 |
-
typedef typename ptx_plan::ItemsLoadIt ItemsLoadIt;
|
209 |
-
typedef typename ptx_plan::StencilLoadIt StencilLoadIt;
|
210 |
-
typedef typename ptx_plan::BlockLoadItems BlockLoadItems;
|
211 |
-
typedef typename ptx_plan::BlockLoadStencil BlockLoadStencil;
|
212 |
-
typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback;
|
213 |
-
typedef typename ptx_plan::BlockScan BlockScan;
|
214 |
-
typedef typename ptx_plan::TempStorage TempStorage;
|
215 |
-
|
216 |
-
enum
|
217 |
-
{
|
218 |
-
USE_STENCIL = !thrust::detail::is_same<StencilIt, no_stencil_tag>::value,
|
219 |
-
BLOCK_THREADS = ptx_plan::BLOCK_THREADS,
|
220 |
-
ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
|
221 |
-
ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE
|
222 |
-
};
|
223 |
-
|
224 |
-
struct impl
|
225 |
-
{
|
226 |
-
//---------------------------------------------------------------------
|
227 |
-
// Per-thread fields
|
228 |
-
//---------------------------------------------------------------------
|
229 |
-
|
230 |
-
TempStorage & storage;
|
231 |
-
ScanTileState &tile_state;
|
232 |
-
ItemsLoadIt items_load_it;
|
233 |
-
StencilLoadIt stencil_load_it;
|
234 |
-
OutputIt output_it;
|
235 |
-
Predicate predicate;
|
236 |
-
Size num_items;
|
237 |
-
|
238 |
-
//------------------------------------------
|
239 |
-
// scatter results to memory
|
240 |
-
//------------------------------------------
|
241 |
-
|
242 |
-
THRUST_DEVICE_FUNCTION void
|
243 |
-
scatter(item_type (&items)[ITEMS_PER_THREAD],
|
244 |
-
Size (&selection_flags)[ITEMS_PER_THREAD],
|
245 |
-
Size (&selection_indices)[ITEMS_PER_THREAD],
|
246 |
-
int num_tile_selections,
|
247 |
-
Size num_selections_prefix)
|
248 |
-
{
|
249 |
-
using core::sync_threadblock;
|
250 |
-
|
251 |
-
#pragma unroll
|
252 |
-
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
|
253 |
-
{
|
254 |
-
int local_scatter_offset = selection_indices[ITEM] -
|
255 |
-
num_selections_prefix;
|
256 |
-
if (selection_flags[ITEM])
|
257 |
-
{
|
258 |
-
new (&storage.raw_exchange[local_scatter_offset]) item_type(items[ITEM]);
|
259 |
-
}
|
260 |
-
}
|
261 |
-
|
262 |
-
sync_threadblock();
|
263 |
-
|
264 |
-
for (int item = threadIdx.x;
|
265 |
-
item < num_tile_selections;
|
266 |
-
item += BLOCK_THREADS)
|
267 |
-
{
|
268 |
-
output_it[num_selections_prefix + item] = storage.raw_exchange[item];
|
269 |
-
}
|
270 |
-
} // func scatter
|
271 |
-
|
272 |
-
//------------------------------------------
|
273 |
-
// specialize predicate on different types
|
274 |
-
//------------------------------------------
|
275 |
-
|
276 |
-
template <int T>
|
277 |
-
struct __tag {};
|
278 |
-
|
279 |
-
enum ItemStencil
|
280 |
-
{
|
281 |
-
ITEM,
|
282 |
-
STENCIL
|
283 |
-
};
|
284 |
-
|
285 |
-
template <bool TAG, class T>
|
286 |
-
struct wrap_value
|
287 |
-
{
|
288 |
-
T const & x;
|
289 |
-
THRUST_DEVICE_FUNCTION wrap_value(T const &x) : x(x) {}
|
290 |
-
|
291 |
-
THRUST_DEVICE_FUNCTION T const &operator()() const { return x; };
|
292 |
-
}; // struct wrap_type
|
293 |
-
|
294 |
-
//------- item
|
295 |
-
|
296 |
-
THRUST_DEVICE_FUNCTION bool
|
297 |
-
predicate_wrapper(wrap_value<ITEM, item_type> const &x,
|
298 |
-
__tag<false /* USE_STENCIL */>)
|
299 |
-
{
|
300 |
-
return predicate(x());
|
301 |
-
}
|
302 |
-
|
303 |
-
THRUST_DEVICE_FUNCTION bool
|
304 |
-
predicate_wrapper(wrap_value<ITEM, item_type> const &,
|
305 |
-
__tag<true>)
|
306 |
-
{
|
307 |
-
return false;
|
308 |
-
}
|
309 |
-
|
310 |
-
//-------- stencil
|
311 |
-
|
312 |
-
template <class T>
|
313 |
-
THRUST_DEVICE_FUNCTION bool
|
314 |
-
predicate_wrapper(wrap_value<STENCIL, T> const &x,
|
315 |
-
__tag<true>)
|
316 |
-
{
|
317 |
-
return predicate(x());
|
318 |
-
}
|
319 |
-
|
320 |
-
THRUST_DEVICE_FUNCTION bool
|
321 |
-
predicate_wrapper(wrap_value<STENCIL, no_stencil_tag_> const &,
|
322 |
-
__tag<true>)
|
323 |
-
{
|
324 |
-
return false;
|
325 |
-
}
|
326 |
-
|
327 |
-
|
328 |
-
THRUST_DEVICE_FUNCTION bool
|
329 |
-
predicate_wrapper(wrap_value<STENCIL, stencil_type> const &,
|
330 |
-
__tag<false>)
|
331 |
-
{
|
332 |
-
return false;
|
333 |
-
}
|
334 |
-
|
335 |
-
template <bool IS_LAST_TILE, ItemStencil TYPE, class T>
|
336 |
-
THRUST_DEVICE_FUNCTION void
|
337 |
-
compute_selection_flags(int num_tile_items,
|
338 |
-
T (&values)[ITEMS_PER_THREAD],
|
339 |
-
Size (&selection_flags)[ITEMS_PER_THREAD])
|
340 |
-
{
|
341 |
-
#pragma unroll
|
342 |
-
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
|
343 |
-
{
|
344 |
-
// Out-of-bounds items are selection_flags
|
345 |
-
selection_flags[ITEM] = 1;
|
346 |
-
|
347 |
-
if (!IS_LAST_TILE ||
|
348 |
-
(Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM < num_tile_items))
|
349 |
-
{
|
350 |
-
selection_flags[ITEM] =
|
351 |
-
predicate_wrapper(wrap_value<TYPE, T>(values[ITEM]),
|
352 |
-
__tag<USE_STENCIL>());
|
353 |
-
}
|
354 |
-
}
|
355 |
-
}
|
356 |
-
|
357 |
-
//------------------------------------------
|
358 |
-
// consume tiles
|
359 |
-
//------------------------------------------
|
360 |
-
|
361 |
-
template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
|
362 |
-
Size THRUST_DEVICE_FUNCTION
|
363 |
-
consume_tile_impl(int num_tile_items,
|
364 |
-
int tile_idx,
|
365 |
-
Size tile_base)
|
366 |
-
{
|
367 |
-
item_type items_loc[ITEMS_PER_THREAD];
|
368 |
-
Size selection_flags[ITEMS_PER_THREAD];
|
369 |
-
Size selection_idx[ITEMS_PER_THREAD];
|
370 |
-
|
371 |
-
if (IS_LAST_TILE) {
|
372 |
-
BlockLoadItems(storage.load_items)
|
373 |
-
.Load(items_load_it + tile_base,
|
374 |
-
items_loc,
|
375 |
-
num_tile_items);
|
376 |
-
}
|
377 |
-
else
|
378 |
-
{
|
379 |
-
BlockLoadItems(storage.load_items)
|
380 |
-
.Load(items_load_it + tile_base,
|
381 |
-
items_loc);
|
382 |
-
}
|
383 |
-
|
384 |
-
core::sync_threadblock();
|
385 |
-
|
386 |
-
if (USE_STENCIL)
|
387 |
-
{
|
388 |
-
stencil_type stencil_loc[ITEMS_PER_THREAD];
|
389 |
-
|
390 |
-
if (IS_LAST_TILE)
|
391 |
-
{
|
392 |
-
BlockLoadStencil(storage.load_stencil)
|
393 |
-
.Load(stencil_load_it + tile_base,
|
394 |
-
stencil_loc,
|
395 |
-
num_tile_items);
|
396 |
-
}
|
397 |
-
else
|
398 |
-
{
|
399 |
-
BlockLoadStencil(storage.load_stencil)
|
400 |
-
.Load(stencil_load_it + tile_base,
|
401 |
-
stencil_loc);
|
402 |
-
}
|
403 |
-
|
404 |
-
compute_selection_flags<IS_LAST_TILE, STENCIL>(num_tile_items,
|
405 |
-
stencil_loc,
|
406 |
-
selection_flags);
|
407 |
-
}
|
408 |
-
else /* Use predicate on items rather then stencil */
|
409 |
-
{
|
410 |
-
compute_selection_flags<IS_LAST_TILE, ITEM>(num_tile_items,
|
411 |
-
items_loc,
|
412 |
-
selection_flags);
|
413 |
-
}
|
414 |
-
|
415 |
-
core::sync_threadblock();
|
416 |
-
|
417 |
-
Size num_tile_selections = 0;
|
418 |
-
Size num_selections = 0;
|
419 |
-
Size num_selections_prefix = 0;
|
420 |
-
if (IS_FIRST_TILE)
|
421 |
-
{
|
422 |
-
BlockScan(storage.scan)
|
423 |
-
.ExclusiveSum(selection_flags,
|
424 |
-
selection_idx,
|
425 |
-
num_tile_selections);
|
426 |
-
|
427 |
-
if (threadIdx.x == 0)
|
428 |
-
{
|
429 |
-
// Update tile status if this is not the last tile
|
430 |
-
if (!IS_LAST_TILE)
|
431 |
-
tile_state.SetInclusive(0, num_tile_selections);
|
432 |
-
}
|
433 |
-
|
434 |
-
// Do not count any out-of-bounds selections
|
435 |
-
if (IS_LAST_TILE)
|
436 |
-
{
|
437 |
-
int num_discount = ITEMS_PER_TILE - num_tile_items;
|
438 |
-
num_tile_selections -= num_discount;
|
439 |
-
}
|
440 |
-
num_selections = num_tile_selections;
|
441 |
-
}
|
442 |
-
else
|
443 |
-
{
|
444 |
-
TilePrefixCallback prefix_cb(tile_state,
|
445 |
-
storage.prefix,
|
446 |
-
cub::Sum(),
|
447 |
-
tile_idx);
|
448 |
-
BlockScan(storage.scan)
|
449 |
-
.ExclusiveSum(selection_flags,
|
450 |
-
selection_idx,
|
451 |
-
prefix_cb);
|
452 |
-
|
453 |
-
num_selections = prefix_cb.GetInclusivePrefix();
|
454 |
-
num_tile_selections = prefix_cb.GetBlockAggregate();
|
455 |
-
num_selections_prefix = prefix_cb.GetExclusivePrefix();
|
456 |
-
|
457 |
-
if (IS_LAST_TILE)
|
458 |
-
{
|
459 |
-
int num_discount = ITEMS_PER_TILE - num_tile_items;
|
460 |
-
num_tile_selections -= num_discount;
|
461 |
-
num_selections -= num_discount;
|
462 |
-
}
|
463 |
-
}
|
464 |
-
|
465 |
-
core::sync_threadblock();
|
466 |
-
|
467 |
-
scatter(items_loc,
|
468 |
-
selection_flags,
|
469 |
-
selection_idx,
|
470 |
-
num_tile_selections,
|
471 |
-
num_selections_prefix);
|
472 |
-
|
473 |
-
|
474 |
-
return num_selections;
|
475 |
-
} // func consume_tile_impl
|
476 |
-
|
477 |
-
template <bool IS_LAST_TILE>
|
478 |
-
THRUST_DEVICE_FUNCTION Size
|
479 |
-
consume_tile(int num_tile_items,
|
480 |
-
int tile_idx,
|
481 |
-
Size tile_base)
|
482 |
-
{
|
483 |
-
if (tile_idx == 0)
|
484 |
-
{
|
485 |
-
return consume_tile_impl<IS_LAST_TILE, true>(num_tile_items,
|
486 |
-
tile_idx,
|
487 |
-
tile_base);
|
488 |
-
}
|
489 |
-
else
|
490 |
-
{
|
491 |
-
return consume_tile_impl<IS_LAST_TILE, false>(num_tile_items,
|
492 |
-
tile_idx,
|
493 |
-
tile_base);
|
494 |
-
}
|
495 |
-
} // func consume_tile
|
496 |
-
|
497 |
-
//---------------------------------------------------------------------
|
498 |
-
// Constructor
|
499 |
-
//---------------------------------------------------------------------
|
500 |
-
|
501 |
-
THRUST_DEVICE_FUNCTION impl(TempStorage & storage_,
|
502 |
-
ScanTileState & tile_state_,
|
503 |
-
ItemsIt items_it,
|
504 |
-
StencilIt stencil_it,
|
505 |
-
OutputIt output_it_,
|
506 |
-
Predicate predicate_,
|
507 |
-
Size num_items_,
|
508 |
-
int num_tiles,
|
509 |
-
NumSelectedOutputIt num_selected_out)
|
510 |
-
: storage(storage_),
|
511 |
-
tile_state(tile_state_),
|
512 |
-
items_load_it(core::make_load_iterator(ptx_plan(), items_it)),
|
513 |
-
stencil_load_it(core::make_load_iterator(ptx_plan(), stencil_it)),
|
514 |
-
output_it(output_it_),
|
515 |
-
predicate(predicate_),
|
516 |
-
num_items(num_items_)
|
517 |
-
{
|
518 |
-
int tile_idx = blockIdx.x;
|
519 |
-
Size tile_base = tile_idx * ITEMS_PER_TILE;
|
520 |
-
|
521 |
-
if (tile_idx < num_tiles - 1)
|
522 |
-
{
|
523 |
-
consume_tile<false>(ITEMS_PER_TILE,
|
524 |
-
tile_idx,
|
525 |
-
tile_base);
|
526 |
-
}
|
527 |
-
else
|
528 |
-
{
|
529 |
-
int num_remaining = static_cast<int>(num_items - tile_base);
|
530 |
-
Size num_selections = consume_tile<true>(num_remaining,
|
531 |
-
tile_idx,
|
532 |
-
tile_base);
|
533 |
-
if (threadIdx.x == 0)
|
534 |
-
{
|
535 |
-
*num_selected_out = num_selections;
|
536 |
-
}
|
537 |
-
}
|
538 |
-
} // ctor impl
|
539 |
-
};
|
540 |
-
|
541 |
-
//---------------------------------------------------------------------
|
542 |
-
// Agent entry point
|
543 |
-
//---------------------------------------------------------------------
|
544 |
-
|
545 |
-
THRUST_AGENT_ENTRY(ItemsIt items_it,
|
546 |
-
StencilIt stencil_it,
|
547 |
-
OutputIt output_it,
|
548 |
-
Predicate predicate,
|
549 |
-
Size num_items,
|
550 |
-
NumSelectedOutputIt num_selected_out,
|
551 |
-
ScanTileState tile_state,
|
552 |
-
int num_tiles,
|
553 |
-
char * shmem)
|
554 |
-
{
|
555 |
-
TempStorage &storage = *reinterpret_cast<TempStorage *>(shmem);
|
556 |
-
|
557 |
-
impl(storage,
|
558 |
-
tile_state,
|
559 |
-
items_it,
|
560 |
-
stencil_it,
|
561 |
-
output_it,
|
562 |
-
predicate,
|
563 |
-
num_items,
|
564 |
-
num_tiles,
|
565 |
-
num_selected_out);
|
566 |
-
}
|
567 |
-
}; // struct CopyIfAgent
|
568 |
-
|
569 |
-
template <class ScanTileState,
|
570 |
-
class NumSelectedIt,
|
571 |
-
class Size>
|
572 |
-
struct InitAgent
|
573 |
-
{
|
574 |
-
template <class Arch>
|
575 |
-
struct PtxPlan : PtxPolicy<128> {};
|
576 |
-
typedef core::specialize_plan<PtxPlan> ptx_plan;
|
577 |
-
|
578 |
-
//---------------------------------------------------------------------
|
579 |
-
// Agent entry point
|
580 |
-
//---------------------------------------------------------------------
|
581 |
-
|
582 |
-
THRUST_AGENT_ENTRY(ScanTileState tile_state,
|
583 |
-
Size num_tiles,
|
584 |
-
NumSelectedIt num_selected_out,
|
585 |
-
char * /*shmem*/)
|
586 |
-
{
|
587 |
-
tile_state.InitializeStatus(num_tiles);
|
588 |
-
if (blockIdx.x == 0 && threadIdx.x == 0)
|
589 |
-
*num_selected_out = 0;
|
590 |
-
}
|
591 |
-
}; // struct InitAgent
|
592 |
-
|
593 |
-
template <class ItemsIt,
|
594 |
-
class StencilIt,
|
595 |
-
class OutputIt,
|
596 |
-
class Predicate,
|
597 |
-
class Size,
|
598 |
-
class NumSelectedOutIt>
|
599 |
-
static cudaError_t THRUST_RUNTIME_FUNCTION
|
600 |
-
doit_step(void * d_temp_storage,
|
601 |
-
size_t & temp_storage_bytes,
|
602 |
-
ItemsIt items,
|
603 |
-
StencilIt stencil,
|
604 |
-
OutputIt output_it,
|
605 |
-
Predicate predicate,
|
606 |
-
NumSelectedOutIt num_selected_out,
|
607 |
-
Size num_items,
|
608 |
-
cudaStream_t stream,
|
609 |
-
bool debug_sync)
|
610 |
-
{
|
611 |
-
if (num_items == 0)
|
612 |
-
return cudaSuccess;
|
613 |
-
|
614 |
-
using core::AgentLauncher;
|
615 |
-
using core::AgentPlan;
|
616 |
-
using core::get_agent_plan;
|
617 |
-
|
618 |
-
typedef AgentLauncher<
|
619 |
-
CopyIfAgent<ItemsIt,
|
620 |
-
StencilIt,
|
621 |
-
OutputIt,
|
622 |
-
Predicate,
|
623 |
-
Size,
|
624 |
-
NumSelectedOutIt> >
|
625 |
-
copy_if_agent;
|
626 |
-
|
627 |
-
typedef typename copy_if_agent::ScanTileState ScanTileState;
|
628 |
-
|
629 |
-
typedef AgentLauncher<
|
630 |
-
InitAgent<ScanTileState, NumSelectedOutIt, Size> >
|
631 |
-
init_agent;
|
632 |
-
|
633 |
-
|
634 |
-
using core::get_plan;
|
635 |
-
typename get_plan<init_agent>::type init_plan = init_agent::get_plan();
|
636 |
-
typename get_plan<copy_if_agent>::type copy_if_plan = copy_if_agent::get_plan(stream);
|
637 |
-
|
638 |
-
int tile_size = copy_if_plan.items_per_tile;
|
639 |
-
size_t num_tiles = (num_items + tile_size - 1) / tile_size;
|
640 |
-
|
641 |
-
size_t vshmem_size = core::vshmem_size(copy_if_plan.shared_memory_size,
|
642 |
-
num_tiles);
|
643 |
-
|
644 |
-
cudaError_t status = cudaSuccess;
|
645 |
-
if (num_items == 0)
|
646 |
-
return status;
|
647 |
-
|
648 |
-
size_t allocation_sizes[2] = {0, vshmem_size};
|
649 |
-
status = ScanTileState::AllocationSize(static_cast<int>(num_tiles), allocation_sizes[0]);
|
650 |
-
CUDA_CUB_RET_IF_FAIL(status);
|
651 |
-
|
652 |
-
|
653 |
-
void* allocations[2] = {NULL, NULL};
|
654 |
-
status = cub::AliasTemporaries(d_temp_storage,
|
655 |
-
temp_storage_bytes,
|
656 |
-
allocations,
|
657 |
-
allocation_sizes);
|
658 |
-
CUDA_CUB_RET_IF_FAIL(status);
|
659 |
-
|
660 |
-
|
661 |
-
if (d_temp_storage == NULL)
|
662 |
-
{
|
663 |
-
return status;
|
664 |
-
}
|
665 |
-
|
666 |
-
ScanTileState tile_status;
|
667 |
-
status = tile_status.Init(static_cast<int>(num_tiles), allocations[0], allocation_sizes[0]);
|
668 |
-
CUDA_CUB_RET_IF_FAIL(status);
|
669 |
-
|
670 |
-
init_agent ia(init_plan, num_tiles, stream, "copy_if::init_agent", debug_sync);
|
671 |
-
|
672 |
-
char *vshmem_ptr = vshmem_size > 0 ? (char*)allocations[1] : NULL;
|
673 |
-
|
674 |
-
copy_if_agent pa(copy_if_plan, num_items, stream, vshmem_ptr, "copy_if::partition_agent", debug_sync);
|
675 |
-
|
676 |
-
ia.launch(tile_status, num_tiles, num_selected_out);
|
677 |
-
CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
|
678 |
-
|
679 |
-
pa.launch(items,
|
680 |
-
stencil,
|
681 |
-
output_it,
|
682 |
-
predicate,
|
683 |
-
num_items,
|
684 |
-
num_selected_out,
|
685 |
-
tile_status,
|
686 |
-
num_tiles);
|
687 |
-
CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
|
688 |
-
return status;
|
689 |
-
}
|
690 |
-
|
691 |
-
template <typename Derived,
|
692 |
-
typename InputIt,
|
693 |
-
typename StencilIt,
|
694 |
-
typename OutputIt,
|
695 |
-
typename Predicate>
|
696 |
-
THRUST_RUNTIME_FUNCTION
|
697 |
-
OutputIt copy_if(execution_policy<Derived>& policy,
|
698 |
-
InputIt first,
|
699 |
-
InputIt last,
|
700 |
-
StencilIt stencil,
|
701 |
-
OutputIt output,
|
702 |
-
Predicate predicate)
|
703 |
-
{
|
704 |
-
typedef int size_type;
|
705 |
-
|
706 |
-
size_type num_items = static_cast<size_type>(thrust::distance(first, last));
|
707 |
-
size_t temp_storage_bytes = 0;
|
708 |
-
cudaStream_t stream = cuda_cub::stream(policy);
|
709 |
-
bool debug_sync = THRUST_DEBUG_SYNC_FLAG;
|
710 |
-
|
711 |
-
if (num_items == 0)
|
712 |
-
return output;
|
713 |
-
|
714 |
-
cudaError_t status;
|
715 |
-
status = doit_step(NULL,
|
716 |
-
temp_storage_bytes,
|
717 |
-
first,
|
718 |
-
stencil,
|
719 |
-
output,
|
720 |
-
predicate,
|
721 |
-
reinterpret_cast<size_type*>(NULL),
|
722 |
-
num_items,
|
723 |
-
stream,
|
724 |
-
debug_sync);
|
725 |
-
cuda_cub::throw_on_error(status, "copy_if failed on 1st step");
|
726 |
-
|
727 |
-
size_t allocation_sizes[2] = {sizeof(size_type), temp_storage_bytes};
|
728 |
-
void * allocations[2] = {NULL, NULL};
|
729 |
-
|
730 |
-
size_t storage_size = 0;
|
731 |
-
|
732 |
-
status = core::alias_storage(NULL,
|
733 |
-
storage_size,
|
734 |
-
allocations,
|
735 |
-
allocation_sizes);
|
736 |
-
cuda_cub::throw_on_error(status, "copy_if failed on 1st alias_storage");
|
737 |
-
|
738 |
-
// Allocate temporary storage.
|
739 |
-
thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
|
740 |
-
tmp(policy, storage_size);
|
741 |
-
void *ptr = static_cast<void*>(tmp.data().get());
|
742 |
-
|
743 |
-
status = core::alias_storage(ptr,
|
744 |
-
storage_size,
|
745 |
-
allocations,
|
746 |
-
allocation_sizes);
|
747 |
-
cuda_cub::throw_on_error(status, "copy_if failed on 2nd alias_storage");
|
748 |
-
|
749 |
-
size_type* d_num_selected_out
|
750 |
-
= thrust::detail::aligned_reinterpret_cast<size_type*>(allocations[0]);
|
751 |
-
|
752 |
-
status = doit_step(allocations[1],
|
753 |
-
temp_storage_bytes,
|
754 |
-
first,
|
755 |
-
stencil,
|
756 |
-
output,
|
757 |
-
predicate,
|
758 |
-
d_num_selected_out,
|
759 |
-
num_items,
|
760 |
-
stream,
|
761 |
-
debug_sync);
|
762 |
-
cuda_cub::throw_on_error(status, "copy_if failed on 2nd step");
|
763 |
-
|
764 |
-
status = cuda_cub::synchronize(policy);
|
765 |
-
cuda_cub::throw_on_error(status, "copy_if failed to synchronize");
|
766 |
-
|
767 |
-
size_type num_selected = get_value(policy, d_num_selected_out);
|
768 |
-
|
769 |
-
return output + num_selected;
|
770 |
-
}
|
771 |
-
|
772 |
-
} // namespace __copy_if
|
773 |
-
|
774 |
-
//-------------------------
|
775 |
-
// Thrust API entry points
|
776 |
-
//-------------------------
|
777 |
-
|
778 |
-
__thrust_exec_check_disable__
|
779 |
-
template <class Derived,
|
780 |
-
class InputIterator,
|
781 |
-
class OutputIterator,
|
782 |
-
class Predicate>
|
783 |
-
OutputIterator __host__ __device__
|
784 |
-
copy_if(execution_policy<Derived> &policy,
|
785 |
-
InputIterator first,
|
786 |
-
InputIterator last,
|
787 |
-
OutputIterator result,
|
788 |
-
Predicate pred)
|
789 |
-
{
|
790 |
-
OutputIterator ret = result;
|
791 |
-
|
792 |
-
if (__THRUST_HAS_CUDART__)
|
793 |
-
{
|
794 |
-
ret = __copy_if::copy_if(policy,
|
795 |
-
first,
|
796 |
-
last,
|
797 |
-
__copy_if::no_stencil_tag(),
|
798 |
-
result,
|
799 |
-
pred);
|
800 |
-
}
|
801 |
-
else
|
802 |
-
{
|
803 |
-
#if !__THRUST_HAS_CUDART__
|
804 |
-
ret = thrust::copy_if(cvt_to_seq(derived_cast(policy)),
|
805 |
-
first,
|
806 |
-
last,
|
807 |
-
result,
|
808 |
-
pred);
|
809 |
-
#endif
|
810 |
-
}
|
811 |
-
return ret;
|
812 |
-
} // func copy_if
|
813 |
-
|
814 |
-
__thrust_exec_check_disable__
|
815 |
-
template <class Derived,
|
816 |
-
class InputIterator,
|
817 |
-
class StencilIterator,
|
818 |
-
class OutputIterator,
|
819 |
-
class Predicate>
|
820 |
-
OutputIterator __host__ __device__
|
821 |
-
copy_if(execution_policy<Derived> &policy,
|
822 |
-
InputIterator first,
|
823 |
-
InputIterator last,
|
824 |
-
StencilIterator stencil,
|
825 |
-
OutputIterator result,
|
826 |
-
Predicate pred)
|
827 |
-
{
|
828 |
-
OutputIterator ret = result;
|
829 |
-
|
830 |
-
if (__THRUST_HAS_CUDART__)
|
831 |
-
{
|
832 |
-
ret = __copy_if::copy_if(policy,
|
833 |
-
first,
|
834 |
-
last,
|
835 |
-
stencil,
|
836 |
-
result,
|
837 |
-
pred);
|
838 |
-
}
|
839 |
-
else
|
840 |
-
{
|
841 |
-
#if !__THRUST_HAS_CUDART__
|
842 |
-
ret = thrust::copy_if(cvt_to_seq(derived_cast(policy)),
|
843 |
-
first,
|
844 |
-
last,
|
845 |
-
stencil,
|
846 |
-
result,
|
847 |
-
pred);
|
848 |
-
#endif
|
849 |
-
}
|
850 |
-
return ret;
|
851 |
-
} // func copy_if
|
852 |
-
|
853 |
-
} // namespace cuda_cub
|
854 |
-
} // end namespace thrust
|
855 |
-
|
856 |
-
#include <thrust/copy.h>
|
857 |
-
#endif
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/set_operations.h
DELETED
@@ -1,319 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/system/detail/generic/tag.h>
|
21 |
-
#include <thrust/pair.h>
|
22 |
-
|
23 |
-
namespace thrust
|
24 |
-
{
|
25 |
-
namespace system
|
26 |
-
{
|
27 |
-
namespace detail
|
28 |
-
{
|
29 |
-
namespace generic
|
30 |
-
{
|
31 |
-
|
32 |
-
|
33 |
-
template<typename ExecutionPolicy,
|
34 |
-
typename InputIterator1,
|
35 |
-
typename InputIterator2,
|
36 |
-
typename OutputIterator>
|
37 |
-
__host__ __device__
|
38 |
-
OutputIterator set_difference(thrust::execution_policy<ExecutionPolicy> &exec,
|
39 |
-
InputIterator1 first1,
|
40 |
-
InputIterator1 last1,
|
41 |
-
InputIterator2 first2,
|
42 |
-
InputIterator2 last2,
|
43 |
-
OutputIterator result);
|
44 |
-
|
45 |
-
|
46 |
-
// XXX it is an error to call this function; it has no implementation
|
47 |
-
template<typename ExecutionPolicy,
|
48 |
-
typename InputIterator1,
|
49 |
-
typename InputIterator2,
|
50 |
-
typename OutputIterator,
|
51 |
-
typename StrictWeakOrdering>
|
52 |
-
__host__ __device__
|
53 |
-
OutputIterator set_difference(thrust::execution_policy<ExecutionPolicy> &exec,
|
54 |
-
InputIterator1 first1,
|
55 |
-
InputIterator1 last1,
|
56 |
-
InputIterator2 first2,
|
57 |
-
InputIterator2 last2,
|
58 |
-
OutputIterator result,
|
59 |
-
StrictWeakOrdering comp);
|
60 |
-
|
61 |
-
|
62 |
-
template<typename ExecutionPolicy,
|
63 |
-
typename InputIterator1,
|
64 |
-
typename InputIterator2,
|
65 |
-
typename InputIterator3,
|
66 |
-
typename InputIterator4,
|
67 |
-
typename OutputIterator1,
|
68 |
-
typename OutputIterator2>
|
69 |
-
__host__ __device__
|
70 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
71 |
-
set_difference_by_key(thrust::execution_policy<ExecutionPolicy> &exec,
|
72 |
-
InputIterator1 keys_first1,
|
73 |
-
InputIterator1 keys_last1,
|
74 |
-
InputIterator2 keys_first2,
|
75 |
-
InputIterator2 keys_last2,
|
76 |
-
InputIterator3 values_first1,
|
77 |
-
InputIterator4 values_first2,
|
78 |
-
OutputIterator1 keys_result,
|
79 |
-
OutputIterator2 values_result);
|
80 |
-
|
81 |
-
|
82 |
-
template<typename ExecutionPolicy,
|
83 |
-
typename InputIterator1,
|
84 |
-
typename InputIterator2,
|
85 |
-
typename InputIterator3,
|
86 |
-
typename InputIterator4,
|
87 |
-
typename OutputIterator1,
|
88 |
-
typename OutputIterator2,
|
89 |
-
typename StrictWeakOrdering>
|
90 |
-
__host__ __device__
|
91 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
92 |
-
set_difference_by_key(thrust::execution_policy<ExecutionPolicy> &exec,
|
93 |
-
InputIterator1 keys_first1,
|
94 |
-
InputIterator1 keys_last1,
|
95 |
-
InputIterator2 keys_first2,
|
96 |
-
InputIterator2 keys_last2,
|
97 |
-
InputIterator3 values_first1,
|
98 |
-
InputIterator4 values_first2,
|
99 |
-
OutputIterator1 keys_result,
|
100 |
-
OutputIterator2 values_result,
|
101 |
-
StrictWeakOrdering comp);
|
102 |
-
|
103 |
-
|
104 |
-
template<typename ExecutionPolicy,
|
105 |
-
typename InputIterator1,
|
106 |
-
typename InputIterator2,
|
107 |
-
typename OutputIterator>
|
108 |
-
__host__ __device__
|
109 |
-
OutputIterator set_intersection(thrust::execution_policy<ExecutionPolicy> &system,
|
110 |
-
InputIterator1 first1,
|
111 |
-
InputIterator1 last1,
|
112 |
-
InputIterator2 first2,
|
113 |
-
InputIterator2 last2,
|
114 |
-
OutputIterator result);
|
115 |
-
|
116 |
-
|
117 |
-
// XXX it is an error to call this function; it has no implementation
|
118 |
-
template<typename ExecutionPolicy,
|
119 |
-
typename InputIterator1,
|
120 |
-
typename InputIterator2,
|
121 |
-
typename OutputIterator,
|
122 |
-
typename StrictWeakOrdering>
|
123 |
-
__host__ __device__
|
124 |
-
OutputIterator set_intersection(thrust::execution_policy<StrictWeakOrdering> &system,
|
125 |
-
InputIterator1 first1,
|
126 |
-
InputIterator1 last1,
|
127 |
-
InputIterator2 first2,
|
128 |
-
InputIterator2 last2,
|
129 |
-
OutputIterator result,
|
130 |
-
StrictWeakOrdering comp);
|
131 |
-
|
132 |
-
|
133 |
-
template<typename ExecutionPolicy,
|
134 |
-
typename InputIterator1,
|
135 |
-
typename InputIterator2,
|
136 |
-
typename InputIterator3,
|
137 |
-
typename OutputIterator1,
|
138 |
-
typename OutputIterator2>
|
139 |
-
__host__ __device__
|
140 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
141 |
-
set_intersection_by_key(thrust::execution_policy<ExecutionPolicy> &system,
|
142 |
-
InputIterator1 keys_first1,
|
143 |
-
InputIterator1 keys_last1,
|
144 |
-
InputIterator2 keys_first2,
|
145 |
-
InputIterator2 keys_last2,
|
146 |
-
InputIterator3 values_first1,
|
147 |
-
OutputIterator1 keys_result,
|
148 |
-
OutputIterator2 values_result);
|
149 |
-
|
150 |
-
|
151 |
-
template<typename ExecutionPolicy,
|
152 |
-
typename InputIterator1,
|
153 |
-
typename InputIterator2,
|
154 |
-
typename InputIterator3,
|
155 |
-
typename OutputIterator1,
|
156 |
-
typename OutputIterator2,
|
157 |
-
typename StrictWeakOrdering>
|
158 |
-
__host__ __device__
|
159 |
-
thrust::pair<OutputIterator1,OutputIterator2>
|
160 |
-
set_intersection_by_key(thrust::execution_policy<ExecutionPolicy> &system,
|
161 |
-
InputIterator1 keys_first1,
|
162 |
-
InputIterator1 keys_last1,
|
163 |
-
InputIterator2 keys_first2,
|
164 |
-
InputIterator2 keys_last2,
|
165 |
-
InputIterator3 values_first1,
|
166 |
-
OutputIterator1 keys_result,
|
167 |
-
OutputIterator2 values_result,
|
168 |
-
StrictWeakOrdering comp);
|
169 |
-
|
170 |
-
|
171 |
-
template<typename ExecutionPolicy,
|
172 |
-
typename InputIterator1,
|
173 |
-
typename InputIterator2,
|
174 |
-
typename OutputIterator>
|
175 |
-
__host__ __device__
|
176 |
-
OutputIterator set_symmetric_difference(thrust::execution_policy<ExecutionPolicy> &system,
|
177 |
-
InputIterator1 first1,
|
178 |
-
InputIterator1 last1,
|
179 |
-
InputIterator2 first2,
|
180 |
-
InputIterator2 last2,
|
181 |
-
OutputIterator result);
|
182 |
-
|
183 |
-
|
184 |
-
// XXX it is an error to call this function; it has no implementation
|
185 |
-
template<typename ExecutionPolicy,
|
186 |
-
typename InputIterator1,
|
187 |
-
typename InputIterator2,
|
188 |
-
typename OutputIterator,
|
189 |
-
typename StrictWeakOrdering>
|
190 |
-
__host__ __device__
|
191 |
-
OutputIterator set_symmetric_difference(thrust::execution_policy<ExecutionPolicy> &system,
                                        InputIterator1 first1,
                                        InputIterator1 last1,
                                        InputIterator2 first2,
                                        InputIterator2 last2,
                                        OutputIterator result,
                                        StrictWeakOrdering comp);


template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename InputIterator3,
         typename InputIterator4,
         typename OutputIterator1,
         typename OutputIterator2>
__host__ __device__
thrust::pair<OutputIterator1,OutputIterator2>
set_symmetric_difference_by_key(thrust::execution_policy<ExecutionPolicy> &system,
                                InputIterator1 keys_first1,
                                InputIterator1 keys_last1,
                                InputIterator2 keys_first2,
                                InputIterator2 keys_last2,
                                InputIterator3 values_first1,
                                InputIterator4 values_first2,
                                OutputIterator1 keys_result,
                                OutputIterator2 values_result);


template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename InputIterator3,
         typename InputIterator4,
         typename OutputIterator1,
         typename OutputIterator2,
         typename StrictWeakOrdering>
__host__ __device__
thrust::pair<OutputIterator1,OutputIterator2>
set_symmetric_difference_by_key(thrust::execution_policy<ExecutionPolicy> &system,
                                InputIterator1 keys_first1,
                                InputIterator1 keys_last1,
                                InputIterator2 keys_first2,
                                InputIterator2 keys_last2,
                                InputIterator3 values_first1,
                                InputIterator4 values_first2,
                                OutputIterator1 keys_result,
                                OutputIterator2 values_result,
                                StrictWeakOrdering comp);


template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename OutputIterator>
__host__ __device__
OutputIterator set_union(thrust::execution_policy<ExecutionPolicy> &system,
                         InputIterator1 first1,
                         InputIterator1 last1,
                         InputIterator2 first2,
                         InputIterator2 last2,
                         OutputIterator result);


// XXX it is an error to call this function; it has no implementation
template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename OutputIterator,
         typename StrictWeakOrdering>
__host__ __device__
OutputIterator set_union(thrust::execution_policy<ExecutionPolicy> &system,
                         InputIterator1 first1,
                         InputIterator1 last1,
                         InputIterator2 first2,
                         InputIterator2 last2,
                         OutputIterator result,
                         StrictWeakOrdering comp);


template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename InputIterator3,
         typename InputIterator4,
         typename OutputIterator1,
         typename OutputIterator2>
__host__ __device__
thrust::pair<OutputIterator1,OutputIterator2>
set_union_by_key(thrust::execution_policy<ExecutionPolicy> &system,
                 InputIterator1 keys_first1,
                 InputIterator1 keys_last1,
                 InputIterator2 keys_first2,
                 InputIterator2 keys_last2,
                 InputIterator3 values_first1,
                 InputIterator4 values_first2,
                 OutputIterator1 keys_result,
                 OutputIterator2 values_result);


template<typename ExecutionPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename InputIterator3,
         typename InputIterator4,
         typename OutputIterator1,
         typename OutputIterator2,
         typename StrictWeakOrdering>
__host__ __device__
thrust::pair<OutputIterator1,OutputIterator2>
set_union_by_key(thrust::execution_policy<ExecutionPolicy> &system,
                 InputIterator1 keys_first1,
                 InputIterator1 keys_last1,
                 InputIterator2 keys_first2,
                 InputIterator2 keys_last2,
                 InputIterator3 values_first1,
                 InputIterator4 values_first2,
                 OutputIterator1 keys_result,
                 OutputIterator2 values_result,
                 StrictWeakOrdering comp);


} // end namespace generic
} // end namespace detail
} // end namespace system
} // end namespace thrust

#include <thrust/system/detail/generic/set_operations.inl>
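The declarations above are the internal generic dispatch layer behind Thrust's public set operations; this header only declares them and pulls the definitions in from set_operations.inl. For orientation, here is a minimal sketch of how the user-facing thrust::set_union that ultimately dispatches through this layer is typically called. It is illustrative only, not part of the deleted file, and the input values are made up for the example.

#include <thrust/set_operations.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <vector>

int main()
{
  // Illustrative sketch: both input ranges must already be sorted.
  std::vector<int> ha = {0, 2, 4, 6, 8};
  std::vector<int> hb = {1, 2, 3, 4};
  thrust::device_vector<int> a(ha.begin(), ha.end());
  thrust::device_vector<int> b(hb.begin(), hb.end());
  thrust::device_vector<int> out(a.size() + b.size());

  // set_union writes the sorted union of the two ranges and returns
  // an iterator one past the last element written.
  auto out_end = thrust::set_union(thrust::device,
                                   a.begin(), a.end(),
                                   b.begin(), b.end(),
                                   out.begin());
  out.resize(out_end - out.begin());  // out now holds {0, 1, 2, 3, 4, 6, 8}
  return 0;
}
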
spaces/Cboudreau/AI_ZeroToHero/app.py
DELETED
@@ -1,58 +0,0 @@
import streamlit as st
import graphviz as gv
from graphviz import Graph
import folium
from streamlit_folium import folium_static

# Define the cluster relations graph using gvmap
g = Graph(format='svg')
g.graph_attr['bgcolor'] = '#FFFFFF'
g.graph_attr['outputorder'] = 'edgesfirst'
g.graph_attr['size'] = '10,10'
g.node_attr['style'] = 'filled'
g.node_attr['shape'] = 'box'
g.node_attr['fillcolor'] = '#FFDAB9'

with g.subgraph(name='cluster_NJ') as c:
    c.graph_attr['bgcolor'] = '#ADD8E6'
    c.node_attr['color'] = '#000000'
    c.node_attr['fontcolor'] = '#000000'
    c.attr(label='New Jersey', fontsize='24')
    c.node('Hackensack Meridian Health', URL='https://www.hackensackmeridianhealth.org/', target='_blank', tooltip='Hackensack Meridian Health: Hackensack University Medical Center')
    c.node('RWJBarnabas Health', URL='https://www.rwjbh.org/', target='_blank', tooltip='RWJBarnabas Health: Robert Wood Johnson University Hospital')
    c.node('Atlantic Health System', URL='https://www.atlantichealth.org/', target='_blank', tooltip='Atlantic Health System: Morristown Medical Center')
    c.node('Virtua Health', URL='https://www.virtua.org/', target='_blank', tooltip='Virtua Health: Virtua Memorial Hospital')
    c.node('Inspira Health', URL='https://www.inspirahealthnetwork.org/', target='_blank', tooltip='Inspira Health: Inspira Medical Center Vineland')
    c.node('Cooper University Health Care', URL='https://www.cooperhealth.org/', target='_blank', tooltip='Cooper University Health Care: Cooper University Hospital')
    c.node('University Hospital', URL='https://www.uhnj.org/', target='_blank', tooltip='University Hospital: University Hospital')
    c.node('Robert Wood Johnson University Hospital Hamilton', URL='https://www.rwjbh.org/robert-wood-johnson-university-hospital-hamilton/', target='_blank', tooltip='Robert Wood Johnson University Hospital Hamilton: Robert Wood Johnson University Hospital Hamilton')
    c.node('Trinitas Regional Medical Center', URL='https://www.trinitasrmc.org/', target='_blank', tooltip='Trinitas Regional Medical Center: Trinitas Regional Medical Center')
    c.node('Capital Health Regional Medical Center', URL='https://www.capitalhealth.org/', target='_blank', tooltip='Capital Health Regional Medical Center: Capital Health Regional Medical Center')

# Render the graph using streamlit
st.graphviz_chart(g)

# Define hospitals data
hospitals = [('Hackensack Meridian Health', 'Hackensack University Medical Center', 40.899886, -74.039179),
             ('RWJBarnabas Health', 'Robert Wood Johnson University Hospital', 40.491301, -74.450611),
             ('Atlantic Health System', 'Morristown Medical Center', 40.787231, -74.473851),
             ('Virtua Health', 'Virtua Memorial Hospital', 39.931229, -75.025831),
             ('Inspira Health', 'Inspira Medical Center Vineland', 39.460225, -75.035542),
             ('Cooper University Health Care', 'Cooper University Hospital', 39.942743, -75.119090),
             ('University Hospital', 'University Hospital', 40.742310, -74.177609),
             ('Robert Wood Johnson University Hospital Hamilton', 'Robert Wood Johnson University Hospital Hamilton', 40.214008, -74.679619),
             ('Trinitas Regional Medical Center', 'Trinitas Regional Medical Center', 40.661474, -74.215013),
             ('Capital Health Regional Medical Center', 'Capital Health Regional Medical Center', 40.266778, -74.796452)]

# Create a map centered on New Jersey
m = folium.Map(location=[40.0583, -74.4057], zoom_start=8)

# Add markers for each hospital
for hospital in hospitals:
    folium.Marker(
        location=[hospital[2], hospital[3]],
        popup=f'{hospital[1]}<br>{hospital[2]},{hospital[3]}'
    ).add_to(m)

# Display the map in Streamlit
folium_static(m)
spaces/CikeyQI/meme-api/meme_generator/memes/mihoyo/__init__.py
DELETED
@@ -1,25 +0,0 @@
from pathlib import Path
from typing import List

from pil_utils import BuildImage

from meme_generator import add_meme
from meme_generator.utils import make_png_or_gif

img_dir = Path(__file__).parent / "images"


def mihoyo(images: List[BuildImage], texts, args):
    mask = BuildImage.new("RGBA", (500, 60), (53, 49, 65, 230))
    logo = BuildImage.open(img_dir / "logo.png").resize_height(50)

    def make(img: BuildImage) -> BuildImage:
        img = img.convert("RGBA").resize((500, 500), keep_ratio=True)
        img.paste(mask, (0, 440), alpha=True)
        img.paste(logo, ((img.width - logo.width) // 2, 445), alpha=True)
        return img.circle_corner(100)

    return make_png_or_gif(images[0], make)


add_meme("mihoyo", mihoyo, min_images=1, max_images=1, keywords=["米哈游"])
spaces/CofAI/sd-2.1/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Stable Diffusion 2.1
emoji: 🔥
colorFrom: indigo
colorTo: yellow
sdk: gradio
sdk_version: 3.39.0
app_file: sd-2-1.py
pinned: true
license: openrail
---

This is the space for image generation!