Commit 0307911
Parent(s): 02cee30
Update parquet files (step 45 of 476)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/.v1/SECURITY.md +0 -4
- spaces/101-5/gpt4free/models_for_langchain/model.py +0 -67
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 The Best Software Emulator for DirectX 9.md +0 -122
- spaces/1gistliPinn/ChatGPT4/Examples/Csi.column.v8.4.0 Patch.rar.md +0 -6
- spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate APK MOD Unlimited Money and More Features for Bus Lovers.md +0 -81
- spaces/1phancelerku/anime-remove-background/Download 39 40 Meye Yie.md +0 -70
- spaces/1phancelerku/anime-remove-background/Download Mario Kart Tour Mod Apk and Enjoy Unlimited Coins and Rubies.md +0 -89
- spaces/1phancelerku/anime-remove-background/Download Mod Incredibox and Explore the Galaxy the Ocean and the Nightmare.md +0 -112
- spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/__init__.py +0 -12
- spaces/801artistry/RVC801/Fixes/tensor-launch.py +0 -15
- spaces/AIConsultant/MusicGen/audiocraft/optim/dadam.py +0 -252
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/pe.py +0 -149
- spaces/Abhilashvj/planogram-compliance/utils/callbacks.py +0 -86
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/prisoner_dilemma.py +0 -49
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/BBCodeText.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/LayoutChildren.js +0 -29
- spaces/Akmyradov/TurkmenTTSweSTT/vits/__init__.py +0 -1
- spaces/AlexWang/lama/bin/gen_debug_mask_dataset.py +0 -61
- spaces/AlexWang/lama/models/ade20k/__init__.py +0 -1
- spaces/Aloento/9Nine-PITS/pqmf.py +0 -136
- spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatglm.py +0 -140
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py +0 -781
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_sde.md +0 -23
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py +0 -65
- spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py +0 -2
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/losses.py +0 -77
- spaces/AnshuK23/Customer-review-analysis/README.md +0 -13
- spaces/Anthony7906/MengHuiMXD_GPT/modules/presets.py +0 -222
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py +0 -39
- spaces/BAAI/AltDiffusion-m9/README.md +0 -13
- spaces/Benson/text-generation/Examples/Descarga Zktime.net Lite 2.0.3.md +0 -109
- spaces/BhagatSurya/convet_pdf_to_txt/app.py +0 -82
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/main.py +0 -12
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/unicode.py +0 -352
- spaces/Billyosoro/ESRGAN/inference_realesrgan_video.py +0 -199
- spaces/CVPR/LIVE/thrust/thrust/detail/complex/ctanh.h +0 -200
- spaces/CVPR/LIVE/thrust/thrust/detail/modern_gcc_required.h +0 -26
- spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene-checkpoint.py +0 -123
- spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnet.py +0 -305
- spaces/CVPR/WALT/mmdet/models/detectors/scnet.py +0 -10
- spaces/CVPR/drawings-to-human/.github/README.md +0 -1
- spaces/Cat125/text-generator-v2/generation/generators.py +0 -48
- spaces/Chrysoula/voice_to_text_swedish/app.py +0 -59
- spaces/CikeyQI/Yunzai/Yunzai/lib/events/connect.js +0 -23
- spaces/Cloudyy/bark-voice-cloning/hubert/hubert_manager.py +0 -33
- spaces/CofAI/chat.b4/client/html/index.html +0 -126
- spaces/Cpp4App/Cpp4App/SEM/retention_pp_processing.py +0 -24
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-5605d000.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py +0 -230
spaces/101-5/gpt4free/g4f/.v1/SECURITY.md
DELETED
@@ -1,4 +0,0 @@
-## Reporting a Vulnerability
-
-Reporting a Vulnerability
-Please report (suspected) security vulnerabilities to https://t.me/xtekky. You will receive a response within 48 hours. If the issue is confirmed, we will release a patch as soon as possible depending on complexity but historically within a few days.
spaces/101-5/gpt4free/models_for_langchain/model.py
DELETED
@@ -1,67 +0,0 @@
-from typing import Any, List, Mapping, Optional
-from g4f.Provider import (
-    Ails,
-    You,
-    Bing,
-    Yqcloud,
-    Theb,
-    Aichat,
-    Bard,
-    Vercel,
-    Forefront,
-    Lockchat,
-    Liaobots,
-    H2o,
-    ChatgptLogin,
-    DeepAi,
-    GetGpt
-)
-import g4f
-from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.base import LLM
-provider_dict = {
-    'Ails': Ails,
-    'You': You,
-    'Bing': Bing,
-    'Yqcloud': Yqcloud,
-    'Theb': Theb,
-    'Aichat': Aichat,
-    'Bard': Bard,
-    'Vercel': Vercel,
-    'Forefront': Forefront,
-    'Lockchat': Lockchat,
-    'Liaobots': Liaobots,
-    'H2o': H2o,
-    'ChatgptLogin': ChatgptLogin,
-    'DeepAi': DeepAi,
-    'GetGpt': GetGpt
-}
-
-class CustomLLM(LLM):
-    model_name: str="gpt-3.5-turbo"
-    provider_name: str="GetGpt"
-    @property
-    def _llm_type(self) -> str:
-        return "custom"
-
-    def _call(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        model_name = 'gpt-3.5-turbo',
-        provider = GetGpt
-    ) -> str:
-        if stop is not None:
-            raise ValueError("stop kwargs are not permitted.")
-        bot_msg = g4f.ChatCompletion.create(model=self.model_name,
-                                            provider=provider_dict[self.provider_name],
-                                            messages=[{"role": "user",
-                                                       "content": prompt}],
-                                            stream=False)
-        return bot_msg
-
-    @property
-    def _identifying_params(self) -> Mapping[str, Any]:
-        """Get the identifying parameters."""
-        return {"model:": "gpt-3.5-turbo"}
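Note on the deleted models_for_langchain/model.py above: CustomLLM was a thin langchain wrapper around g4f. A minimal usage sketch follows; it is hypothetical and not part of the diff, and it assumes the g4f and langchain packages from the file's imports are installed, with model and provider names taken from the file's own defaults and provider_dict.

    # Hypothetical usage sketch of the deleted CustomLLM wrapper (not part of the diff).
    from models_for_langchain.model import CustomLLM

    # Field values come from the deleted file: model_name defaults to "gpt-3.5-turbo",
    # provider_name must be a key of provider_dict (e.g. "GetGpt", "DeepAi", "Bing").
    llm = CustomLLM(model_name="gpt-3.5-turbo", provider_name="GetGpt")

    # LLM.__call__ dispatches to _call, which forwards the prompt to
    # g4f.ChatCompletion.create and returns the provider's reply as a string.
    print(llm("Hello, who are you?"))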
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 The Best Software Emulator for DirectX 9.md
DELETED
@@ -1,122 +0,0 @@
-
-<h1>Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32: A Guide for Gamers</h1>
-<p>If you are a gamer who loves playing high-end games on your PC, but you don't have a powerful graphics card, you might have encountered some problems with running some games smoothly. You might have experienced lagging, stuttering, crashing, or even not being able to launch some games at all. This can be very frustrating and disappointing, especially if you have spent a lot of money on buying those games.</p>
-<h2>Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32</h2><br /><p><b><b>DOWNLOAD</b> 🌟 <a href="https://byltly.com/2uKySu">https://byltly.com/2uKySu</a></b></p><br /><br />
-<p>Fortunately, there is a solution that can help you overcome these issues and enjoy your favorite games without any hassle. It is called Swift Shader DX9 SM3, and it is a software that can emulate the DirectX 9 features on your CPU, allowing you to run games that require DirectX 9 support even if your graphics card does not support it. In this article, we will explain what Swift Shader DX9 SM3 is, why you need it, how to download and install it, and how to use it for gaming.</p>
-<h2>What is Swift Shader DX9 SM3?</h2>
-<h3>A brief introduction to Swift Shader DX9 SM3 and its features</h3>
-<p>Swift Shader DX9 SM3 is a software developed by TransGaming Technologies, a company that specializes in creating cross-platform gaming solutions. It is part of their SwiftShader product line, which also includes SwiftShader DX8 and SwiftShader OpenGL.</p>
-<p>SwiftShader is a high-performance CPU-based implementation of various graphics APIs, such as DirectX and OpenGL. It can emulate the features and functions of these APIs on any CPU, regardless of its capabilities or architecture. This means that it can run games that require these APIs even if your graphics card does not support them or if you don't have a graphics card at all.</p>
-<p>SwiftShader DX9 SM3 is specifically designed to emulate the DirectX 9 features on your CPU. It supports all the DirectX 9 features, such as pixel shaders, vertex shaders, texture filtering, alpha blending, fogging, lighting, etc. It also supports Shader Model 3.0, which is required by some games that use advanced graphics effects.</p>
-<h3>Why do you need Swift Shader DX9 SM3?</h3>
-<h4>The benefits of using Swift Shader DX9 SM3 for gaming</h4>
-<p>The main benefit of using Swift Shader DX9 SM3 for gaming is that it allows you to run games that require DirectX 9 support even if your graphics card does not support it or if you don't have a graphics card at all. This means that you can play games that would otherwise be impossible or very difficult to play on your PC.</p>
-<p>Some examples of games that require DirectX 9 support are GTA IV, FIFA 14, Call of Duty 4: Modern Warfare, Assassin's Creed, Bioshock, Crysis, etc. These are some of the most popular and acclaimed games in the history of gaming, and they offer amazing gameplay and graphics that you don't want to miss out on.</p>
-<p>By using Swift Shader DX9 SM3, you can enjoy these games without having to upgrade your hardware or buy a new PC. You can also save money on buying expensive graphics cards that might become obsolete soon. You can also avoid compatibility issues that might arise from using different graphics cards or drivers.</p>
-<p>How to download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for free<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 download link<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 full version download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 crack download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 serial key download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 torrent download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 rar download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 zip download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 iso download<br />
-ATH Swift Shader DX9 SM3 Build 3383(x86) 32 direct download<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 from official website<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 from softonic<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 from filehippo<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 from cnet<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 from softpedia<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for windows xp<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for windows vista<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for windows 7<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for windows 8<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for windows 10<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for mac os x<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for linux<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for android<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for ios<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for ps4<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for xbox one<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for nintendo switch<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for pc games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for online games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for offline games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for emulator games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for low-end pc games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for high-end pc games<br />
-Download ATH Swift Shader DX9 SM3 Build 3383(x86) 32 for action games<br />
-Download ATH Swift Shader DX9 SM3 Build</p>
-<h4>The drawbacks of using Swift Shader DX9 SM3 for gaming</h4>
-<p>While using Swift Shader DX9 SM3 for gaming has many benefits, it also has some drawbacks that you should be aware of before using it. The main drawback is that it can affect the performance and quality of your games.</p>
-<p>Since Swift Shader DX9 SM3 emulates the DirectX 9 features on your CPU, it consumes a lot of CPU resources and power. This means that it can slow down your PC and reduce its battery life if you are using a laptop. It can also cause overheating issues if your CPU is not well ventilated or cooled.</p>
-<p>Moreover, since Swift Shader DX9 SM3 emulates the DirectX 9 features on your CPU, it cannot match the quality and accuracy of a real graphics card. This means that it can cause some graphical glitches, artifacts, or distortions in your games. It can also lower the resolution, frame rate, or detail level of your games.</p>
-<p>Therefore, using Swift Shader DX9 SM3 for gaming is not recommended if you have a high-end PC or if you are very particular about the performance and quality of your games. It is only suitable for low-end PCs or casual gamers who just want to play some games without spending too much money or effort.</p>
-<h2>How to download and install Swift Shader DX9 SM3?</h2>
-<h3>The steps to download Swift Shader DX9 SM3 from Google Drive</h3>
-<p>If you want to download Swift Shader DX9 SM3 from Google Drive, you can follow these steps:</p>
-<ol>
-<li>Go to this link: <a href="https://drive.google.com/file/d/0B0ZiaCxLbr17U0tkNVR0bUlmUXc/edit">https://drive.google.com/file/d/0B0ZiaCxLbr17U0tkNVR0bUlmUXc/edit</a>. This is a Google Drive link that contains the file named "SwiftShader_DX9_SM3 Build_3383(x86).rar".</li>
-<li>Click on the "Download" button at the top right corner of the page. This will start downloading the file to your PC.</li>
-<li>Wait for the download to finish. The file size is about 1.17 MB.</li>
-</ol>
-<h4>How to verify the authenticity and safety of the file</h4>
-<p>Before installing any file from unknown sources, it is always advisable to verify its authenticity and safety. This can prevent potential malware infections or data breaches on your PC. To verify the authenticity and safety of the file named "SwiftShader_DX9_SM3 Build_3383(x86).rar", you can do these things:</p>
-<ul>
-<li>Check the file name and extension. The file name should be exactly "SwiftShader_DX9_SM3 Build_3383(x86).rar" and the extension should be ".rar". If it is different or has an extra extension like ".exe" or ".zip", do not open it.</li>
-<li>Check the file size. The file size should be about 1.17 MB. If it is too large or too small, do not open it.</li>
-<li>Check the file source. The file source should be Google Drive. If it is from another website or platform, do not open it.</li>
-<li>Check the file content. You can use a tool like WinRAR or 7-Zip to open the file and see its content. The file should contain only one folder named "SwiftShader_DX9_SM3 Build_3383(x86)". If it contains other files or folders, do not open it.</li>
-<h4>How to extract and copy the file to the game folder</h4>
-<p>After verifying the authenticity and safety of the file named "SwiftShader_DX9_SM3 Build_3383(x86).rar", you can extract and copy it to the game folder. To do this, you can follow these steps:</p>
-<ol>
-<li>Open the file with a tool like WinRAR or 7-Zip. You will see a folder named "SwiftShader_DX9_SM3 Build_3383(x86)" inside the file.</li>
-<li>Extract the folder to a location of your choice on your PC. You can do this by right-clicking on the folder and choosing "Extract to SwiftShader_DX9_SM3 Build_3383(x86)\" or by dragging and dropping the folder to your desired location.</li>
-<li>Locate the game folder of the game that you want to play with Swift Shader DX9 SM3. The game folder is usually located in the "Program Files" or "Program Files (x86)" directory on your PC. For example, if you want to play GTA IV, the game folder might be "C:\Program Files (x86)\Rockstar Games\Grand Theft Auto IV".</li>
-<li>Copy the folder named "SwiftShader_DX9_SM3 Build_3383(x86)" to the game folder. You can do this by right-clicking on the folder and choosing "Copy" or by pressing Ctrl+C on your keyboard, and then right-clicking on the game folder and choosing "Paste" or by pressing Ctrl+V on your keyboard.</li>
-<li>Rename the file named "d3d9.dll" inside the folder "SwiftShader_DX9_SM3 Build_3383(x86)" to something else. You can do this by right-clicking on the file and choosing "Rename" or by pressing F2 on your keyboard, and then typing a new name for the file. For example, you can rename it to "d3d9_swiftshader.dll". This is to avoid conflicts with other files that might have the same name.</li>
-</ol>
-<h3>The steps to download Swift Shader DX9 SM3 from Clifton Road Car Sal</h3>
-<p>If you want to download Swift Shader DX9 SM3 from Clifton Road Car Sal, you can follow these steps:</p>
-<ol>
-<li>Go to this link: <a href="https://www.cliftonroadcarsales.co.uk/forum/general-discussions/swiftshader-dx9-sm3-build-3383-zip">https://www.cliftonroadcarsales.co.uk/forum/general-discussions/swiftshader-dx9-sm3-build-3383-zip</a>. This is a website that contains a forum post that has a download link for the file named "SwiftShader DX9 SM3 Build 3383.zip".</li>
-<li>Scroll down to the bottom of the page and find the download link. The download link is a blue button that says "Download Now". It is located under a banner that says "Download SwiftShader DX9 SM3 Build 3383.zip".</li>
-<li>Click on the download link. This will open a new tab or window that will redirect you to another website.</li>
-</ol>
-<h4>How to deal with potential pop-ups and ads</h4>
-<p>Before downloading the file from Clifton Road Car Sal, you should be aware that you might encounter some pop-ups and ads on the website. These are not part of the file download process and they might be annoying or harmful. To deal with them, you can do these things:</p>
-<ul>
-<li>Use an ad blocker. An ad blocker is a software or extension that can block or remove ads from websites. This can make your browsing experience faster and safer. Some examples of ad blockers are uBlock Origin, AdBlock Plus, or AdGuard.</li>
-<li>Use a pop-up blocker. A pop-up blocker is a software or extension that can block or close pop-up windows that might appear on websites. These are usually unwanted or malicious windows that might contain viruses or malware. Some examples of pop-up blockers are Pop-up Blocker Pro, Pop up Blocker for Chrome, or Poper Blocker.</li>
-<li>Use common sense. If you see any suspicious or irrelevant windows, tabs, messages, or buttons on the website, do not click on them or follow their instructions. They might be scams or phishing attempts that might try to steal your personal or financial information.</li>
-</ul>
-<h4>How to unzip and copy the file to the game folder</h4>
-the file named "SwiftShader DX9 SM3 Build 3383.zip" from the website. To do this, you can follow these steps:</p>
-<ol>
-<li>Wait for the download to finish. The file size is about 1.17 MB.</li>
-<li>Open the file with a tool like WinRAR or 7-Zip. You will see a folder named "SwiftShader_DX9_SM3 Build_3383(x86)" inside the file.</li>
-<li>Extract the folder to a location of your choice on your PC. You can do this by right-clicking on the folder and choosing "Extract to SwiftShader_DX9_SM3 Build_3383(x86)\" or by dragging and dropping the folder to your desired location.</li>
-<li>Follow the same steps as above to copy and rename the file to the game folder.</li>
-</ol>
-<h2>How to use Swift Shader DX9 SM3 for gaming?</h2>
-<h3>The settings and options available for Swift Shader DX9 SM3</h3>
-<p>After installing Swift Shader DX9 SM3 to the game folder, you can use it for gaming. To do this, you need to configure some settings and options for Swift Shader DX9 SM3. These settings and options are located in a file named "SwiftShader.ini" inside the folder "SwiftShader_DX9_SM3 Build_3383(x86)". You can open this file with a text editor like Notepad or WordPad.</p>
-<p>The file contains several sections that correspond to different aspects of Swift Shader DX9 SM3, such as SystemInfo, Renderer, PixelShader, VertexShader, etc. Each section has several parameters that you can modify to change the behavior and performance of Swift Shader DX9 SM3. For example, you can change the value of "NumThreads" under SystemInfo to change the number of CPU threads that Swift Shader DX9 SM3 uses.</p>
-<p>You can find a detailed explanation of each section and parameter in this link: <a href="https://github.com/google/swiftshader/blob/master/docs/SwiftShader.ini.md">https://github.com/google/swiftshader/blob/master/docs/SwiftShader.ini.md</a>. This is a documentation page from the official GitHub repository of SwiftShader, which is the source code of Swift Shader DX9 SM3 and other versions.</p>
-<h4>How to adjust the performance and quality of Swift Shader DX9 SM3</h4>
-<p>The most important settings and options that you need to adjust for Swift Shader DX9 SM3 are those that affect its performance and quality. These are mainly located in the sections Renderer, PixelShader, and VertexShader. By changing these settings and options, you can optimize Swift Shader DX9 SM3 for your PC and game.</p>
-<p>The general rule of thumb is that higher values mean higher quality but lower performance, and lower values mean lower quality but higher performance. Therefore, you need to find a balance between quality and performance that suits your preferences and needs. You can also experiment with different values and see how they affect your game.</p>
-<p>Here are some examples of settings and options that you can adjust for performance and quality:</p>
-<ul>
-<li>"EnableVSync" under Renderer: This parameter controls whether Swift Shader DX9 SM3 synchronizes its frame rate with your monitor's refresh rate. If you set it to 1, it will enable VSync, which can prevent screen tearing but also limit your frame rate. If you set it to 0, it will disable VSync, which can increase your frame rate but also cause screen tearing.</li>
-<li>"MaxAnisotropy" under Renderer: This parameter controls the level of anisotropic filtering that Swift Shader DX9 SM3 applies to textures. Anisotropic filtering is a technique that improves the quality and sharpness of textures at oblique angles. The higher the value, the better the texture quality but also the more CPU resources required. The valid values are 1, 2, 4, 8, or 16.</li>
-<li>"PixelShaderModel" under PixelShader: This parameter controls the pixel shader model that Swift Shader DX9 SM3 emulates. Pixel shaders are programs that determine how pixels are rendered on the screen. The higher the model number, the more advanced and complex pixel shaders that Swift Shader DX9 SM3 can emulate but also the more CPU resources required. The valid values are 2 or 3.</li>
-SwiftShader emulates, having a lower shader model can reduce its workload and improve its performance and quality.</li>
-</ul>
-<p>These are just some tips that might help. You can also experiment with different settings and options and see how they affect your game.</p>
-<h4>Can I use SwiftShader with other graphics enhancers or mods?</h4>
-<p>Yes, you can use SwiftShader with other graphics enhancers or mods that are compatible with your game and PC. However, you should be careful and cautious when doing so, as some graphics enhancers or mods might conflict or interfere with SwiftShader or cause other problems. You should always backup your game files and settings before installing or using any graphics enhancers or mods.</p>
-<h2></h2></p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Csi.column.v8.4.0 Patch.rar.md
DELETED
@@ -1,6 +0,0 @@
-<h2>csi.column.v8.4.0 patch.rar</h2><br /><p><b><b>DOWNLOAD</b> » <a href="https://imgfil.com/2uy25y">https://imgfil.com/2uy25y</a></b></p><br /><br />
-
-... bz1637248 libvma [8.7.5-1] - Rebase to upstream v8.7.5 release - Resolves: ... Rebase to latest upstream release v4.11.0-5 - mstconfig support for prio_tag and ... usage column - Related: #1300852 gnome-weather [3.26.0-1] - Update to ... error [3.1.2-13] - Fix CVE-2019-18408: RAR use-after-free This update is available ... 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate APK MOD Unlimited Money and More Features for Bus Lovers.md
DELETED
@@ -1,81 +0,0 @@
-<br />
-<h1>Bus Simulator Ultimate Dinero Infinito APK: How to Get Unlimited Money in the Best Bus Simulator Game</h1>
-<p>Do you love driving buses and exploring different cities? Do you want to experience the thrill of running your own bus company and competing with other players? Do you wish you had unlimited money to buy any bus, upgrade, or item you want in the game? If you answered yes to any of these questions, then you need to try Bus Simulator Ultimate Dinero Infinito APK.</p>
-<p>Bus Simulator Ultimate is one of the most popular and realistic bus simulator games on Android. It lets you drive various types of buses across different countries and continents, pick up and drop off passengers, customize your bus interior and exterior, and manage your own bus company. It also has a multiplayer mode where you can race with other players online and rank up in the global leaderboard.</p>
-<h2>bus simulator ultimate dinero infinito apk</h2><br /><p><b><b>Download Zip</b> ->>->>->> <a href="https://jinyurl.com/2uNTiE">https://jinyurl.com/2uNTiE</a></b></p><br /><br />
-<p>However, as fun as it is, Bus Simulator Ultimate also requires a lot of money to unlock all the features and items in the game. You need to earn money by completing missions, driving carefully, and satisfying your passengers. But this can take a long time and be quite tedious. That's why many players look for ways to get unlimited money in the game without spending real money.</p>
-<p>That's where Bus Simulator Ultimate Dinero Infinito APK comes in. This is a modified version of the original game that gives you unlimited money and resources. You can download and install it on your Android device for free and enjoy all the benefits of having unlimited money in Bus Simulator Ultimate. In this article, we will tell you more about Bus Simulator Ultimate Dinero Infinito APK, its features, how to use it, and where to download it.</p>
-<h2>Features of Bus Simulator Ultimate Dinero Infinito APK</h2>
-<p>Bus Simulator Ultimate Dinero Infinito APK has all the features of the original game plus some extra ones that make it more enjoyable and convenient. Here are some of the main features of Bus Simulator Ultimate Dinero Infinito APK:</p>
-<h3>Realistic bus driving experience</ <p>One of the best things about Bus Simulator Ultimate is that it gives you a realistic and immersive bus driving experience. You can choose from different types of buses, such as city buses, school buses, double-decker buses, and more. You can also customize your bus with various skins, accessories, and interior designs. You can even change the license plate, horn sound, and driver name of your bus.</p>
-<p>But that's not all. You can also drive your bus across different routes and cities around the world, such as Germany, Turkey, Italy, France, Spain, USA, and more. You can see the landmarks, scenery, and culture of each country as you drive. You can also pick up and drop off passengers at different bus stops and stations. You have to follow the traffic rules, signals, and signs, as well as the speed limit and the time schedule. You also have to deal with different weather conditions, such as rain, snow, fog, and night.</p>
-<p>And if that's not enough, you can also enjoy the realistic sound effects of the engine, brakes, horns, passengers, and traffic. You can also listen to radio stations from different countries while driving. You can even use the voice navigation system to guide you along the way. All these features make Bus Simulator Ultimate a very realistic and fun bus simulator game.</p>
-<h3>Multiplayer mode and online ranking</h3>
-<p>Another great feature of Bus Simulator Ultimate is that it has a multiplayer mode where you can compete with other players online. You can join or create your own bus races and challenge other drivers to see who is the fastest and the best. You can also chat with other players and make friends or rivals.</p>
-<p>bus simulator ultimate mod apk unlimited money and gold<br />
-descargar bus simulator ultimate hackeado para android<br />
-bus simulator ultimate apk premium unlocked free download<br />
-como tener dinero infinito en bus simulator ultimate<br />
-bus simulator ultimate mod menu apk latest version<br />
-bus simulator ultimate apk full mega mod<br />
-trucos para bus simulator ultimate android<br />
-bus simulator ultimate hack apk 2021<br />
-bus simulator ultimate apk sin anuncios<br />
-bus simulator ultimate mod apk todo desbloqueado<br />
-descargar bus simulator ultimate ultima version apk<br />
-bus simulator ultimate apk mod dinero y oro infinito<br />
-como descargar bus simulator ultimate hackeado<br />
-bus simulator ultimate mod apk menu 2021<br />
-bus simulator ultimate apk obb download<br />
-bus simulator ultimate hack apk mediafıre<br />
-bus simulator ultimate mod apk unlimited xp<br />
-como instalar bus simulator ultimate mod apk<br />
-bus simulator ultimate apk mod offline<br />
-bus simulator ultimate hack apk sin root<br />
-bus simulator ultimate mod apk revdl<br />
-descargar bus simulator ultimate gratis para android<br />
-bus simulator ultimate mod apk unlimited fuel<br />
-como actualizar bus simulator ultimate hackeado<br />
-bus simulator ultimate apk mod no ads</p>
-<p>But that's not all. You can also join or create your own bus company and cooperate with other drivers. You can share your routes, buses, and profits with your teammates. You can also hire or fire drivers as you wish. You can also compete with other bus companies and see who has the most passengers, revenue, and reputation.</p>
-<p>And if that's not enough, you can also earn rewards and rank up in the global leaderboard. You can see your position and stats compared to other players around the world. You can also unlock achievements and trophies as you play. All these features make Bus Simulator Ultimate a very competitive and social bus simulator game.</p>
-<h3>Unlimited money and resources</h3>
-<p>The best feature of Bus Simulator Ultimate Dinero Infinito APK is that it gives you unlimited money and resources in the game. This means that you can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort.</p>
-<p>But that's not all. You can also enjoy the game without ads or limitations. You don't have to watch annoying ads or wait for energy or lives to refill. You can play the game as much as you want without any interruption or restriction.</p>
-<p>And if that's not enough, you can also use the money to support your favorite bus company or driver in the multiplayer mode. You can donate money to your teammates or friends to help them grow their business or improve their performance. You can also use the money to sabotage your rivals or enemies to make them lose their customers or reputation. All these features make Bus Simulator Ultimate Dinero Infinito APK a very enjoyable and convenient bus simulator game.</p>
-<h2>How to use Bus Simulator Ultimate Dinero Infinito APK</h2>
-<p>If you are interested in using Bus Simulator Ultimate Dinero Infinito APK, here are some simple steps on how to download and install it on your Android device:</p>
-<h3>Download and install the APK file from a trusted source</h3>
-<p>The first step is to download the APK file from a trusted source. There are many websites that offer Bus Simulator Ultimate Dinero Infinito APK for free download, but not all of them are safe or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information.</p>
-<p>Therefore, you need to be careful when choosing where to download Bus Simulator Ultimate Dinero Infinito APK from. You need to check the compatibility and security of the file before downloading it. You need to make sure that it is compatible with your device model and Android version. You also need to scan it with an antivirus program or a malware detector to ensure that it is free from any harmful software.</p>
-<p>One of the best sources to download Bus Simulator Ultimate Dinero Infinito APK from is [text]. This website provides safe and secure downloads of various APK files for Android games and apps. It also provides detailed information about each file, such as its size, version, developer, rating, reviews, screenshots, and more. You can easily find Bus Simulator Ultimate Dinero Infinito APK by searching for it on the website or clicking on this link: [Bus Simulator Ultimate Dinero Infinito APK].</p>
-<p>Once you have found the file, you need to download it to your device. You can do this by clicking on the download button or scanning the QR code on the website. The file will be saved in your device's download folder or any other location you choose.</p>
-<h3>Enable unknown sources in your device settings to allow installation</h3>
-<p>The next step is to enable unknown sources in your device settings to allow installation of Bus Simulator Ultimate Dinero Infinito APK. This is because the file is not from the official Google Play Store and your device may block it by default.</p>
-<p>To enable unknown sources, you need to go to your device settings and look for the security or privacy option. There, you will find a toggle or checkbox that says "allow installation of apps from unknown sources" or something similar. You need to turn it on or check it to enable it.</p>
-<p>If you are not sure how to do this, you can follow this guide: [How to enable unknown sources on Android]. This guide will show you how to enable unknown sources on different Android versions and devices.</p>
-<h3>Follow the instructions on the screen to complete the installation</h3>
-<p>The final step is to follow the instructions on the screen to complete the installation of Bus Simulator Ultimate Dinero Infinito APK. This is very easy and straightforward. You just need to locate the file in your device and tap on it to open it. Then, you will see a pop-up window that asks you if you want to install the app. You need to tap on "install" or "yes" to confirm.</p>
-<p>The installation process will take a few seconds or minutes depending on your device and internet speed. Once it is done, you will see a message that says "app installed" or "installation successful". You can then tap on "open" or "done" to launch the game or exit the window.</p> <h2>Launch the game and enjoy unlimited money</h2>
-<p>The last step is to launch the game and enjoy unlimited money in Bus Simulator Ultimate. This is the most fun and exciting part. You can start the game normally and log in with your account or create a new one. You can then check your balance and see how much money you have. You will be amazed by the amount of money you have in the game.</p>
-<p>You can then spend your money as you wish and have fun. You can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort. You can also enjoy the game without ads or limitations. You can play the game as much as you want without any interruption or restriction.</p>
-<p>You can also use the money to support your favorite bus company or driver in the multiplayer mode. You can donate money to your teammates or friends to help them grow their business or improve their performance. You can also use the money to sabotage your rivals or enemies to make them lose their customers or reputation.</p>
-<p>All these features make Bus Simulator Ultimate Dinero Infinito APK a very enjoyable and convenient bus simulator game. You can experience the thrill of driving buses and exploring different cities with unlimited money and resources.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, Bus Simulator Ultimate Dinero Infinito APK is a modified version of the original game that gives you unlimited money and resources. It has all the features of the original game plus some extra ones that make it more enjoyable and convenient. It lets you drive various types of buses across different countries and continents, pick up and drop off passengers, customize your bus interior and exterior, and manage your own bus company. It also has a multiplayer mode where you can race with other players online and rank up in the global leaderboard.</p>
-<p>But the best feature of Bus Simulator Ultimate Dinero Infinito APK is that it gives you unlimited money and resources in the game. You can buy any bus, upgrade, or item you want without worrying about the cost. You can also unlock all the achievements and trophies in the game without any effort. You can also enjoy the game without ads or limitations. You can play the game as much as you want without any interruption or restriction.</p>
-<p>If you are interested in using Bus Simulator Ultimate Dinero Infinito APK, you just need to download and install it on your Android device from a trusted source. Then, you need to enable unknown sources in your device settings to allow installation. After that, you need to follow the instructions on the screen to complete the installation. Finally, you need to launch the game and enjoy unlimited money.</p>
-<p>Bus Simulator Ultimate Dinero Infinito APK is a great way to have fun and experience the realistic and immersive bus driving experience with unlimited money and resources. It is one of the best bus simulator games on Android that you should not miss. So, what are you waiting for? Download Bus Simulator Ultimate Dinero Infinito APK now and start driving!</p>
-<h2>FAQs</h2>
-<h3>Is Bus Simulator Ultimate Dinero Infinito APK safe to use?</h3>
-<p>Yes, Bus Simulator Ultimate Dinero Infinito APK is safe to use if you download it from a trusted source like [text]. However, you should always check the compatibility and security of the file before downloading it. You should also scan it with an antivirus program or a malware detector to ensure that it is free from any harmful software.</p>
-<h3>Will I get banned for using Bus Simulator Ultimate Dinero Infinito APK?</h3>
-<p>No, you will not get banned for using Bus Simulator Ultimate Dinero Infinito APK if you use it wisely and responsibly. However, you should not abuse or exploit the unlimited money feature in the multiplayer mode or online ranking. You should also not brag or boast about your money or items in the chat or social media. This may annoy other players or attract unwanted attention from the game developers or moderators.</p>
-<h3>Can I play online with Bus Simulator Ultimate Dinero Infinito APK?</h3>
-<p>Yes, you can play online with Bus Simulator Ultimate Dinero Infinito APK if you have a stable internet connection and a valid account. You can join or create your own bus races and challenge other drivers to see who is the fastest and the best. You can also join or create your own bus company and cooperate with other drivers. You can also earn rewards and rank up in the global leaderboard.</p>
-<h3>How can I update Bus Simulator Ultimate Dinero Infinito APK?</h3>
-<p>You can update Bus Simulator Ultimate Dinero Infinito APK by downloading and installing the latest version of the file from [text]. However, you should always backup your data before updating to avoid losing your progress or settings. You should also check if there are any changes or new features in the new version before updating. You should also make sure that the new version is compatible with your device and Android version.</p>
-<h3>Where can I download Bus Simulator Ultimate Dinero Infinito APK?</h3>
-<p>You can download Bus Simulator Ultimate Dinero Infinito APK from [text]. This website provides safe and secure downloads of various APK files for Android games and apps. It also provides detailed information about each file, such as its size, version, developer, rating, reviews, screenshots, and more. You can easily find Bus Simulator Ultimate Dinero Infinito APK by searching for it on the website or clicking on this link: [Bus Simulator Ultimate Dinero Infinito APK].</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download 39 40 Meye Yie.md
DELETED
@@ -1,70 +0,0 @@
-<br />
-<h1>Download 39/40 Meye Yie: A New Song by King Plaxma</h1>
-<p>If you are looking for a new song to add to your playlist, you might want to check out Meye Yie by King Plaxma featuring 39/40. This is a new song that was released in March 2023 and has been gaining popularity among music lovers. In this article, we will tell you what Meye Yie is, who King Plaxma and 39/40 are, how to download Meye Yie, and why you should listen to it.</p>
-<h2>Introduction</h2>
-<p>Meye Yie is a Ghanaian phrase that means "I will prosper" or "I will succeed". It is a song that expresses the hope and determination of the artists to achieve their goals and dreams. The song is a fusion of afrobeat, dancehall, and hip hop genres, creating a unique and appealing sound. The song was produced by Kobby Berry, a renowned Ghanaian producer who has worked with many other artists.</p>
-<h2>download 39 40 meye yie</h2><br /><p><b><b>Download</b> ✶ <a href="https://jinyurl.com/2uNSAE">https://jinyurl.com/2uNSAE</a></b></p><br /><br />
-<h3>What is Meye Yie?</h3>
-<p>Meye Yie is a song that was released by King Plaxma, a talented artist from Ghana, on March 15, 2023. The song features 39/40, a duo of young rappers who are also from Ghana. The song is part of King Plaxma's upcoming album, which is expected to be released later this year. The song has been well received by fans and critics alike, who have praised its catchy chorus, smooth flow, and motivational message.</p>
-<h3>Who is King Plaxma?</h3>
-<p>King Plaxma is a Ghanaian singer, songwriter, and rapper who started his music career in 2019. He is known for his versatile style and his ability to blend different genres of music. He has released several singles and collaborations with other artists, such as Tulenkey, Omah Lay, Davido, and more. Some of his popular songs include Breaks, Upgraded, I See You, Pretty Girl, and I Don't Fear Dem. He is also the founder of Plaxma Nation, a music label that supports upcoming artists.</p>
-<h3>Who are 39/40?</h3>
-<p>39/40 are a Ghanaian rap duo who are making waves in the music industry. They are composed of O'Kenneth and Reggie, two young rappers who are part of the Asakaa Boys collective. They are known for their streetwise lyrics, energetic delivery, and hard-hitting beats. They have collaborated with other artists such as Jay Bahd, Black Sherif, Gyakie, Fameye, and more. Some of their popular songs include This Song, Easy, Mercy, Mama Yie, and Meye Guy.</p>
-<h2>How to download Meye Yie?</h2>
-<p>If you want to download Meye Yie to your device or computer, there are several ways you can do it. Here are some of the options you can choose from:</p>
-<h3>Download from Zacknation.net</h3>
-<p>Zacknation.net is a Ghanaian website that offers free mp3 downloads of various songs from different genres and artists. You can download Meye Yie from Zacknation.net by following these steps:</p>
-<ol>
-<li>Go to [Zacknation.net](^1^) on your browser.</li>
-<li>Type "Meye Yie" in the search box and click on the magnifying glass icon.</li>
-<li>Scroll down until you find the post titled "Download: King Plaxma Ft 39/40 – Meye Yie Mp3 (New Song)".</li>
-<li>Click on the link that says "Meye Yie By King Plaxma Ft 39/40 – Meye Yie.mp3" to start the download.</li>
-<li>Wait for the download to finish and enjoy the song.</li>
-</ol>
-<h3>Download from YouTube</h3>
-<p>You can also download Meye Yie from YouTube, where you can watch the official video of the song. The video was uploaded by King Plaxma on his YouTube channel on March 16, 2023. It has over 1 million views and thousands of likes and comments. You can download Meye Yie from YouTube by following these steps:</p>
-<ol>
-<li>Go to [YouTube.com] on your browser.</li>
-<li>Type "Meye Yie King Plaxma" in the search box and click on the magnifying glass icon.</li>
-<li>Click on the video titled "King Plaxma - Meye Yie ft. 39/40 (Official Video)".</li>
-<li>Copy the URL of the video from the address bar.</li>
-<li>Go to a YouTube to mp3 converter website, such as [ytmp3.cc] or [y2mate.com].</li>
-<li>Paste the URL of the video in the box and click on "Convert" or "Start".</li>
-<li>Choose the format and quality of the mp3 file and click on "Download" or "Save".</li>
-<li>Wait for the download to finish and enjoy the song.</li>
-</ol>
-<h3>Download from other sources</h3>
-<p>If you prefer to download Meye Yie from other sources, you can also find it on various music streaming platforms and websites. Some of the options you can choose from are:</p>
-<ul>
-<li>[Spotify]: You can listen to Meye Yie on Spotify, a popular music streaming service that offers millions of songs and podcasts. You can also download Meye Yie to your device if you have a Spotify Premium account, which costs $9.99 per month.</li>
-<li>[Apple Music]: You can listen to Meye Yie on Apple Music, a music streaming service that is integrated with iTunes and iCloud. You can also download Meye Yie to your device if you have an Apple Music subscription, which costs $9.99 per month.</li>
-<li>[Audiomack]: You can listen to Meye Yie on Audiomack, a music streaming and discovery platform that features emerging artists and genres. You can also download Meye Yie to your device for free if you have an Audiomack account, which is also free.</li>
-<li>[SoundCloud]: You can listen to Meye Yie on SoundCloud, a music sharing and social networking platform that allows users to upload and stream their own songs. You can also download Meye Yie to your device for free if you have a SoundCloud account, which is also free.</li>
-</ul>
-<h2>Why you should listen to Meye Yie?</h2>
-<p>Meye Yie is not just a song, but a message of hope and inspiration. Here are some of the reasons why you should listen to Meye Yie:</p>
-<p></p>
-<h3>It is a cool-tempo production</h3>
-<p>Meye Yie is a song that has a cool-tempo production, meaning that it has a moderate pace and a relaxed vibe. The song has a smooth beat that is easy to groove to, and a melody that is pleasant to listen to. The song is suitable for any mood and occasion, whether you want to chill out, dance, or work out.</p>
-<h3>It features talented artists</h3>
-<p>Meye Yie is a song that features talented artists who showcase their skills and creativity. King Plaxma delivers a captivating performance with his vocals and rap verses, while 39/40 add their flair and energy with their rap lines. The artists complement each other well and create a harmonious collaboration.</p>
-<h3>It has a catchy chorus and lyrics</h3>
-<p>Meye Yie is a song that has a catchy chorus and lyrics that will stick in your head. The chorus repeats the phrase "Meye yie" several times, creating a memorable hook that will make you sing along. The lyrics are also meaningful and motivational, as they express the desire to prosper and overcome challenges. The song will inspire you to pursue your dreams and never give up.</p>
-<h2>Conclusion</h2>
-<p>Meye Yie is a song that you should not miss out on. It is a song that combines afrobeat, dancehall, and hip hop genres, creating a unique and appealing sound. It is a song that features King Plaxma and 39/40, two talented artists who deliver an amazing performance. It is a song that has a catchy chorus and lyrics that will inspire you to prosper and succeed. It is a song that you can download from various sources, such as Zacknation.net, YouTube, Spotify, Apple Music, Audiomack, and SoundCloud. If you are looking for a new song to add to your playlist, you should definitely download Meye Yie by King Plaxma featuring 39/40.</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about Meye Yie:</p>
-<ul>
-<li>Q: What does Meye Yie mean?<br>
-A: Meye Yie is a Ghanaian phrase that means "I will prosper" or "I will succeed".</li>
-<li>Q: Who produced Meye Yie?<br>
-A: Meye Yie was produced by Kobby Berry, a renowned Ghanaian producer who has worked with many other artists.</li>
-<li>Q: When was Meye Yie released?<br>
-A: Meye Yie was released by King Plaxma on March 15, 2023.</li>
-<li>Q: How long is Meye Yie?<br>
-A: Meye Yie is 3 minutes and 45 seconds long.</li>
-<li>Q: Where can I watch the video of Meye Yie?<br>
-A: You can watch the video of Meye Yie on YouTube, where it was uploaded by King Plaxma on his YouTube channel on March 16, 2023.</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download Mario Kart Tour Mod Apk and Enjoy Unlimited Coins and Rubies.md
DELETED
@@ -1,89 +0,0 @@
-<br />
-<h1>Mario Kart Tour Mod Apk (Unlimited Money and Gems)</h1>
-<p>Mario Kart Tour is a mobile game in the Mario Kart series that lets you race around courses inspired by real-world cities and classic Mario Kart tracks. The game is free to play, but it has a monetization system that involves microtransactions, gacha mechanics, and a Gold Pass subscription. If you want to enjoy the game without spending money or time, you might be interested in Mario Kart Tour mod apk (unlimited money and gems).</p>
-<h2>mario kart tour mod apk (unlimited money and gems)</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://jinyurl.com/2uNJMx">https://jinyurl.com/2uNJMx</a></b></p><br /><br />
-<p>This is a modified version of the game that gives you unlimited rubies and coins, which are the premium currencies in the game. You can use them to fire off the pipe and get random characters, karts, and gliders. You can also use them to buy items from the shop or play coin rush mode. With unlimited rubies and coins, you can unlock all the content in the game without waiting or grinding.</p>
-<h2>Features of Mario Kart Tour Mod Apk (Unlimited Money and Gems)</h2>
-<p>Here are some of the features of Mario Kart Tour mod apk (unlimited money and gems) that make it different from the original game:</p>
-<ul>
-<li><strong>Unlimited rubies and coins</strong>: You can get as many rubies and coins as you want by using a simple hack. You don't need to watch ads or complete surveys to get them. You can use them to unlock all the drivers, karts, and gliders in the game.</li>
-<li><strong>X2 points and coin bonus</strong>: You can get double the points and coins for every race you complete. This means you can level up faster and earn more rewards. You can also increase your online rank and compete with other players.</li>
-<li><strong>Frenzy mode activated for any item</strong>: You can activate frenzy mode for any item you get from an item box. Frenzy mode gives you an unlimited supply of a certain item and makes you invincible for a short time. You can use this to cause chaos on the track and gain an advantage over your opponents.</li>
-<li><strong>All tours and cups unlocked</strong>: You can access all the tours and cups in the game without collecting grand stars or completing challenges. You can play any course you want at any time. You can also enjoy the courses based on real-world cities that rotate every two weeks.</li>
-<li><strong>No ads or subscription required</strong>: You can play Mario Kart Tour mod apk (unlimited money and gems) without seeing any ads or paying for a Gold Pass subscription. You can enjoy all the features of the game without any interruptions or limitations.</li>
-</ul>
-<h2>How to Download and Install Mario Kart Tour Mod Apk (Unlimited Money and Gems)</h2>
-<p>If you want to download and install Mario Kart Tour mod apk (unlimited money and gems), here are the steps you need to follow:</p>
-<p>mario kart tour hack apk download free unlimited rubies<br />
-mario kart tour modded apk latest version with x2 points<br />
-how to install mario kart tour mod apk on android device<br />
-mario kart tour cheats apk for unlimited coins and gems<br />
-mario kart tour apk mod unlock all characters and tracks<br />
-mario kart tour mod apk offline mode with no ads<br />
-mario kart tour premium apk with unlimited money and gems<br />
-mario kart tour cracked apk with anti-ban feature<br />
-mario kart tour mod apk unlimited everything 2023<br />
-mario kart tour hack tool apk no root required<br />
-mario kart tour mod menu apk with custom settings<br />
-mario kart tour unlimited rubies and coins generator apk<br />
-mario kart tour mod apk free download for android phone<br />
-mario kart tour hack version apk with unlimited lives and tickets<br />
-mario kart tour modded game apk with high graphics quality<br />
-mario kart tour hack online apk no survey no human verification<br />
-mario kart tour mod apk unlimited money and gems reddit<br />
-mario kart tour hack apk ios compatible with iphone and ipad<br />
-mario kart tour modded app apk with auto win feature<br />
-mario kart tour cheat codes apk for android and ios devices<br />
-mario kart tour mod apk unlimited money and gems 2022 update<br />
-mario kart tour hack download apk with easy installation guide<br />
-mario kart tour modded game download apk with fast speed<br />
-mario kart tour cheats download apk with no password or login required<br />
-mario kart tour apk mod free shopping with unlimited gold and gems<br />
-mario kart tour cracked game download apk with full features unlocked<br />
-mario kart tour hack online download apk with no virus or malware<br />
-mario kart tour modded app download apk with safe and secure link<br />
-mario kart tour cheat codes download apk with working and tested codes<br />
-mario kart tour mod apk unlimited money and gems 2021 latest version</p>
-<ol>
-<li>Go to [11](https://play.google.com/store/apps/details?id=com.nintendo.zaka) or [12](https://apps.apple.com/us/app/mario-kart-tour/id1293634699) and download the original Mario Kart Tour game on your device.</li>
-<li>Go to [1](https://apkprox.com/mario-kart-tour-mod-apk/) or another trusted source and download the Mario Kart Tour mod apk file on your device.</li>
-<li>Enable unknown sources on your device settings to allow installation of third-party apps.</li>
-<li>Locate the downloaded mod apk file on your device storage and tap on it to install it.</li>
-<li>Launch the game and enjoy unlimited money and gems.</li>
-</ol>
-<h2>FAQ <h2>FAQs</h2>
-<p>Here are some of the frequently asked questions and answers about Mario Kart Tour mod apk (unlimited money and gems):</p>
-<table>
-<tr>
-<th>Question</th>
-<th>Answer</th>
-</tr>
-<tr>
-<td>Is Mario Kart Tour mod apk (unlimited money and gems) safe to use?</td>
-<td>Yes, Mario Kart Tour mod apk (unlimited money and gems) is safe to use as long as you download it from a trusted source and scan it for viruses before installing it. However, you should be aware that using a modded version of the game may violate the terms of service of Nintendo and result in a ban or suspension of your account.</td>
-</tr>
-<tr>
-<td>Does Mario Kart Tour mod apk (unlimited money and gems) work online?</td>
-<td>Yes, Mario Kart Tour mod apk (unlimited money and gems) works online and offline. You can play the game with other players online or solo offline. However, you should be careful not to use the modded features too obviously or excessively, as this may alert other players or Nintendo and get you reported or banned.</td>
|
68 |
-
</tr>
|
69 |
-
<tr>
|
70 |
-
<td>Can I update Mario Kart Tour mod apk (unlimited money and gems) to the latest version?</td>
|
71 |
-
<td>Yes, you can update Mario Kart Tour mod apk (unlimited money and gems) to the latest version by downloading the new mod apk file from the same source and installing it over the existing one. However, you should always backup your game data before updating, as some updates may cause compatibility issues or data loss.</td>
|
72 |
-
</tr>
|
73 |
-
<tr>
|
74 |
-
<td>Can I use Mario Kart Tour mod apk (unlimited money and gems) on iOS devices?</td>
|
75 |
-
<td>No, Mario Kart Tour mod apk (unlimited money and gems) is only available for Android devices. If you want to use a modded version of the game on iOS devices, you will need to jailbreak your device and use a different method, such as Cydia or iFile. However, this may void your warranty or damage your device, so proceed at your own risk.</td>
|
76 |
-
</tr>
|
77 |
-
<tr>
|
78 |
-
<td>What are the alternatives to Mario Kart Tour mod apk (unlimited money and gems)?</td>
|
79 |
-
<td>If you don't want to use Mario Kart Tour mod apk (unlimited money and gems), you can try some of the alternatives, such as:</td>
|
80 |
-
<ul>
|
81 |
-
<li>Mario Kart Tour cheats: These are codes or commands that you can enter in the game to get extra rubies, coins, items, or other benefits. However, these cheats may not work on all devices or versions of the game, and they may also get you banned or suspended.</li>
|
82 |
-
<li>Mario Kart Tour hacks: These are tools or programs that you can use to modify the game files or data to get unlimited money and gems or other features. However, these hacks may require root access or special permissions on your device, and they may also be detected or blocked by Nintendo.</li>
|
83 |
-
<li>Mario Kart Tour tips and tricks: These are strategies or techniques that you can use to play the game better and earn more rubies, coins, items, or points. These tips and tricks are legal and ethical, and they do not require any modification or hacking of the game.</li>
|
84 |
-
</ul>
|
85 |
-
</tr>
|
86 |
-
</table>
|
87 |
-
<p>I hope this article has helped you learn more about Mario Kart Tour mod apk (unlimited money and gems) and how to download and install it on your device. If you have any questions or feedback, please leave a comment below. Thank you for reading!</p> 197e85843d<br />
|
88 |
-
<br />
|
89 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Mod Incredibox and Explore the Galaxy the Ocean and the Nightmare.md
DELETED
@@ -1,112 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Mod Incredibox: A Guide for Music Lovers</h1>
|
3 |
-
<p>If you love music and creativity, you might have heard of Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers. But did you know that you can also download mod Incredibox versions that are made by fans and offer different themes, sounds, and stories? In this article, we will show you what Incredibox is, what mods are, how to download and install them, and how to play and enjoy them.</p>
|
4 |
-
<h2>download mod incredibox</h2><br /><p><b><b>Download</b> ⇒ <a href="https://jinyurl.com/2uNSr0">https://jinyurl.com/2uNSr0</a></b></p><br /><br />
|
5 |
-
<h2>What is Incredibox and why you should try it</h2>
|
6 |
-
<h3>Incredibox is a fun, interactive music app</h3>
|
7 |
-
<p>Incredibox is a music app that was created in 2009 by a French company called So Far So Good. It is a part game, part tool, and part audio and visual experience that has quickly become a hit with people of all ages. More than 70 million players worldwide have already enjoyed it.</p>
|
8 |
-
<h3>You can create your own music with different styles and atmospheres</h3>
|
9 |
-
<p>Incredibox allows you to create your own music with the help of a merry crew of beatboxers. You can choose your musical style among 8 impressive atmospheres, such as Alpha, Little Miss, Jeevan, Brazil, Alive, The Love, Sunrise, and Voxel. Each atmosphere has its own sounds, graphics, animation, and interactivity. You can drag and drop icons onto the avatars to make them sing and start to compose your own mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune.</p>
|
10 |
-
<h3>You can share your mix and join the top 50 chart</h3>
|
11 |
-
<p>Once your composition sounds great, you can save it and get a link to your mix. You can easily share it with anybody so they can listen and even vote for it. If you share your mix a lot and get enough votes from other users, you may go down in Incredibox history by joining the top 50 chart. You can also browse other users' mixes and get inspired by their creativity.</p>
|
12 |
-
<h2>What are mods and how they can enhance your experience</h2>
|
13 |
-
<h3>Mods are fan-made versions or modifications of the game</h3>
|
14 |
-
<p>Mods are fan-made versions or modifications of the game that change some aspects of it, such as the look, the sounds, or the gameplay. Mods are not official versions of Incredibox, but they are made by fans who love the game and want to add their own touch to it. Mods are usually free to download and play, but they may not work on all devices or browsers.</p>
|
15 |
-
<p>download evadare mod incredibox<br />
|
16 |
-
download the bells mod incredibox<br />
|
17 |
-
download galaxy mod incredibox<br />
|
18 |
-
download evadare demo mod incredibox<br />
|
19 |
-
download halloween mod incredibox<br />
|
20 |
-
download christmas mod incredibox<br />
|
21 |
-
download ocean mod incredibox<br />
|
22 |
-
download pirate mod incredibox<br />
|
23 |
-
download fan-made versions of incredibox<br />
|
24 |
-
download mods from incredirem website<br />
|
25 |
-
download mods from rem incredibox website<br />
|
26 |
-
download spooky mods for incredibox<br />
|
27 |
-
download scary mods for incredibox<br />
|
28 |
-
download peaceful mods for incredibox<br />
|
29 |
-
download calm mods for incredibox<br />
|
30 |
-
download original music mods for incredibox<br />
|
31 |
-
download new sounds and bonuses for incredibox<br />
|
32 |
-
download monster-themed mods for incredibox<br />
|
33 |
-
download human-themed mods for incredibox<br />
|
34 |
-
download village-themed mods for incredibox<br />
|
35 |
-
download space-themed mods for incredibox<br />
|
36 |
-
download star-themed mods for incredibox<br />
|
37 |
-
download story-based mods for incredibox<br />
|
38 |
-
download chapter-based mods for incredibox<br />
|
39 |
-
download full version of evadare mod incredibox<br />
|
40 |
-
download demo version of evadare mod incredibox<br />
|
41 |
-
download online version of evadare mod incredibox<br />
|
42 |
-
download online version of galaxy mod incredibox<br />
|
43 |
-
download free mods for incredibox game<br />
|
44 |
-
download unofficial mods for incredibox game<br />
|
45 |
-
download modification of incredibox game<br />
|
46 |
-
how to download mod incredibox on pc<br />
|
47 |
-
how to download mod incredibox on android<br />
|
48 |
-
how to download mod incredibox on ios<br />
|
49 |
-
how to install mod incredibox on pc<br />
|
50 |
-
how to install mod incredibox on android<br />
|
51 |
-
how to install mod incredibox on ios<br />
|
52 |
-
where to find mod incredibox downloads<br />
|
53 |
-
where to get mod incredibox downloads<br />
|
54 |
-
where to access mod incredibox downloads<br />
|
55 |
-
best sites to download mod incredibox <br />
|
56 |
-
best sources to download mod incredibox <br />
|
57 |
-
best places to download mod incredibox <br />
|
58 |
-
latest mods to download for incredibox <br />
|
59 |
-
newest mods to download for incredibox <br />
|
60 |
-
most popular mods to download for incredibox <br />
|
61 |
-
most liked mods to download for incredibox <br />
|
62 |
-
most downloaded mods for incredibox <br />
|
63 |
-
most played mods for incredibox</p>
|
64 |
-
<h3>You can find different themes, sounds, and stories in mods</h3>
|
65 |
-
<p>Mods offer different themes, sounds, and stories that are not available in the original version of Incredibox. For example, some mods have a spooky theme for Halloween, a Christmas theme for the holidays, or a galaxy theme for sci -fi adventure. Some mods have different sounds and instruments that are not in the original game, such as guitars, pianos, or drums. Some mods have different stories and characters that are inspired by movies, books, or games, such as Harry Potter, Star Wars, or Minecraft. You can discover the stories and secrets behind each mod by playing them and finding the right sound combos.</p>
|
66 |
-
<h3>Some of the most popular mods are Evadare, Galaxy, and The Bells</h3>
|
67 |
-
<p>Some of the most popular mods for Incredibox are Evadare, Galaxy, and The Bells. Evadare is a mod that has a dark and mysterious theme, with sounds that are inspired by horror movies and games. Galaxy is a mod that has a futuristic and space theme, with sounds that are inspired by sci-fi movies and games. The Bells is a mod that has a festive and cheerful theme, with sounds that are inspired by Christmas songs and bells. You can find these mods and more on websites like Incredirem or Google Sites.</p>
|
68 |
-
<h2>How to download and install mods for Incredibox</h2>
|
69 |
-
<h3>You need to have the app or the web version of Incredibox first</h3>
|
70 |
-
<p>To download and install mods for Incredibox, you need to have the app or the web version of Incredibox first. You can download the app from the App Store or Google Play for $3.99, or you can play the web version for free on the official website. The app version has more features and updates than the web version, but both versions are compatible with most mods.</p>
|
71 |
-
<h3>You can find mods on websites like Incredirem or Google Sites</h3>
|
72 |
-
<p>You can find mods for Incredibox on websites like Incredirem or Google Sites. These websites have a collection of mods that are made by fans and are free to download and play. You can browse the mods by categories, ratings, or popularity, and you can see screenshots and videos of each mod before downloading them. You can also leave feedback and comments for the mod creators and other players.</p>
|
73 |
-
<h3>You can download the mod files and replace them in the app folder or use a browser extension</h3>
|
74 |
-
<p>To download and install mods for Incredibox, you need to download the mod files from the websites and replace them in the app folder or use a browser extension. If you have the app version of Incredibox, you need to locate the app folder on your device and replace the original files with the mod files. You may need to use a file manager app or connect your device to a computer to do this. If you have the web version of Incredibox, you need to use a browser extension like Tampermonkey or Greasemonkey to run the mod files on your browser. You may need to enable some permissions or settings to do this.</p>
|
75 |
-
<h2>How to play and enjoy mods for Incredibox</h2>
|
76 |
-
<h3>You can drag and drop icons onto the avatars to make them sing</h3>
|
77 |
-
<p>To play mods for Incredibox, you can drag and drop icons onto the avatars to make them sing. Each icon represents a different sound or instrument that will add to your mix. You can experiment with different combinations and see what sounds good together. You can also mute or solo each avatar by clicking on them.</p>
|
78 |
-
<h3>You can find the right sound combos to unlock animated choruses</h3>
|
79 |
-
<p>To enjoy mods for Incredibox, you can find the right sound combos to unlock animated choruses. Each mod has its own set of sound combos that will trigger an animated chorus that will enhance your mix. You can see hints of the sound combos on the top right corner of the screen, or you can try to find them by yourself. Some sound combos may also reveal hidden secrets or stories behind each mod.</p>
|
80 |
-
<h3>You can discover the stories and secrets behind each mod</h3>
|
81 |
-
<p>To appreciate mods for Incredibox, you can discover the stories and secrets behind each mod. Each mod has its own theme, atmosphere, and story that is expressed through its sounds, graphics, animation, and interactivity. You can try to understand what each mod is about by playing it and finding its sound combos. You can also read more about each mod on its website or in its description.</p>
|
82 |
-
<h2>Conclusion and FAQs</h2>
|
83 |
-
<p>In conclusion, Incredibox is a fun, interactive music app that lets you create your own music with different styles and atmospheres. You can also download mod Incredibox versions that are made by fans and offer different themes, sounds, and stories. To download and install mods for Incredibox, you need to have the app or the web version of Incredibox first, then you need to find mods on websites like Incredirem or Google Sites, and then you need to download the mod files and replace them in the app folder or use a browser extension. To play and enjoy mods for Incredibox, you can drag and drop icons onto the avatars to make them sing, find the right sound combos to unlock animated choruses, and discover the stories and secrets behind each mod. We hope this guide has helped you learn how to download mod Incredibox and have fun with it. If you have any questions, you can check out the FAQs below or contact us for more information.</p>
|
84 |
-
<h4>FAQs</h4>
|
85 |
-
<table>
|
86 |
-
<tr>
|
87 |
-
<th>Question</th>
|
88 |
-
<th>Answer</th>
|
89 |
-
</tr>
|
90 |
-
<tr>
|
91 |
-
<td>Is Incredibox safe to download and play?</td>
|
92 |
-
<td>Yes, Incredibox is safe to download and play. It does not contain any viruses, malware, or inappropriate content. However, you should always be careful when downloading files from unknown sources and scan them for any potential threats.</td>
|
93 |
-
</tr>
|
94 |
-
<tr>
|
95 |
-
<td>Is Incredibox free to play?</td>
|
96 |
-
<td>Incredibox is free to play on the web version, but you need to pay $3.99 to download the app version. The web version has fewer features and updates than the app version, but both versions are compatible with most mods. Mods are usually free to download and play, but some mod creators may ask for donations or support.</td>
|
97 |
-
</tr>
|
98 |
-
<tr>
|
99 |
-
<td>How can I create my own mod for Incredibox?</td>
|
100 |
-
<td>To create your own mod for Incredibox, you need to have some skills in coding, sound design, and graphic design. You also need to have access to the original files of Incredibox and modify them according to your vision. You can use tools like Audacity, Photoshop, or Unity to create your own sounds, graphics, and animation. You can also use websites like Incredirem or Google Sites to host your mod and share it with other players.</td>
|
101 |
-
</tr>
|
102 |
-
<tr>
|
103 |
-
<td>How can I support the developers of Incredibox?</td>
|
104 |
-
<td>To support the developers of Incredibox, you can buy the app version of the game, leave a positive review or rating on the app store or website, follow them on social media, or donate to their Patreon page. You can also spread the word about Incredibox and invite your friends and family to play it.</td>
|
105 |
-
</tr>
|
106 |
-
<tr>
|
107 |
-
<td>What are some other music apps that are similar to Incredibox?</td>
|
108 |
-
<td>Some other music apps that are similar to Incredibox are Groovepad, Beat Snap, Music Maker Jam, BandLab, or GarageBand. These apps allow you to create your own music with different genres, instruments, loops, effects, and features. You can also share your music with other users and listen to their creations.</td>
|
109 |
-
</tr>
|
110 |
-
</table></p> 197e85843d<br />
|
111 |
-
<br />
|
112 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/__init__.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
from .core_wrapper import CoreWrapper, load_runtime_lib
|
2 |
-
from .make_synthesis_engines import make_synthesis_engines
|
3 |
-
from .synthesis_engine import SynthesisEngine
|
4 |
-
from .synthesis_engine_base import SynthesisEngineBase
|
5 |
-
|
6 |
-
__all__ = [
|
7 |
-
"CoreWrapper",
|
8 |
-
"load_runtime_lib",
|
9 |
-
"make_synthesis_engines",
|
10 |
-
"SynthesisEngine",
|
11 |
-
"SynthesisEngineBase",
|
12 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/Fixes/tensor-launch.py
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
import threading
|
2 |
-
import time
|
3 |
-
from tensorboard import program
|
4 |
-
import os
|
5 |
-
|
6 |
-
log_path = "logs"
|
7 |
-
|
8 |
-
if __name__ == "__main__":
|
9 |
-
tb = program.TensorBoard()
|
10 |
-
tb.configure(argv=[None, '--logdir', log_path])
|
11 |
-
url = tb.launch()
|
12 |
-
print(f'Tensorboard can be accessed at: {url}')
|
13 |
-
|
14 |
-
while True:
|
15 |
-
time.sleep(600) # Keep the main thread running
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/optim/dadam.py
DELETED
@@ -1,252 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import logging
|
8 |
-
from typing import TYPE_CHECKING, Any
|
9 |
-
|
10 |
-
import torch
|
11 |
-
import torch.optim
|
12 |
-
import torch.distributed as dist
|
13 |
-
|
14 |
-
if TYPE_CHECKING:
|
15 |
-
from torch.optim.optimizer import _params_t
|
16 |
-
else:
|
17 |
-
_params_t = Any
|
18 |
-
|
19 |
-
|
20 |
-
logger = logging.getLogger(__name__)
|
21 |
-
|
22 |
-
|
23 |
-
def to_real(x):
|
24 |
-
if torch.is_complex(x):
|
25 |
-
return x.real
|
26 |
-
else:
|
27 |
-
return x
|
28 |
-
|
29 |
-
|
30 |
-
class DAdaptAdam(torch.optim.Optimizer):
|
31 |
-
"""Adam with D-Adaptation automatic step-sizes.
|
32 |
-
Leave LR set to 1 unless you encounter instability.
|
33 |
-
|
34 |
-
Args:
|
35 |
-
params (iterable):
|
36 |
-
Iterable of parameters to optimize or dicts defining parameter groups.
|
37 |
-
lr (float):
|
38 |
-
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
|
39 |
-
betas (tuple[float, float], optional): coefficients used for computing
|
40 |
-
running averages of gradient and its square (default: (0.9, 0.999))
|
41 |
-
momentum (float):
|
42 |
-
Momentum value in the range [0,1) (default: 0.9).
|
43 |
-
eps (float):
|
44 |
-
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
|
45 |
-
weight_decay (float):
|
46 |
-
Weight decay, i.e. a L2 penalty (default: 0).
|
47 |
-
log_every (int):
|
48 |
-
Log using print every k steps, default 0 (no logging).
|
49 |
-
decouple (boolean):
|
50 |
-
Use AdamW style decoupled weight decay
|
51 |
-
d0 (float):
|
52 |
-
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
|
53 |
-
growth_rate (float):
|
54 |
-
prevent the D estimate from growing faster than this multiplicative rate.
|
55 |
-
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
|
56 |
-
rate warmup effect.
|
57 |
-
fsdp_in_use (bool):
|
58 |
-
If you're using sharded parameters, this should be set to True. The optimizer
|
59 |
-
will attempt to auto-detect this, but if you're using an implementation other
|
60 |
-
than PyTorch's builtin version, the auto-detection won't work.
|
61 |
-
"""
|
62 |
-
def __init__(self, params, lr=1.0,
|
63 |
-
betas=(0.9, 0.999),
|
64 |
-
eps=1e-8,
|
65 |
-
weight_decay=0,
|
66 |
-
log_every=0,
|
67 |
-
decouple=True,
|
68 |
-
d0=1e-6,
|
69 |
-
growth_rate=float('inf')):
|
70 |
-
if not 0.0 < d0:
|
71 |
-
raise ValueError("Invalid d0 value: {}".format(d0))
|
72 |
-
if not 0.0 < lr:
|
73 |
-
raise ValueError("Invalid learning rate: {}".format(lr))
|
74 |
-
if not 0.0 < eps:
|
75 |
-
raise ValueError("Invalid epsilon value: {}".format(eps))
|
76 |
-
if not 0.0 <= betas[0] < 1.0:
|
77 |
-
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
|
78 |
-
if not 0.0 <= betas[1] < 1.0:
|
79 |
-
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
|
80 |
-
|
81 |
-
if decouple:
|
82 |
-
logger.info("Using decoupled weight decay")
|
83 |
-
|
84 |
-
from .fsdp import is_fsdp_used
|
85 |
-
fsdp_in_use = is_fsdp_used()
|
86 |
-
defaults = dict(lr=lr, betas=betas, eps=eps,
|
87 |
-
weight_decay=weight_decay,
|
88 |
-
d=d0,
|
89 |
-
k=0,
|
90 |
-
gsq_weighted=0.0,
|
91 |
-
log_every=log_every,
|
92 |
-
decouple=decouple,
|
93 |
-
growth_rate=growth_rate,
|
94 |
-
fsdp_in_use=fsdp_in_use)
|
95 |
-
|
96 |
-
super().__init__(params, defaults)
|
97 |
-
|
98 |
-
@property
|
99 |
-
def supports_memory_efficient_fp16(self):
|
100 |
-
return False
|
101 |
-
|
102 |
-
@property
|
103 |
-
def supports_flat_params(self):
|
104 |
-
return True
|
105 |
-
|
106 |
-
def step(self, closure=None):
|
107 |
-
"""Performs a single optimization step.
|
108 |
-
|
109 |
-
Args:
|
110 |
-
closure (callable, optional): A closure that reevaluates the model
|
111 |
-
and returns the loss.
|
112 |
-
"""
|
113 |
-
loss = None
|
114 |
-
if closure is not None:
|
115 |
-
loss = closure()
|
116 |
-
|
117 |
-
g_sq = 0.0
|
118 |
-
sksq_weighted = 0.0
|
119 |
-
sk_l1 = 0.0
|
120 |
-
|
121 |
-
lr = max(group['lr'] for group in self.param_groups)
|
122 |
-
|
123 |
-
group = self.param_groups[0]
|
124 |
-
gsq_weighted = group['gsq_weighted']
|
125 |
-
d = group['d']
|
126 |
-
dlr = d*lr
|
127 |
-
|
128 |
-
growth_rate = group['growth_rate']
|
129 |
-
decouple = group['decouple']
|
130 |
-
fsdp_in_use = group['fsdp_in_use']
|
131 |
-
log_every = group['log_every']
|
132 |
-
|
133 |
-
beta1, beta2 = group['betas']
|
134 |
-
|
135 |
-
for group in self.param_groups:
|
136 |
-
group_lr = group['lr']
|
137 |
-
decay = group['weight_decay']
|
138 |
-
k = group['k']
|
139 |
-
eps = group['eps']
|
140 |
-
|
141 |
-
if group_lr not in [lr, 0.0]:
|
142 |
-
raise RuntimeError("Setting different lr values in different parameter "
|
143 |
-
"groups is only supported for values of 0")
|
144 |
-
|
145 |
-
for p in group['params']:
|
146 |
-
if p.grad is None:
|
147 |
-
continue
|
148 |
-
if hasattr(p, "_fsdp_flattened"):
|
149 |
-
fsdp_in_use = True
|
150 |
-
grad = p.grad.data
|
151 |
-
|
152 |
-
# Apply weight decay (coupled variant)
|
153 |
-
if decay != 0 and not decouple:
|
154 |
-
grad.add_(p.data, alpha=decay)
|
155 |
-
|
156 |
-
state = self.state[p]
|
157 |
-
|
158 |
-
# State initialization
|
159 |
-
if 'step' not in state:
|
160 |
-
state['step'] = 0
|
161 |
-
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
|
162 |
-
# Exponential moving average of gradient values
|
163 |
-
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
|
164 |
-
# Exponential moving average of squared gradient values
|
165 |
-
state['exp_avg_sq'] = torch.zeros_like(
|
166 |
-
to_real(p.data), memory_format=torch.preserve_format).detach()
|
167 |
-
|
168 |
-
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
|
169 |
-
|
170 |
-
grad_grad = to_real(grad * grad.conj())
|
171 |
-
|
172 |
-
# Adam EMA updates
|
173 |
-
if group_lr > 0:
|
174 |
-
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1-beta1))
|
175 |
-
exp_avg_sq.mul_(beta2).add_(grad_grad, alpha=1-beta2)
|
176 |
-
|
177 |
-
denom = exp_avg_sq.sqrt().add_(eps)
|
178 |
-
|
179 |
-
g_sq += grad_grad.div_(denom).sum().item()
|
180 |
-
|
181 |
-
s = state['s']
|
182 |
-
s.mul_(beta2).add_(grad, alpha=dlr*(1-beta2))
|
183 |
-
sksq_weighted += to_real(s * s.conj()).div_(denom).sum().item()
|
184 |
-
sk_l1 += s.abs().sum().item()
|
185 |
-
|
186 |
-
######
|
187 |
-
|
188 |
-
gsq_weighted = beta2*gsq_weighted + g_sq*(dlr**2)*(1-beta2)
|
189 |
-
d_hat = d
|
190 |
-
|
191 |
-
# if we have not done any progres, return
|
192 |
-
# if we have any gradients available, will have sk_l1 > 0 (unless \|g\|=0)
|
193 |
-
if sk_l1 == 0:
|
194 |
-
return loss
|
195 |
-
|
196 |
-
if lr > 0.0:
|
197 |
-
if fsdp_in_use:
|
198 |
-
dist_tensor = torch.zeros(3, device='cuda')
|
199 |
-
dist_tensor[0] = sksq_weighted
|
200 |
-
dist_tensor[1] = gsq_weighted
|
201 |
-
dist_tensor[2] = sk_l1
|
202 |
-
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
|
203 |
-
global_sksq_weighted = dist_tensor[0]
|
204 |
-
global_gsq_weighted = dist_tensor[1]
|
205 |
-
global_sk_l1 = dist_tensor[2]
|
206 |
-
else:
|
207 |
-
global_sksq_weighted = sksq_weighted
|
208 |
-
global_gsq_weighted = gsq_weighted
|
209 |
-
global_sk_l1 = sk_l1
|
210 |
-
|
211 |
-
d_hat = (global_sksq_weighted/(1-beta2) - global_gsq_weighted)/global_sk_l1
|
212 |
-
d = max(d, min(d_hat, d*growth_rate))
|
213 |
-
|
214 |
-
if log_every > 0 and k % log_every == 0:
|
215 |
-
logger.info(
|
216 |
-
f"(k={k}) dlr: {dlr:1.1e} d_hat: {d_hat:1.1e}, d: {d:1.8}. "
|
217 |
-
f"sksq_weighted={global_sksq_weighted:1.1e} gsq_weighted={global_gsq_weighted:1.1e} "
|
218 |
-
f"sk_l1={global_sk_l1:1.1e}{' (FSDP)' if fsdp_in_use else ''}")
|
219 |
-
|
220 |
-
for group in self.param_groups:
|
221 |
-
group['gsq_weighted'] = gsq_weighted
|
222 |
-
group['d'] = d
|
223 |
-
|
224 |
-
group_lr = group['lr']
|
225 |
-
decay = group['weight_decay']
|
226 |
-
k = group['k']
|
227 |
-
eps = group['eps']
|
228 |
-
|
229 |
-
for p in group['params']:
|
230 |
-
if p.grad is None:
|
231 |
-
continue
|
232 |
-
grad = p.grad.data
|
233 |
-
|
234 |
-
state = self.state[p]
|
235 |
-
|
236 |
-
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
|
237 |
-
|
238 |
-
state['step'] += 1
|
239 |
-
|
240 |
-
denom = exp_avg_sq.sqrt().add_(eps)
|
241 |
-
denom = denom.type(p.type())
|
242 |
-
|
243 |
-
# Apply weight decay (decoupled variant)
|
244 |
-
if decay != 0 and decouple and group_lr > 0:
|
245 |
-
p.data.add_(p.data, alpha=-decay * dlr)
|
246 |
-
|
247 |
-
# Take step
|
248 |
-
p.data.addcdiv_(exp_avg, denom, value=-1)
|
249 |
-
|
250 |
-
group['k'] = k + 1
|
251 |
-
|
252 |
-
return loss
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/fastspeech/pe.py
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
from modules.commons.common_layers import *
|
2 |
-
from utils.hparams import hparams
|
3 |
-
from modules.fastspeech.tts_modules import PitchPredictor
|
4 |
-
from utils.pitch_utils import denorm_f0
|
5 |
-
|
6 |
-
|
7 |
-
class Prenet(nn.Module):
|
8 |
-
def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
|
9 |
-
super(Prenet, self).__init__()
|
10 |
-
padding = kernel // 2
|
11 |
-
self.layers = []
|
12 |
-
self.strides = strides if strides is not None else [1] * n_layers
|
13 |
-
for l in range(n_layers):
|
14 |
-
self.layers.append(nn.Sequential(
|
15 |
-
nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]),
|
16 |
-
nn.ReLU(),
|
17 |
-
nn.BatchNorm1d(out_dim)
|
18 |
-
))
|
19 |
-
in_dim = out_dim
|
20 |
-
self.layers = nn.ModuleList(self.layers)
|
21 |
-
self.out_proj = nn.Linear(out_dim, out_dim)
|
22 |
-
|
23 |
-
def forward(self, x):
|
24 |
-
"""
|
25 |
-
|
26 |
-
:param x: [B, T, 80]
|
27 |
-
:return: [L, B, T, H], [B, T, H]
|
28 |
-
"""
|
29 |
-
padding_mask = x.abs().sum(-1).eq(0).data # [B, T]
|
30 |
-
nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :] # [B, 1, T]
|
31 |
-
x = x.transpose(1, 2)
|
32 |
-
hiddens = []
|
33 |
-
for i, l in enumerate(self.layers):
|
34 |
-
nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]]
|
35 |
-
x = l(x) * nonpadding_mask_TB
|
36 |
-
hiddens.append(x)
|
37 |
-
hiddens = torch.stack(hiddens, 0) # [L, B, H, T]
|
38 |
-
hiddens = hiddens.transpose(2, 3) # [L, B, T, H]
|
39 |
-
x = self.out_proj(x.transpose(1, 2)) # [B, T, H]
|
40 |
-
x = x * nonpadding_mask_TB.transpose(1, 2)
|
41 |
-
return hiddens, x
|
42 |
-
|
43 |
-
|
44 |
-
class ConvBlock(nn.Module):
|
45 |
-
def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0):
|
46 |
-
super().__init__()
|
47 |
-
self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
|
48 |
-
self.norm = norm
|
49 |
-
if self.norm == 'bn':
|
50 |
-
self.norm = nn.BatchNorm1d(n_chans)
|
51 |
-
elif self.norm == 'in':
|
52 |
-
self.norm = nn.InstanceNorm1d(n_chans, affine=True)
|
53 |
-
elif self.norm == 'gn':
|
54 |
-
self.norm = nn.GroupNorm(n_chans // 16, n_chans)
|
55 |
-
elif self.norm == 'ln':
|
56 |
-
self.norm = LayerNorm(n_chans // 16, n_chans)
|
57 |
-
elif self.norm == 'wn':
|
58 |
-
self.conv = torch.nn.utils.weight_norm(self.conv.conv)
|
59 |
-
self.dropout = nn.Dropout(dropout)
|
60 |
-
self.relu = nn.ReLU()
|
61 |
-
|
62 |
-
def forward(self, x):
|
63 |
-
"""
|
64 |
-
|
65 |
-
:param x: [B, C, T]
|
66 |
-
:return: [B, C, T]
|
67 |
-
"""
|
68 |
-
x = self.conv(x)
|
69 |
-
if not isinstance(self.norm, str):
|
70 |
-
if self.norm == 'none':
|
71 |
-
pass
|
72 |
-
elif self.norm == 'ln':
|
73 |
-
x = self.norm(x.transpose(1, 2)).transpose(1, 2)
|
74 |
-
else:
|
75 |
-
x = self.norm(x)
|
76 |
-
x = self.relu(x)
|
77 |
-
x = self.dropout(x)
|
78 |
-
return x
|
79 |
-
|
80 |
-
|
81 |
-
class ConvStacks(nn.Module):
|
82 |
-
def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn',
|
83 |
-
dropout=0, strides=None, res=True):
|
84 |
-
super().__init__()
|
85 |
-
self.conv = torch.nn.ModuleList()
|
86 |
-
self.kernel_size = kernel_size
|
87 |
-
self.res = res
|
88 |
-
self.in_proj = Linear(idim, n_chans)
|
89 |
-
if strides is None:
|
90 |
-
strides = [1] * n_layers
|
91 |
-
else:
|
92 |
-
assert len(strides) == n_layers
|
93 |
-
for idx in range(n_layers):
|
94 |
-
self.conv.append(ConvBlock(
|
95 |
-
n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout))
|
96 |
-
self.out_proj = Linear(n_chans, odim)
|
97 |
-
|
98 |
-
def forward(self, x, return_hiddens=False):
|
99 |
-
"""
|
100 |
-
|
101 |
-
:param x: [B, T, H]
|
102 |
-
:return: [B, T, H]
|
103 |
-
"""
|
104 |
-
x = self.in_proj(x)
|
105 |
-
x = x.transpose(1, -1) # (B, idim, Tmax)
|
106 |
-
hiddens = []
|
107 |
-
for f in self.conv:
|
108 |
-
x_ = f(x)
|
109 |
-
x = x + x_ if self.res else x_ # (B, C, Tmax)
|
110 |
-
hiddens.append(x)
|
111 |
-
x = x.transpose(1, -1)
|
112 |
-
x = self.out_proj(x) # (B, Tmax, H)
|
113 |
-
if return_hiddens:
|
114 |
-
hiddens = torch.stack(hiddens, 1) # [B, L, C, T]
|
115 |
-
return x, hiddens
|
116 |
-
return x
|
117 |
-
|
118 |
-
|
119 |
-
class PitchExtractor(nn.Module):
|
120 |
-
def __init__(self, n_mel_bins=80, conv_layers=2):
|
121 |
-
super().__init__()
|
122 |
-
self.hidden_size = hparams['hidden_size']
|
123 |
-
self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
|
124 |
-
self.conv_layers = conv_layers
|
125 |
-
|
126 |
-
self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
|
127 |
-
if self.conv_layers > 0:
|
128 |
-
self.mel_encoder = ConvStacks(
|
129 |
-
idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers)
|
130 |
-
self.pitch_predictor = PitchPredictor(
|
131 |
-
self.hidden_size, n_chans=self.predictor_hidden,
|
132 |
-
n_layers=5, dropout_rate=0.1, odim=2,
|
133 |
-
padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
|
134 |
-
|
135 |
-
def forward(self, mel_input=None):
|
136 |
-
ret = {}
|
137 |
-
mel_hidden = self.mel_prenet(mel_input)[1]
|
138 |
-
if self.conv_layers > 0:
|
139 |
-
mel_hidden = self.mel_encoder(mel_hidden)
|
140 |
-
|
141 |
-
ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden)
|
142 |
-
|
143 |
-
pitch_padding = mel_input.abs().sum(-1) == 0
|
144 |
-
use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv']
|
145 |
-
|
146 |
-
ret['f0_denorm_pred'] = denorm_f0(
|
147 |
-
pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
|
148 |
-
hparams, pitch_padding=pitch_padding)
|
149 |
-
return ret
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/utils/callbacks.py
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Callback utils
|
4 |
-
"""
|
5 |
-
|
6 |
-
import threading
|
7 |
-
|
8 |
-
|
9 |
-
class Callbacks:
|
10 |
-
""" "
|
11 |
-
Handles all registered callbacks for YOLOv5 Hooks
|
12 |
-
"""
|
13 |
-
|
14 |
-
def __init__(self):
|
15 |
-
# Define the available callbacks
|
16 |
-
self._callbacks = {
|
17 |
-
"on_pretrain_routine_start": [],
|
18 |
-
"on_pretrain_routine_end": [],
|
19 |
-
"on_train_start": [],
|
20 |
-
"on_train_epoch_start": [],
|
21 |
-
"on_train_batch_start": [],
|
22 |
-
"optimizer_step": [],
|
23 |
-
"on_before_zero_grad": [],
|
24 |
-
"on_train_batch_end": [],
|
25 |
-
"on_train_epoch_end": [],
|
26 |
-
"on_val_start": [],
|
27 |
-
"on_val_batch_start": [],
|
28 |
-
"on_val_image_end": [],
|
29 |
-
"on_val_batch_end": [],
|
30 |
-
"on_val_end": [],
|
31 |
-
"on_fit_epoch_end": [], # fit = train + val
|
32 |
-
"on_model_save": [],
|
33 |
-
"on_train_end": [],
|
34 |
-
"on_params_update": [],
|
35 |
-
"teardown": [],
|
36 |
-
}
|
37 |
-
self.stop_training = False # set True to interrupt training
|
38 |
-
|
39 |
-
def register_action(self, hook, name="", callback=None):
|
40 |
-
"""
|
41 |
-
Register a new action to a callback hook
|
42 |
-
|
43 |
-
Args:
|
44 |
-
hook: The callback hook name to register the action to
|
45 |
-
name: The name of the action for later reference
|
46 |
-
callback: The callback to fire
|
47 |
-
"""
|
48 |
-
assert (
|
49 |
-
hook in self._callbacks
|
50 |
-
), f"hook '{hook}' not found in callbacks {self._callbacks}"
|
51 |
-
assert callable(callback), f"callback '{callback}' is not callable"
|
52 |
-
self._callbacks[hook].append({"name": name, "callback": callback})
|
53 |
-
|
54 |
-
def get_registered_actions(self, hook=None):
|
55 |
-
""" "
|
56 |
-
Returns all the registered actions by callback hook
|
57 |
-
|
58 |
-
Args:
|
59 |
-
hook: The name of the hook to check, defaults to all
|
60 |
-
"""
|
61 |
-
return self._callbacks[hook] if hook else self._callbacks
|
62 |
-
|
63 |
-
def run(self, hook, *args, thread=False, **kwargs):
|
64 |
-
"""
|
65 |
-
Loop through the registered actions and fire all callbacks on main thread
|
66 |
-
|
67 |
-
Args:
|
68 |
-
hook: The name of the hook to check, defaults to all
|
69 |
-
args: Arguments to receive from YOLOv5
|
70 |
-
thread: (boolean) Run callbacks in daemon thread
|
71 |
-
kwargs: Keyword Arguments to receive from YOLOv5
|
72 |
-
"""
|
73 |
-
|
74 |
-
assert (
|
75 |
-
hook in self._callbacks
|
76 |
-
), f"hook '{hook}' not found in callbacks {self._callbacks}"
|
77 |
-
for logger in self._callbacks[hook]:
|
78 |
-
if thread:
|
79 |
-
threading.Thread(
|
80 |
-
target=logger["callback"],
|
81 |
-
args=args,
|
82 |
-
kwargs=kwargs,
|
83 |
-
daemon=True,
|
84 |
-
).start()
|
85 |
-
else:
|
86 |
-
logger["callback"](*args, **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/prisoner_dilemma.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
import asyncio
|
2 |
-
import logging
|
3 |
-
from typing import Any, Dict, List
|
4 |
-
|
5 |
-
# from agentverse.agents.agent import Agent
|
6 |
-
from agentverse.agents.simulation_agent.conversation import BaseAgent
|
7 |
-
|
8 |
-
# from agentverse.environments.simulation_env.rules.base import Rule
|
9 |
-
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
|
10 |
-
from agentverse.message import Message
|
11 |
-
|
12 |
-
from .. import env_registry as EnvironmentRegistry
|
13 |
-
from .basic import BasicEnvironment
|
14 |
-
|
15 |
-
|
16 |
-
@EnvironmentRegistry.register("prisoner_dilemma")
|
17 |
-
class PrisonerDilemmaEnvironment(BasicEnvironment):
|
18 |
-
"""
|
19 |
-
An environment for prisoner dilemma.
|
20 |
-
"""
|
21 |
-
|
22 |
-
async def step(self) -> List[Message]:
|
23 |
-
"""Run one step of the environment"""
|
24 |
-
|
25 |
-
# Get the next agent index
|
26 |
-
agent_ids = self.rule.get_next_agent_idx(self)
|
27 |
-
|
28 |
-
# Generate current environment description
|
29 |
-
env_descriptions = self.rule.get_env_description(self)
|
30 |
-
|
31 |
-
# Generate the next message
|
32 |
-
messages = await asyncio.gather(
|
33 |
-
*[self.agents[i].astep(self, env_descriptions[i]) for i in agent_ids]
|
34 |
-
)
|
35 |
-
|
36 |
-
# Some rules will select certain messages from all the messages
|
37 |
-
selected_messages = self.rule.select_message(self, messages)
|
38 |
-
self.last_messages = selected_messages
|
39 |
-
self.print_messages(selected_messages)
|
40 |
-
|
41 |
-
# Update the memory of the agents
|
42 |
-
self.rule.update_memory(self)
|
43 |
-
|
44 |
-
# Update the set of visible agents for each agent
|
45 |
-
self.rule.update_visible_agents(self)
|
46 |
-
|
47 |
-
self.cnt_turn += 1
|
48 |
-
|
49 |
-
return selected_messages
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/BBCodeText.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import BBCodeText from '../../../plugins/bbcodetext';
|
2 |
-
export default BBCodeText;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/LayoutChildren.js
DELETED
@@ -1,29 +0,0 @@
|
|
1 |
-
import ResizeGameObject from '../../../../plugins/utils/size/ResizeGameObject.js';
|
2 |
-
|
3 |
-
var LayoutChildren = function () {
|
4 |
-
// LayoutChildren child
|
5 |
-
var child = this.child;
|
6 |
-
var childWidth, childHeight;
|
7 |
-
if (!child.rexSizer.hidden) {
|
8 |
-
// Set size
|
9 |
-
if (this.scrollMode === 0) {
|
10 |
-
childWidth = this.width;
|
11 |
-
} else {
|
12 |
-
childHeight = this.height;
|
13 |
-
}
|
14 |
-
if (child.isRexSizer) {
|
15 |
-
child.runLayout(this, childWidth, childHeight);
|
16 |
-
} else {
|
17 |
-
ResizeGameObject(child, childWidth, childHeight);
|
18 |
-
}
|
19 |
-
|
20 |
-
// Update local state
|
21 |
-
this.resetChildPosition();
|
22 |
-
// Layout children-mask
|
23 |
-
this.layoutChildrenMask();
|
24 |
-
// Re-mask children
|
25 |
-
this.maskChildren();
|
26 |
-
}
|
27 |
-
}
|
28 |
-
|
29 |
-
export default LayoutChildren;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Akmyradov/TurkmenTTSweSTT/vits/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
|
|
|
|
spaces/AlexWang/lama/bin/gen_debug_mask_dataset.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
|
3 |
-
import glob
|
4 |
-
import os
|
5 |
-
|
6 |
-
import PIL.Image as Image
|
7 |
-
import cv2
|
8 |
-
import numpy as np
|
9 |
-
import tqdm
|
10 |
-
import shutil
|
11 |
-
|
12 |
-
|
13 |
-
from saicinpainting.evaluation.utils import load_yaml
|
14 |
-
|
15 |
-
|
16 |
-
def generate_masks_for_img(infile, outmask_pattern, mask_size=200, step=0.5):
|
17 |
-
inimg = Image.open(infile)
|
18 |
-
width, height = inimg.size
|
19 |
-
step_abs = int(mask_size * step)
|
20 |
-
|
21 |
-
mask = np.zeros((height, width), dtype='uint8')
|
22 |
-
mask_i = 0
|
23 |
-
|
24 |
-
for start_vertical in range(0, height - step_abs, step_abs):
|
25 |
-
for start_horizontal in range(0, width - step_abs, step_abs):
|
26 |
-
mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 255
|
27 |
-
|
28 |
-
cv2.imwrite(outmask_pattern.format(mask_i), mask)
|
29 |
-
|
30 |
-
mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 0
|
31 |
-
mask_i += 1
|
32 |
-
|
33 |
-
|
34 |
-
def main(args):
|
35 |
-
if not args.indir.endswith('/'):
|
36 |
-
args.indir += '/'
|
37 |
-
if not args.outdir.endswith('/'):
|
38 |
-
args.outdir += '/'
|
39 |
-
|
40 |
-
config = load_yaml(args.config)
|
41 |
-
|
42 |
-
in_files = list(glob.glob(os.path.join(args.indir, '**', f'*{config.img_ext}'), recursive=True))
|
43 |
-
for infile in tqdm.tqdm(in_files):
|
44 |
-
outimg = args.outdir + infile[len(args.indir):]
|
45 |
-
outmask_pattern = outimg[:-len(config.img_ext)] + '_mask{:04d}.png'
|
46 |
-
|
47 |
-
os.makedirs(os.path.dirname(outimg), exist_ok=True)
|
48 |
-
shutil.copy2(infile, outimg)
|
49 |
-
|
50 |
-
generate_masks_for_img(infile, outmask_pattern, **config.gen_kwargs)
|
51 |
-
|
52 |
-
|
53 |
-
if __name__ == '__main__':
|
54 |
-
import argparse
|
55 |
-
|
56 |
-
aparser = argparse.ArgumentParser()
|
57 |
-
aparser.add_argument('config', type=str, help='Path to config for dataset generation')
|
58 |
-
aparser.add_argument('indir', type=str, help='Path to folder with images')
|
59 |
-
aparser.add_argument('outdir', type=str, help='Path to folder to store aligned images and masks to')
|
60 |
-
|
61 |
-
main(aparser.parse_args())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWang/lama/models/ade20k/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
from .base import *
|
|
|
|
spaces/Aloento/9Nine-PITS/pqmf.py
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
# Copyright 2020 Tomoki Hayashi
|
4 |
-
# MIT License (https://opensource.org/licenses/MIT)
|
5 |
-
|
6 |
-
"""Pseudo QMF modules."""
|
7 |
-
'''
|
8 |
-
Copied from https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/layers/pqmf.py
|
9 |
-
'''
|
10 |
-
|
11 |
-
import numpy as np
|
12 |
-
import torch
|
13 |
-
import torch.nn.functional as F
|
14 |
-
|
15 |
-
from scipy.signal.windows import kaiser
|
16 |
-
|
17 |
-
|
18 |
-
def design_prototype_filter(taps=62, cutoff_ratio=0.142, beta=9.0):
|
19 |
-
"""Design prototype filter for PQMF.
|
20 |
-
This method is based on `A Kaiser window approach for the design of prototype
|
21 |
-
filters of cosine modulated filterbanks`_.
|
22 |
-
Args:
|
23 |
-
taps (int): The number of filter taps.
|
24 |
-
cutoff_ratio (float): Cut-off frequency ratio.
|
25 |
-
beta (float): Beta coefficient for kaiser window.
|
26 |
-
Returns:
|
27 |
-
ndarray: Impluse response of prototype filter (taps + 1,).
|
28 |
-
.. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
|
29 |
-
https://ieeexplore.ieee.org/abstract/document/681427
|
30 |
-
"""
|
31 |
-
# check the arguments are valid
|
32 |
-
assert taps % 2 == 0, "The number of taps mush be even number."
|
33 |
-
assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
|
34 |
-
|
35 |
-
# make initial filter
|
36 |
-
omega_c = np.pi * cutoff_ratio
|
37 |
-
with np.errstate(invalid="ignore"):
|
38 |
-
h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
|
39 |
-
np.pi * (np.arange(taps + 1) - 0.5 * taps)
|
40 |
-
)
|
41 |
-
h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
|
42 |
-
|
43 |
-
# apply kaiser window
|
44 |
-
w = kaiser(taps + 1, beta)
|
45 |
-
h = h_i * w
|
46 |
-
|
47 |
-
return h
|
48 |
-
|
49 |
-
|
50 |
-
class PQMF(torch.nn.Module):
|
51 |
-
"""PQMF module.
|
52 |
-
This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
|
53 |
-
.. _`Near-perfect-reconstruction pseudo-QMF banks`:
|
54 |
-
https://ieeexplore.ieee.org/document/258122
|
55 |
-
"""
|
56 |
-
|
57 |
-
def __init__(self, subbands=4, taps=62, cutoff_ratio=0.142, beta=9.0):
|
58 |
-
"""Initilize PQMF module.
|
59 |
-
The cutoff_ratio and beta parameters are optimized for #subbands = 4.
|
60 |
-
See dicussion in https://github.com/kan-bayashi/ParallelWaveGAN/issues/195.
|
61 |
-
Args:
|
62 |
-
subbands (int): The number of subbands.
|
63 |
-
taps (int): The number of filter taps.
|
64 |
-
cutoff_ratio (float): Cut-off frequency ratio.
|
65 |
-
beta (float): Beta coefficient for kaiser window.
|
66 |
-
"""
|
67 |
-
super(PQMF, self).__init__()
|
68 |
-
|
69 |
-
# build analysis & synthesis filter coefficients
|
70 |
-
h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
|
71 |
-
h_analysis = np.zeros((subbands, len(h_proto)))
|
72 |
-
h_synthesis = np.zeros((subbands, len(h_proto)))
|
73 |
-
for k in range(subbands):
|
74 |
-
h_analysis[k] = (
|
75 |
-
2
|
76 |
-
* h_proto
|
77 |
-
* np.cos(
|
78 |
-
(2 * k + 1)
|
79 |
-
* (np.pi / (2 * subbands))
|
80 |
-
* (np.arange(taps + 1) - (taps / 2))
|
81 |
-
+ (-1) ** k * np.pi / 4
|
82 |
-
)
|
83 |
-
)
|
84 |
-
h_synthesis[k] = (
|
85 |
-
2
|
86 |
-
* h_proto
|
87 |
-
* np.cos(
|
88 |
-
(2 * k + 1)
|
89 |
-
* (np.pi / (2 * subbands))
|
90 |
-
* (np.arange(taps + 1) - (taps / 2))
|
91 |
-
- (-1) ** k * np.pi / 4
|
92 |
-
)
|
93 |
-
)
|
94 |
-
|
95 |
-
# convert to tensor
|
96 |
-
analysis_filter = torch.Tensor(h_analysis).float().unsqueeze(1)
|
97 |
-
synthesis_filter = torch.Tensor(h_synthesis).float().unsqueeze(0)
|
98 |
-
|
99 |
-
# register coefficients as beffer
|
100 |
-
self.register_buffer("analysis_filter", analysis_filter)
|
101 |
-
self.register_buffer("synthesis_filter", synthesis_filter)
|
102 |
-
|
103 |
-
# filter for downsampling & upsampling
|
104 |
-
updown_filter = torch.zeros((subbands, subbands, subbands)).float()
|
105 |
-
for k in range(subbands):
|
106 |
-
updown_filter[k, k, 0] = 1.0
|
107 |
-
self.register_buffer("updown_filter", updown_filter)
|
108 |
-
self.subbands = subbands
|
109 |
-
|
110 |
-
# keep padding info
|
111 |
-
self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
|
112 |
-
|
113 |
-
def analysis(self, x):
|
114 |
-
"""Analysis with PQMF.
|
115 |
-
Args:
|
116 |
-
x (Tensor): Input tensor (B, 1, T).
|
117 |
-
Returns:
|
118 |
-
Tensor: Output tensor (B, subbands, T // subbands).
|
119 |
-
"""
|
120 |
-
x = F.conv1d(self.pad_fn(x), self.analysis_filter)
|
121 |
-
return F.conv1d(x, self.updown_filter, stride=self.subbands)
|
122 |
-
|
123 |
-
def synthesis(self, x):
|
124 |
-
"""Synthesis with PQMF.
|
125 |
-
Args:
|
126 |
-
x (Tensor): Input tensor (B, subbands, T // subbands).
|
127 |
-
Returns:
|
128 |
-
Tensor: Output tensor (B, 1, T).
|
129 |
-
"""
|
130 |
-
# NOTE(kan-bayashi): Power will be dreased so here multipy by # subbands.
|
131 |
-
# Not sure this is the correct way, it is better to check again.
|
132 |
-
# TODO(kan-bayashi): Understand the reconstruction procedure
|
133 |
-
x = F.conv_transpose1d(
|
134 |
-
x, self.updown_filter * self.subbands, stride=self.subbands
|
135 |
-
)
|
136 |
-
return F.conv1d(self.pad_fn(x), self.synthesis_filter)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatglm.py
DELETED
@@ -1,140 +0,0 @@
|
|
1 |
-
|
2 |
-
from transformers import AutoModel, AutoTokenizer
|
3 |
-
import time
|
4 |
-
import importlib
|
5 |
-
from toolbox import update_ui, get_conf
|
6 |
-
from multiprocessing import Process, Pipe
|
7 |
-
|
8 |
-
load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
9 |
-
|
10 |
-
#################################################################################
|
11 |
-
class GetGLMHandle(Process):
|
12 |
-
def __init__(self):
|
13 |
-
super().__init__(daemon=True)
|
14 |
-
self.parent, self.child = Pipe()
|
15 |
-
self.chatglm_model = None
|
16 |
-
self.chatglm_tokenizer = None
|
17 |
-
self.info = ""
|
18 |
-
self.success = True
|
19 |
-
self.check_dependency()
|
20 |
-
self.start()
|
21 |
-
|
22 |
-
def check_dependency(self):
|
23 |
-
try:
|
24 |
-
import sentencepiece
|
25 |
-
self.info = "依赖检测通过"
|
26 |
-
self.success = True
|
27 |
-
except:
|
28 |
-
self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
|
29 |
-
self.success = False
|
30 |
-
|
31 |
-
def ready(self):
|
32 |
-
return self.chatglm_model is not None
|
33 |
-
|
34 |
-
def run(self):
|
35 |
-
# 第一次运行,加载参数
|
36 |
-
retry = 0
|
37 |
-
while True:
|
38 |
-
try:
|
39 |
-
if self.chatglm_model is None:
|
40 |
-
self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
|
41 |
-
device, = get_conf('LOCAL_MODEL_DEVICE')
|
42 |
-
if device=='cpu':
|
43 |
-
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
|
44 |
-
else:
|
45 |
-
self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
|
46 |
-
self.chatglm_model = self.chatglm_model.eval()
|
47 |
-
break
|
48 |
-
else:
|
49 |
-
break
|
50 |
-
except:
|
51 |
-
retry += 1
|
52 |
-
if retry > 3:
|
53 |
-
self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
|
54 |
-
raise RuntimeError("不能正常加载ChatGLM的参数!")
|
55 |
-
|
56 |
-
# 进入任务等待状态
|
57 |
-
while True:
|
58 |
-
kwargs = self.child.recv()
|
59 |
-
try:
|
60 |
-
for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
|
61 |
-
self.child.send(response)
|
62 |
-
except:
|
63 |
-
self.child.send('[Local Message] Call ChatGLM fail.')
|
64 |
-
self.child.send('[Finish]')
|
65 |
-
|
66 |
-
def stream_chat(self, **kwargs):
|
67 |
-
self.parent.send(kwargs)
|
68 |
-
while True:
|
69 |
-
res = self.parent.recv()
|
70 |
-
if res != '[Finish]':
|
71 |
-
yield res
|
72 |
-
else:
|
73 |
-
break
|
74 |
-
return
|
75 |
-
|
76 |
-
global glm_handle
|
77 |
-
glm_handle = None
|
78 |
-
#################################################################################
|
79 |
-
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
80 |
-
"""
|
81 |
-
多线程方法
|
82 |
-
函数的说明请见 request_llm/bridge_all.py
|
83 |
-
"""
|
84 |
-
global glm_handle
|
85 |
-
if glm_handle is None:
|
86 |
-
glm_handle = GetGLMHandle()
|
87 |
-
observe_window[0] = load_message + "\n\n" + glm_handle.info
|
88 |
-
if not glm_handle.success:
|
89 |
-
error = glm_handle.info
|
90 |
-
glm_handle = None
|
91 |
-
raise RuntimeError(error)
|
92 |
-
|
93 |
-
# chatglm 没有 sys_prompt 接口,因此把prompt加入 history
|
94 |
-
history_feedin = []
|
95 |
-
for i in range(len(history)//2):
|
96 |
-
history_feedin.append(["What can I do?", sys_prompt] )
|
97 |
-
history_feedin.append([history[2*i], history[2*i+1]] )
|
98 |
-
|
99 |
-
watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
|
100 |
-
response = ""
|
101 |
-
for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
102 |
-
observe_window[0] = response
|
103 |
-
if len(observe_window) >= 2:
|
104 |
-
if (time.time()-observe_window[1]) > watch_dog_patience:
|
105 |
-
raise RuntimeError("程序终止。")
|
106 |
-
return response
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
111 |
-
"""
|
112 |
-
单线程方法
|
113 |
-
函数的说明请见 request_llm/bridge_all.py
|
114 |
-
"""
|
115 |
-
chatbot.append((inputs, ""))
|
116 |
-
|
117 |
-
global glm_handle
|
118 |
-
if glm_handle is None:
|
119 |
-
glm_handle = GetGLMHandle()
|
120 |
-
chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
|
121 |
-
yield from update_ui(chatbot=chatbot, history=[])
|
122 |
-
if not glm_handle.success:
|
123 |
-
glm_handle = None
|
124 |
-
return
|
125 |
-
|
126 |
-
if additional_fn is not None:
|
127 |
-
import core_functional
|
128 |
-
importlib.reload(core_functional) # 热更新prompt
|
129 |
-
core_functional = core_functional.get_core_functions()
|
130 |
-
if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
|
131 |
-
inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
|
132 |
-
|
133 |
-
history_feedin = []
|
134 |
-
for i in range(len(history)//2):
|
135 |
-
history_feedin.append(["What can I do?", system_prompt] )
|
136 |
-
history_feedin.append([history[2*i], history[2*i+1]] )
|
137 |
-
|
138 |
-
for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
|
139 |
-
chatbot[-1] = (inputs, response)
|
140 |
-
yield from update_ui(chatbot=chatbot, history=history)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py
DELETED
@@ -1,781 +0,0 @@
|
|
1 |
-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Helper for managing networks."""
|
10 |
-
|
11 |
-
import types
|
12 |
-
import inspect
|
13 |
-
import re
|
14 |
-
import uuid
|
15 |
-
import sys
|
16 |
-
import copy
|
17 |
-
import numpy as np
|
18 |
-
import tensorflow as tf
|
19 |
-
|
20 |
-
from collections import OrderedDict
|
21 |
-
from typing import Any, List, Tuple, Union, Callable
|
22 |
-
|
23 |
-
from . import tfutil
|
24 |
-
from .. import util
|
25 |
-
|
26 |
-
from .tfutil import TfExpression, TfExpressionEx
|
27 |
-
|
28 |
-
# pylint: disable=protected-access
|
29 |
-
# pylint: disable=attribute-defined-outside-init
|
30 |
-
# pylint: disable=too-many-public-methods
|
31 |
-
|
32 |
-
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
|
33 |
-
_import_module_src = dict() # Source code for temporary modules created during pickle import.
|
34 |
-
|
35 |
-
|
36 |
-
def import_handler(handler_func):
|
37 |
-
"""Function decorator for declaring custom import handlers."""
|
38 |
-
_import_handlers.append(handler_func)
|
39 |
-
return handler_func
|
40 |
-
|
41 |
-
|
42 |
-
class Network:
|
43 |
-
"""Generic network abstraction.
|
44 |
-
|
45 |
-
Acts as a convenience wrapper for a parameterized network construction
|
46 |
-
function, providing several utility methods and convenient access to
|
47 |
-
the inputs/outputs/weights.
|
48 |
-
|
49 |
-
Network objects can be safely pickled and unpickled for long-term
|
50 |
-
archival purposes. The pickling works reliably as long as the underlying
|
51 |
-
network construction function is defined in a standalone Python module
|
52 |
-
that has no side effects or application-specific imports.
|
53 |
-
|
54 |
-
Args:
|
55 |
-
name: Network name. Used to select TensorFlow name and variable scopes. Defaults to build func name if None.
|
56 |
-
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
|
57 |
-
static_kwargs: Keyword arguments to be passed in to the network construction function.
|
58 |
-
"""
|
59 |
-
|
60 |
-
def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
|
61 |
-
# Locate the user-specified build function.
|
62 |
-
assert isinstance(func_name, str) or util.is_top_level_function(func_name)
|
63 |
-
if util.is_top_level_function(func_name):
|
64 |
-
func_name = util.get_top_level_function_name(func_name)
|
65 |
-
module, func_name = util.get_module_from_obj_name(func_name)
|
66 |
-
func = util.get_obj_from_module(module, func_name)
|
67 |
-
|
68 |
-
# Dig up source code for the module containing the build function.
|
69 |
-
module_src = _import_module_src.get(module, None)
|
70 |
-
if module_src is None:
|
71 |
-
module_src = inspect.getsource(module)
|
72 |
-
|
73 |
-
# Initialize fields.
|
74 |
-
self._init_fields(name=(name or func_name), static_kwargs=static_kwargs, build_func=func, build_func_name=func_name, build_module_src=module_src)
|
75 |
-
|
76 |
-
def _init_fields(self, name: str, static_kwargs: dict, build_func: Callable, build_func_name: str, build_module_src: str) -> None:
|
77 |
-
tfutil.assert_tf_initialized()
|
78 |
-
assert isinstance(name, str)
|
79 |
-
assert len(name) >= 1
|
80 |
-
assert re.fullmatch(r"[A-Za-z0-9_.\\-]*", name)
|
81 |
-
assert isinstance(static_kwargs, dict)
|
82 |
-
assert util.is_pickleable(static_kwargs)
|
83 |
-
assert callable(build_func)
|
84 |
-
assert isinstance(build_func_name, str)
|
85 |
-
assert isinstance(build_module_src, str)
|
86 |
-
|
87 |
-
# Choose TensorFlow name scope.
|
88 |
-
with tf.name_scope(None):
|
89 |
-
scope = tf.get_default_graph().unique_name(name, mark_as_used=True)
|
90 |
-
|
91 |
-
# Query current TensorFlow device.
|
92 |
-
with tfutil.absolute_name_scope(scope), tf.control_dependencies(None):
|
93 |
-
device = tf.no_op(name="_QueryDevice").device
|
94 |
-
|
95 |
-
# Immutable state.
|
96 |
-
self._name = name
|
97 |
-
self._scope = scope
|
98 |
-
self._device = device
|
99 |
-
self._static_kwargs = util.EasyDict(copy.deepcopy(static_kwargs))
|
100 |
-
self._build_func = build_func
|
101 |
-
self._build_func_name = build_func_name
|
102 |
-
self._build_module_src = build_module_src
|
103 |
-
|
104 |
-
# State before _init_graph().
|
105 |
-
self._var_inits = dict() # var_name => initial_value, set to None by _init_graph()
|
106 |
-
self._all_inits_known = False # Do we know for sure that _var_inits covers all the variables?
|
107 |
-
self._components = None # subnet_name => Network, None if the components are not known yet
|
108 |
-
|
109 |
-
# Initialized by _init_graph().
|
110 |
-
self._input_templates = None
|
111 |
-
self._output_templates = None
|
112 |
-
self._own_vars = None
|
113 |
-
|
114 |
-
# Cached values initialized the respective methods.
|
115 |
-
self._input_shapes = None
|
116 |
-
self._output_shapes = None
|
117 |
-
self._input_names = None
|
118 |
-
self._output_names = None
|
119 |
-
self._vars = None
|
120 |
-
self._trainables = None
|
121 |
-
self._var_global_to_local = None
|
122 |
-
self._run_cache = dict()
|
123 |
-
|
124 |
-
def _init_graph(self) -> None:
|
125 |
-
assert self._var_inits is not None
|
126 |
-
assert self._input_templates is None
|
127 |
-
assert self._output_templates is None
|
128 |
-
assert self._own_vars is None
|
129 |
-
|
130 |
-
# Initialize components.
|
131 |
-
if self._components is None:
|
132 |
-
self._components = util.EasyDict()
|
133 |
-
|
134 |
-
# Choose build func kwargs.
|
135 |
-
build_kwargs = dict(self.static_kwargs)
|
136 |
-
build_kwargs["is_template_graph"] = True
|
137 |
-
build_kwargs["components"] = self._components
|
138 |
-
|
139 |
-
# Override scope and device, and ignore surrounding control dependencies.
|
140 |
-
with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope), tf.device(self.device), tf.control_dependencies(None):
|
141 |
-
assert tf.get_variable_scope().name == self.scope
|
142 |
-
assert tf.get_default_graph().get_name_scope() == self.scope
|
143 |
-
|
144 |
-
# Create input templates.
|
145 |
-
self._input_templates = []
|
146 |
-
for param in inspect.signature(self._build_func).parameters.values():
|
147 |
-
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
|
148 |
-
self._input_templates.append(tf.placeholder(tf.float32, name=param.name))
|
149 |
-
|
150 |
-
# Call build func.
|
151 |
-
out_expr = self._build_func(*self._input_templates, **build_kwargs)
|
152 |
-
|
153 |
-
# Collect output templates and variables.
|
154 |
-
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
|
155 |
-
self._output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
|
156 |
-
self._own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
|
157 |
-
|
158 |
-
# Check for errors.
|
159 |
-
if len(self._input_templates) == 0:
|
160 |
-
raise ValueError("Network build func did not list any inputs.")
|
161 |
-
if len(self._output_templates) == 0:
|
162 |
-
raise ValueError("Network build func did not return any outputs.")
|
163 |
-
if any(not tfutil.is_tf_expression(t) for t in self._output_templates):
|
164 |
-
raise ValueError("Network outputs must be TensorFlow expressions.")
|
165 |
-
if any(t.shape.ndims is None for t in self._input_templates):
|
166 |
-
raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
|
167 |
-
if any(t.shape.ndims is None for t in self._output_templates):
|
168 |
-
raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
|
169 |
-
if any(not isinstance(comp, Network) for comp in self._components.values()):
|
170 |
-
raise ValueError("Components of a Network must be Networks themselves.")
|
171 |
-
if len(self._components) != len(set(comp.name for comp in self._components.values())):
|
172 |
-
raise ValueError("Components of a Network must have unique names.")
|
173 |
-
|
174 |
-
# Initialize variables.
|
175 |
-
if len(self._var_inits):
|
176 |
-
tfutil.set_vars({self._get_vars()[name]: value for name, value in self._var_inits.items() if name in self._get_vars()})
|
177 |
-
remaining_inits = [var.initializer for name, var in self._own_vars.items() if name not in self._var_inits]
|
178 |
-
if self._all_inits_known:
|
179 |
-
assert len(remaining_inits) == 0
|
180 |
-
else:
|
181 |
-
tfutil.run(remaining_inits)
|
182 |
-
self._var_inits = None
|
183 |
-
|
184 |
-
@property
|
185 |
-
def name(self):
|
186 |
-
"""User-specified name string."""
|
187 |
-
return self._name
|
188 |
-
|
189 |
-
@property
|
190 |
-
def scope(self):
|
191 |
-
"""Unique TensorFlow scope containing template graph and variables, derived from the user-specified name."""
|
192 |
-
return self._scope
|
193 |
-
|
194 |
-
@property
|
195 |
-
def device(self):
|
196 |
-
"""Name of the TensorFlow device that the weights of this network reside on. Determined by the current device at construction time."""
|
197 |
-
return self._device
|
198 |
-
|
199 |
-
@property
|
200 |
-
def static_kwargs(self):
|
201 |
-
"""EasyDict of arguments passed to the user-supplied build func."""
|
202 |
-
return copy.deepcopy(self._static_kwargs)
|
203 |
-
|
204 |
-
@property
|
205 |
-
def components(self):
|
206 |
-
"""EasyDict of sub-networks created by the build func."""
|
207 |
-
return copy.copy(self._get_components())
|
208 |
-
|
209 |
-
def _get_components(self):
|
210 |
-
if self._components is None:
|
211 |
-
self._init_graph()
|
212 |
-
assert self._components is not None
|
213 |
-
return self._components
|
214 |
-
|
215 |
-
@property
|
216 |
-
def input_shapes(self):
|
217 |
-
"""List of input tensor shapes, including minibatch dimension."""
|
218 |
-
if self._input_shapes is None:
|
219 |
-
self._input_shapes = [t.shape.as_list() for t in self.input_templates]
|
220 |
-
return copy.deepcopy(self._input_shapes)
|
221 |
-
|
222 |
-
@property
|
223 |
-
def output_shapes(self):
|
224 |
-
"""List of output tensor shapes, including minibatch dimension."""
|
225 |
-
if self._output_shapes is None:
|
226 |
-
self._output_shapes = [t.shape.as_list() for t in self.output_templates]
|
227 |
-
return copy.deepcopy(self._output_shapes)
|
228 |
-
|
229 |
-
@property
|
230 |
-
def input_shape(self):
|
231 |
-
"""Short-hand for input_shapes[0]."""
|
232 |
-
return self.input_shapes[0]
|
233 |
-
|
234 |
-
@property
|
235 |
-
def output_shape(self):
|
236 |
-
"""Short-hand for output_shapes[0]."""
|
237 |
-
return self.output_shapes[0]
|
238 |
-
|
239 |
-
@property
|
240 |
-
def num_inputs(self):
|
241 |
-
"""Number of input tensors."""
|
242 |
-
return len(self.input_shapes)
|
243 |
-
|
244 |
-
@property
|
245 |
-
def num_outputs(self):
|
246 |
-
"""Number of output tensors."""
|
247 |
-
return len(self.output_shapes)
|
248 |
-
|
249 |
-
@property
|
250 |
-
def input_names(self):
|
251 |
-
"""Name string for each input."""
|
252 |
-
if self._input_names is None:
|
253 |
-
self._input_names = [t.name.split("/")[-1].split(":")[0] for t in self.input_templates]
|
254 |
-
return copy.copy(self._input_names)
|
255 |
-
|
256 |
-
@property
|
257 |
-
def output_names(self):
|
258 |
-
"""Name string for each output."""
|
259 |
-
if self._output_names is None:
|
260 |
-
self._output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
|
261 |
-
return copy.copy(self._output_names)
|
262 |
-
|
263 |
-
@property
|
264 |
-
def input_templates(self):
|
265 |
-
"""Input placeholders in the template graph."""
|
266 |
-
if self._input_templates is None:
|
267 |
-
self._init_graph()
|
268 |
-
assert self._input_templates is not None
|
269 |
-
return copy.copy(self._input_templates)
|
270 |
-
|
271 |
-
@property
|
272 |
-
def output_templates(self):
|
273 |
-
"""Output tensors in the template graph."""
|
274 |
-
if self._output_templates is None:
|
275 |
-
self._init_graph()
|
276 |
-
assert self._output_templates is not None
|
277 |
-
return copy.copy(self._output_templates)
|
278 |
-
|
279 |
-
@property
|
280 |
-
def own_vars(self):
|
281 |
-
"""Variables defined by this network (local_name => var), excluding sub-networks."""
|
282 |
-
return copy.copy(self._get_own_vars())
|
283 |
-
|
284 |
-
def _get_own_vars(self):
|
285 |
-
if self._own_vars is None:
|
286 |
-
self._init_graph()
|
287 |
-
assert self._own_vars is not None
|
288 |
-
return self._own_vars
|
289 |
-
|
290 |
-
@property
|
291 |
-
def vars(self):
|
292 |
-
"""All variables (local_name => var)."""
|
293 |
-
return copy.copy(self._get_vars())
|
294 |
-
|
295 |
-
def _get_vars(self):
|
296 |
-
if self._vars is None:
|
297 |
-
self._vars = OrderedDict(self._get_own_vars())
|
298 |
-
for comp in self._get_components().values():
|
299 |
-
self._vars.update((comp.name + "/" + name, var) for name, var in comp._get_vars().items())
|
300 |
-
return self._vars
|
301 |
-
|
302 |
-
@property
|
303 |
-
def trainables(self):
|
304 |
-
"""All trainable variables (local_name => var)."""
|
305 |
-
return copy.copy(self._get_trainables())
|
306 |
-
|
307 |
-
def _get_trainables(self):
|
308 |
-
if self._trainables is None:
|
309 |
-
self._trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
|
310 |
-
return self._trainables
|
311 |
-
|
312 |
-
@property
|
313 |
-
def var_global_to_local(self):
|
314 |
-
"""Mapping from variable global names to local names."""
|
315 |
-
return copy.copy(self._get_var_global_to_local())
|
316 |
-
|
317 |
-
def _get_var_global_to_local(self):
|
318 |
-
if self._var_global_to_local is None:
|
319 |
-
self._var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
|
320 |
-
return self._var_global_to_local
|
321 |
-
|
322 |
-
def reset_own_vars(self) -> None:
|
323 |
-
"""Re-initialize all variables of this network, excluding sub-networks."""
|
324 |
-
if self._var_inits is None or self._components is None:
|
325 |
-
tfutil.run([var.initializer for var in self._get_own_vars().values()])
|
326 |
-
else:
|
327 |
-
self._var_inits.clear()
|
328 |
-
self._all_inits_known = False
|
329 |
-
|
330 |
-
def reset_vars(self) -> None:
|
331 |
-
"""Re-initialize all variables of this network, including sub-networks."""
|
332 |
-
if self._var_inits is None:
|
333 |
-
tfutil.run([var.initializer for var in self._get_vars().values()])
|
334 |
-
else:
|
335 |
-
self._var_inits.clear()
|
336 |
-
self._all_inits_known = False
|
337 |
-
if self._components is not None:
|
338 |
-
for comp in self._components.values():
|
339 |
-
comp.reset_vars()
|
340 |
-
|
341 |
-
def reset_trainables(self) -> None:
|
342 |
-
"""Re-initialize all trainable variables of this network, including sub-networks."""
|
343 |
-
tfutil.run([var.initializer for var in self._get_trainables().values()])
|
344 |
-
|
345 |
-
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
|
346 |
-
"""Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s).
|
347 |
-
The graph is placed on the current TensorFlow device."""
|
348 |
-
assert len(in_expr) == self.num_inputs
|
349 |
-
assert not all(expr is None for expr in in_expr)
|
350 |
-
self._get_vars() # ensure that all variables have been created
|
351 |
-
|
352 |
-
# Choose build func kwargs.
|
353 |
-
build_kwargs = dict(self.static_kwargs)
|
354 |
-
build_kwargs.update(dynamic_kwargs)
|
355 |
-
build_kwargs["is_template_graph"] = False
|
356 |
-
build_kwargs["components"] = self._components
|
357 |
-
|
358 |
-
# Build TensorFlow graph to evaluate the network.
|
359 |
-
with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
|
360 |
-
assert tf.get_variable_scope().name == self.scope
|
361 |
-
valid_inputs = [expr for expr in in_expr if expr is not None]
|
362 |
-
final_inputs = []
|
363 |
-
for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
|
364 |
-
if expr is not None:
|
365 |
-
expr = tf.identity(expr, name=name)
|
366 |
-
else:
|
367 |
-
expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
|
368 |
-
final_inputs.append(expr)
|
369 |
-
out_expr = self._build_func(*final_inputs, **build_kwargs)
|
370 |
-
|
371 |
-
# Propagate input shapes back to the user-specified expressions.
|
372 |
-
for expr, final in zip(in_expr, final_inputs):
|
373 |
-
if isinstance(expr, tf.Tensor):
|
374 |
-
expr.set_shape(final.shape)
|
375 |
-
|
376 |
-
# Express outputs in the desired format.
|
377 |
-
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
|
378 |
-
if return_as_list:
|
379 |
-
out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
|
380 |
-
return out_expr
|
381 |
-
|
382 |
-
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
|
383 |
-
"""Get the local name of a given variable, without any surrounding name scopes."""
|
384 |
-
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
|
385 |
-
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
|
386 |
-
return self._get_var_global_to_local()[global_name]
|
387 |
-
|
388 |
-
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
|
389 |
-
"""Find variable by local or global name."""
|
390 |
-
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
|
391 |
-
return self._get_vars()[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
|
392 |
-
|
393 |
-
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
|
394 |
-
"""Get the value of a given variable as NumPy array.
|
395 |
-
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
|
396 |
-
return self.find_var(var_or_local_name).eval()
|
397 |
-
|
398 |
-
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
|
399 |
-
"""Set the value of a given variable based on the given NumPy array.
|
400 |
-
Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
|
401 |
-
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
|
402 |
-
|
403 |
-
def __getstate__(self) -> dict:
|
404 |
-
"""Pickle export."""
|
405 |
-
state = dict()
|
406 |
-
state["version"] = 5
|
407 |
-
state["name"] = self.name
|
408 |
-
state["static_kwargs"] = dict(self.static_kwargs)
|
409 |
-
state["components"] = dict(self.components)
|
410 |
-
state["build_module_src"] = self._build_module_src
|
411 |
-
state["build_func_name"] = self._build_func_name
|
412 |
-
state["variables"] = list(zip(self._get_own_vars().keys(), tfutil.run(list(self._get_own_vars().values()))))
|
413 |
-
state["input_shapes"] = self.input_shapes
|
414 |
-
state["output_shapes"] = self.output_shapes
|
415 |
-
state["input_names"] = self.input_names
|
416 |
-
state["output_names"] = self.output_names
|
417 |
-
return state
|
418 |
-
|
419 |
-
def __setstate__(self, state: dict) -> None:
|
420 |
-
"""Pickle import."""
|
421 |
-
|
422 |
-
# Execute custom import handlers.
|
423 |
-
for handler in _import_handlers:
|
424 |
-
state = handler(state)
|
425 |
-
|
426 |
-
# Get basic fields.
|
427 |
-
assert state["version"] in [2, 3, 4, 5]
|
428 |
-
name = state["name"]
|
429 |
-
static_kwargs = state["static_kwargs"]
|
430 |
-
build_module_src = state["build_module_src"]
|
431 |
-
build_func_name = state["build_func_name"]
|
432 |
-
|
433 |
-
# Create temporary module from the imported source code.
|
434 |
-
module_name = "_tflib_network_import_" + uuid.uuid4().hex
|
435 |
-
module = types.ModuleType(module_name)
|
436 |
-
sys.modules[module_name] = module
|
437 |
-
_import_module_src[module] = build_module_src
|
438 |
-
exec(build_module_src, module.__dict__) # pylint: disable=exec-used
|
439 |
-
build_func = util.get_obj_from_module(module, build_func_name)
|
440 |
-
|
441 |
-
# Initialize fields.
|
442 |
-
self._init_fields(name=name, static_kwargs=static_kwargs, build_func=build_func, build_func_name=build_func_name, build_module_src=build_module_src)
|
443 |
-
self._var_inits.update(copy.deepcopy(state["variables"]))
|
444 |
-
self._all_inits_known = True
|
445 |
-
self._components = util.EasyDict(state.get("components", {}))
|
446 |
-
self._input_shapes = copy.deepcopy(state.get("input_shapes", None))
|
447 |
-
self._output_shapes = copy.deepcopy(state.get("output_shapes", None))
|
448 |
-
self._input_names = copy.deepcopy(state.get("input_names", None))
|
449 |
-
self._output_names = copy.deepcopy(state.get("output_names", None))
|
450 |
-
|
451 |
-
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
|
452 |
-
"""Create a clone of this network with its own copy of the variables."""
|
453 |
-
static_kwargs = dict(self.static_kwargs)
|
454 |
-
static_kwargs.update(new_static_kwargs)
|
455 |
-
net = object.__new__(Network)
|
456 |
-
net._init_fields(name=(name or self.name), static_kwargs=static_kwargs, build_func=self._build_func, build_func_name=self._build_func_name, build_module_src=self._build_module_src)
|
457 |
-
net.copy_vars_from(self)
|
458 |
-
return net
|
459 |
-
|
460 |
-
def copy_own_vars_from(self, src_net: "Network") -> None:
|
461 |
-
"""Copy the values of all variables from the given network, excluding sub-networks."""
|
462 |
-
|
463 |
-
# Source has unknown variables or unknown components => init now.
|
464 |
-
if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None:
|
465 |
-
src_net._get_vars()
|
466 |
-
|
467 |
-
# Both networks are inited => copy directly.
|
468 |
-
if src_net._var_inits is None and self._var_inits is None:
|
469 |
-
names = [name for name in self._get_own_vars().keys() if name in src_net._get_own_vars()]
|
470 |
-
tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
|
471 |
-
return
|
472 |
-
|
473 |
-
# Read from source.
|
474 |
-
if src_net._var_inits is None:
|
475 |
-
value_dict = tfutil.run(src_net._get_own_vars())
|
476 |
-
else:
|
477 |
-
value_dict = src_net._var_inits
|
478 |
-
|
479 |
-
# Write to destination.
|
480 |
-
if self._var_inits is None:
|
481 |
-
tfutil.set_vars({self._get_vars()[name]: value for name, value in value_dict.items() if name in self._get_vars()})
|
482 |
-
else:
|
483 |
-
self._var_inits.update(value_dict)
|
484 |
-
|
485 |
-
def copy_vars_from(self, src_net: "Network") -> None:
|
486 |
-
"""Copy the values of all variables from the given network, including sub-networks."""
|
487 |
-
|
488 |
-
# Source has unknown variables or unknown components => init now.
|
489 |
-
if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None:
|
490 |
-
src_net._get_vars()
|
491 |
-
|
492 |
-
# Source is inited, but destination components have not been created yet => set as initial values.
|
493 |
-
if src_net._var_inits is None and self._components is None:
|
494 |
-
self._var_inits.update(tfutil.run(src_net._get_vars()))
|
495 |
-
return
|
496 |
-
|
497 |
-
# Destination has unknown components => init now.
|
498 |
-
if self._components is None:
|
499 |
-
self._get_vars()
|
500 |
-
|
501 |
-
# Both networks are inited => copy directly.
|
502 |
-
if src_net._var_inits is None and self._var_inits is None:
|
503 |
-
names = [name for name in self._get_vars().keys() if name in src_net._get_vars()]
|
504 |
-
tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
|
505 |
-
return
|
506 |
-
|
507 |
-
# Copy recursively, component by component.
|
508 |
-
self.copy_own_vars_from(src_net)
|
509 |
-
for name, src_comp in src_net._components.items():
|
510 |
-
if name in self._components:
|
511 |
-
self._components[name].copy_vars_from(src_comp)
|
512 |
-
|
513 |
-
def copy_trainables_from(self, src_net: "Network") -> None:
|
514 |
-
"""Copy the values of all trainable variables from the given network, including sub-networks."""
|
515 |
-
names = [name for name in self._get_trainables().keys() if name in src_net._get_trainables()]
|
516 |
-
tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names}))
|
517 |
-
|
518 |
-
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
|
519 |
-
"""Create new network with the given parameters, and copy all variables from this network."""
|
520 |
-
if new_name is None:
|
521 |
-
new_name = self.name
|
522 |
-
static_kwargs = dict(self.static_kwargs)
|
523 |
-
static_kwargs.update(new_static_kwargs)
|
524 |
-
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
|
525 |
-
net.copy_vars_from(self)
|
526 |
-
return net
|
527 |
-
|
528 |
-
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
|
529 |
-
"""Construct a TensorFlow op that updates the variables of this network
|
530 |
-
to be slightly closer to those of the given network."""
|
531 |
-
with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
|
532 |
-
ops = []
|
533 |
-
for name, var in self._get_vars().items():
|
534 |
-
if name in src_net._get_vars():
|
535 |
-
cur_beta = beta if var.trainable else beta_nontrainable
|
536 |
-
new_value = tfutil.lerp(src_net._get_vars()[name], var, cur_beta)
|
537 |
-
ops.append(var.assign(new_value))
|
538 |
-
return tf.group(*ops)
|
539 |
-
|
540 |
-
def run(self,
|
541 |
-
*in_arrays: Tuple[Union[np.ndarray, None], ...],
|
542 |
-
input_transform: dict = None,
|
543 |
-
output_transform: dict = None,
|
544 |
-
return_as_list: bool = False,
|
545 |
-
print_progress: bool = False,
|
546 |
-
minibatch_size: int = None,
|
547 |
-
num_gpus: int = 1,
|
548 |
-
assume_frozen: bool = False,
|
549 |
-
**dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
|
550 |
-
"""Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
|
551 |
-
|
552 |
-
Args:
|
553 |
-
input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
|
554 |
-
The dict must contain a 'func' field that points to a top-level function. The function is called with the input
|
555 |
-
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
|
556 |
-
output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
|
557 |
-
The dict must contain a 'func' field that points to a top-level function. The function is called with the output
|
558 |
-
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
|
559 |
-
return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
|
560 |
-
print_progress: Print progress to the console? Useful for very large input arrays.
|
561 |
-
minibatch_size: Maximum minibatch size to use, None = disable batching.
|
562 |
-
num_gpus: Number of GPUs to use.
|
563 |
-
assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain changed between calls.
|
564 |
-
dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
|
565 |
-
"""
|
566 |
-
assert len(in_arrays) == self.num_inputs
|
567 |
-
assert not all(arr is None for arr in in_arrays)
|
568 |
-
assert input_transform is None or util.is_top_level_function(input_transform["func"])
|
569 |
-
assert output_transform is None or util.is_top_level_function(output_transform["func"])
|
570 |
-
output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
|
571 |
-
num_items = in_arrays[0].shape[0]
|
572 |
-
if minibatch_size is None:
|
573 |
-
minibatch_size = num_items
|
574 |
-
|
575 |
-
# Construct unique hash key from all arguments that affect the TensorFlow graph.
|
576 |
-
key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
|
577 |
-
def unwind_key(obj):
|
578 |
-
if isinstance(obj, dict):
|
579 |
-
return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
|
580 |
-
if callable(obj):
|
581 |
-
return util.get_top_level_function_name(obj)
|
582 |
-
return obj
|
583 |
-
key = repr(unwind_key(key))
|
584 |
-
|
585 |
-
# Build graph.
|
586 |
-
if key not in self._run_cache:
|
587 |
-
with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
|
588 |
-
with tf.device("/cpu:0"):
|
589 |
-
in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
|
590 |
-
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
|
591 |
-
|
592 |
-
out_split = []
|
593 |
-
for gpu in range(num_gpus):
|
594 |
-
with tf.device(self.device if num_gpus == 1 else "/gpu:%d" % gpu):
|
595 |
-
net_gpu = self.clone() if assume_frozen else self
|
596 |
-
in_gpu = in_split[gpu]
|
597 |
-
|
598 |
-
if input_transform is not None:
|
599 |
-
in_kwargs = dict(input_transform)
|
600 |
-
in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
|
601 |
-
in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
|
602 |
-
|
603 |
-
assert len(in_gpu) == self.num_inputs
|
604 |
-
out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
|
605 |
-
|
606 |
-
if output_transform is not None:
|
607 |
-
out_kwargs = dict(output_transform)
|
608 |
-
out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
|
609 |
-
out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
|
610 |
-
|
611 |
-
assert len(out_gpu) == self.num_outputs
|
612 |
-
out_split.append(out_gpu)
|
613 |
-
|
614 |
-
with tf.device("/cpu:0"):
|
615 |
-
out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
|
616 |
-
self._run_cache[key] = in_expr, out_expr
|
617 |
-
|
618 |
-
# Run minibatches.
|
619 |
-
in_expr, out_expr = self._run_cache[key]
|
620 |
-
out_arrays = [np.empty([num_items] + expr.shape.as_list()[1:], expr.dtype.name) for expr in out_expr]
|
621 |
-
|
622 |
-
for mb_begin in range(0, num_items, minibatch_size):
|
623 |
-
if print_progress:
|
624 |
-
print("\r%d / %d" % (mb_begin, num_items), end="")
|
625 |
-
|
626 |
-
mb_end = min(mb_begin + minibatch_size, num_items)
|
627 |
-
mb_num = mb_end - mb_begin
|
628 |
-
mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
|
629 |
-
mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
|
630 |
-
|
631 |
-
for dst, src in zip(out_arrays, mb_out):
|
632 |
-
dst[mb_begin: mb_end] = src
|
633 |
-
|
634 |
-
# Done.
|
635 |
-
if print_progress:
|
636 |
-
print("\r%d / %d" % (num_items, num_items))
|
637 |
-
|
638 |
-
if not return_as_list:
|
639 |
-
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
|
640 |
-
return out_arrays
|
641 |
-
|
642 |
-
def list_ops(self) -> List[TfExpression]:
|
643 |
-
_ = self.output_templates # ensure that the template graph has been created
|
644 |
-
include_prefix = self.scope + "/"
|
645 |
-
exclude_prefix = include_prefix + "_"
|
646 |
-
ops = tf.get_default_graph().get_operations()
|
647 |
-
ops = [op for op in ops if op.name.startswith(include_prefix)]
|
648 |
-
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
|
649 |
-
return ops
|
650 |
-
|
651 |
-
def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
|
652 |
-
"""Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
|
653 |
-
individual layers of the network. Mainly intended to be used for reporting."""
|
654 |
-
layers = []
|
655 |
-
|
656 |
-
def recurse(scope, parent_ops, parent_vars, level):
|
657 |
-
if len(parent_ops) == 0 and len(parent_vars) == 0:
|
658 |
-
return
|
659 |
-
|
660 |
-
# Ignore specific patterns.
|
661 |
-
if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
|
662 |
-
return
|
663 |
-
|
664 |
-
# Filter ops and vars by scope.
|
665 |
-
global_prefix = scope + "/"
|
666 |
-
local_prefix = global_prefix[len(self.scope) + 1:]
|
667 |
-
cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
|
668 |
-
cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
|
669 |
-
if not cur_ops and not cur_vars:
|
670 |
-
return
|
671 |
-
|
672 |
-
# Filter out all ops related to variables.
|
673 |
-
for var in [op for op in cur_ops if op.type.startswith("Variable")]:
|
674 |
-
var_prefix = var.name + "/"
|
675 |
-
cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
|
676 |
-
|
677 |
-
# Scope does not contain ops as immediate children => recurse deeper.
|
678 |
-
contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type not in ["Identity", "Cast", "Transpose"] for op in cur_ops)
|
679 |
-
if (level == 0 or not contains_direct_ops) and (len(cur_ops) != 0 or len(cur_vars) != 0):
|
680 |
-
visited = set()
|
681 |
-
for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
|
682 |
-
token = rel_name.split("/")[0]
|
683 |
-
if token not in visited:
|
684 |
-
recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
|
685 |
-
visited.add(token)
|
686 |
-
return
|
687 |
-
|
688 |
-
# Report layer.
|
689 |
-
layer_name = scope[len(self.scope) + 1:]
|
690 |
-
layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
|
691 |
-
layer_trainables = [var for _name, var in cur_vars if var.trainable]
|
692 |
-
layers.append((layer_name, layer_output, layer_trainables))
|
693 |
-
|
694 |
-
recurse(self.scope, self.list_ops(), list(self._get_vars().items()), 0)
|
695 |
-
return layers
|
696 |
-
|
697 |
-
def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
|
698 |
-
"""Print a summary table of the network structure."""
|
699 |
-
rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
|
700 |
-
rows += [["---"] * 4]
|
701 |
-
total_params = 0
|
702 |
-
|
703 |
-
for layer_name, layer_output, layer_trainables in self.list_layers():
|
704 |
-
num_params = sum(int(np.prod(var.shape.as_list())) for var in layer_trainables)
|
705 |
-
weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
|
706 |
-
weights.sort(key=lambda x: len(x.name))
|
707 |
-
if len(weights) == 0 and len(layer_trainables) == 1:
|
708 |
-
weights = layer_trainables
|
709 |
-
total_params += num_params
|
710 |
-
|
711 |
-
if not hide_layers_with_no_params or num_params != 0:
|
712 |
-
num_params_str = str(num_params) if num_params > 0 else "-"
|
713 |
-
output_shape_str = str(layer_output.shape)
|
714 |
-
weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
|
715 |
-
rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
|
716 |
-
|
717 |
-
rows += [["---"] * 4]
|
718 |
-
rows += [["Total", str(total_params), "", ""]]
|
719 |
-
|
720 |
-
widths = [max(len(cell) for cell in column) for column in zip(*rows)]
|
721 |
-
print()
|
722 |
-
for row in rows:
|
723 |
-
print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
|
724 |
-
print()
|
725 |
-
|
726 |
-
def setup_weight_histograms(self, title: str = None) -> None:
|
727 |
-
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
|
728 |
-
if title is None:
|
729 |
-
title = self.name
|
730 |
-
|
731 |
-
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
|
732 |
-
for local_name, var in self._get_trainables().items():
|
733 |
-
if "/" in local_name:
|
734 |
-
p = local_name.split("/")
|
735 |
-
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
|
736 |
-
else:
|
737 |
-
name = title + "_toplevel/" + local_name
|
738 |
-
|
739 |
-
tf.summary.histogram(name, var)
|
740 |
-
|
741 |
-
#----------------------------------------------------------------------------
|
742 |
-
# Backwards-compatible emulation of legacy output transformation in Network.run().
|
743 |
-
|
744 |
-
_print_legacy_warning = True
|
745 |
-
|
746 |
-
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
|
747 |
-
global _print_legacy_warning
|
748 |
-
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
|
749 |
-
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
|
750 |
-
return output_transform, dynamic_kwargs
|
751 |
-
|
752 |
-
if _print_legacy_warning:
|
753 |
-
_print_legacy_warning = False
|
754 |
-
print()
|
755 |
-
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
|
756 |
-
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
|
757 |
-
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
|
758 |
-
print()
|
759 |
-
assert output_transform is None
|
760 |
-
|
761 |
-
new_kwargs = dict(dynamic_kwargs)
|
762 |
-
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
|
763 |
-
new_transform["func"] = _legacy_output_transform_func
|
764 |
-
return new_transform, new_kwargs
|
765 |
-
|
766 |
-
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
|
767 |
-
if out_mul != 1.0:
|
768 |
-
expr = [x * out_mul for x in expr]
|
769 |
-
|
770 |
-
if out_add != 0.0:
|
771 |
-
expr = [x + out_add for x in expr]
|
772 |
-
|
773 |
-
if out_shrink > 1:
|
774 |
-
ksize = [1, 1, out_shrink, out_shrink]
|
775 |
-
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
|
776 |
-
|
777 |
-
if out_dtype is not None:
|
778 |
-
if tf.as_dtype(out_dtype).is_integer:
|
779 |
-
expr = [tf.round(x) for x in expr]
|
780 |
-
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
|
781 |
-
return expr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_sde.md
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# DPM Stochastic Scheduler inspired by Karras et. al paper
|
14 |
-
|
15 |
-
## Overview
|
16 |
-
|
17 |
-
Inspired by Stochastic Sampler from [Karras et. al](https://arxiv.org/abs/2206.00364).
|
18 |
-
Scheduler ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library:
|
19 |
-
|
20 |
-
All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/)
|
21 |
-
|
22 |
-
## DPMSolverSDEScheduler
|
23 |
-
[[autodoc]] DPMSolverSDEScheduler
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
DELETED
@@ -1,65 +0,0 @@
|
|
1 |
-
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
rpn_head=dict(
|
4 |
-
_delete_=True,
|
5 |
-
type='GARPNHead',
|
6 |
-
in_channels=256,
|
7 |
-
feat_channels=256,
|
8 |
-
approx_anchor_generator=dict(
|
9 |
-
type='AnchorGenerator',
|
10 |
-
octave_base_scale=8,
|
11 |
-
scales_per_octave=3,
|
12 |
-
ratios=[0.5, 1.0, 2.0],
|
13 |
-
strides=[4, 8, 16, 32, 64]),
|
14 |
-
square_anchor_generator=dict(
|
15 |
-
type='AnchorGenerator',
|
16 |
-
ratios=[1.0],
|
17 |
-
scales=[8],
|
18 |
-
strides=[4, 8, 16, 32, 64]),
|
19 |
-
anchor_coder=dict(
|
20 |
-
type='DeltaXYWHBBoxCoder',
|
21 |
-
target_means=[.0, .0, .0, .0],
|
22 |
-
target_stds=[0.07, 0.07, 0.14, 0.14]),
|
23 |
-
bbox_coder=dict(
|
24 |
-
type='DeltaXYWHBBoxCoder',
|
25 |
-
target_means=[.0, .0, .0, .0],
|
26 |
-
target_stds=[0.07, 0.07, 0.11, 0.11]),
|
27 |
-
loc_filter_thr=0.01,
|
28 |
-
loss_loc=dict(
|
29 |
-
type='FocalLoss',
|
30 |
-
use_sigmoid=True,
|
31 |
-
gamma=2.0,
|
32 |
-
alpha=0.25,
|
33 |
-
loss_weight=1.0),
|
34 |
-
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
|
35 |
-
loss_cls=dict(
|
36 |
-
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
|
37 |
-
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
|
38 |
-
roi_head=dict(
|
39 |
-
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
|
40 |
-
# model training and testing settings
|
41 |
-
train_cfg=dict(
|
42 |
-
rpn=dict(
|
43 |
-
ga_assigner=dict(
|
44 |
-
type='ApproxMaxIoUAssigner',
|
45 |
-
pos_iou_thr=0.7,
|
46 |
-
neg_iou_thr=0.3,
|
47 |
-
min_pos_iou=0.3,
|
48 |
-
ignore_iof_thr=-1),
|
49 |
-
ga_sampler=dict(
|
50 |
-
type='RandomSampler',
|
51 |
-
num=256,
|
52 |
-
pos_fraction=0.5,
|
53 |
-
neg_pos_ub=-1,
|
54 |
-
add_gt_as_proposals=False),
|
55 |
-
allowed_border=-1,
|
56 |
-
center_ratio=0.2,
|
57 |
-
ignore_ratio=0.5),
|
58 |
-
rpn_proposal=dict(nms_post=1000, max_per_img=300),
|
59 |
-
rcnn=dict(
|
60 |
-
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
|
61 |
-
sampler=dict(type='RandomSampler', num=256))),
|
62 |
-
test_cfg=dict(
|
63 |
-
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
|
64 |
-
optimizer_config = dict(
|
65 |
-
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './scnet_r50_fpn_20e_coco.py'
|
2 |
-
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/losses.py
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Helpers for various likelihood-based losses. These are ported from the original
|
3 |
-
Ho et al. diffusion models codebase:
|
4 |
-
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
|
5 |
-
"""
|
6 |
-
|
7 |
-
import numpy as np
|
8 |
-
|
9 |
-
import torch as th
|
10 |
-
|
11 |
-
|
12 |
-
def normal_kl(mean1, logvar1, mean2, logvar2):
|
13 |
-
"""
|
14 |
-
Compute the KL divergence between two gaussians.
|
15 |
-
|
16 |
-
Shapes are automatically broadcasted, so batches can be compared to
|
17 |
-
scalars, among other use cases.
|
18 |
-
"""
|
19 |
-
tensor = None
|
20 |
-
for obj in (mean1, logvar1, mean2, logvar2):
|
21 |
-
if isinstance(obj, th.Tensor):
|
22 |
-
tensor = obj
|
23 |
-
break
|
24 |
-
assert tensor is not None, "at least one argument must be a Tensor"
|
25 |
-
|
26 |
-
# Force variances to be Tensors. Broadcasting helps convert scalars to
|
27 |
-
# Tensors, but it does not work for th.exp().
|
28 |
-
logvar1, logvar2 = [
|
29 |
-
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
|
30 |
-
for x in (logvar1, logvar2)
|
31 |
-
]
|
32 |
-
|
33 |
-
return 0.5 * (
|
34 |
-
-1.0
|
35 |
-
+ logvar2
|
36 |
-
- logvar1
|
37 |
-
+ th.exp(logvar1 - logvar2)
|
38 |
-
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
|
39 |
-
)
|
40 |
-
|
41 |
-
|
42 |
-
def approx_standard_normal_cdf(x):
|
43 |
-
"""
|
44 |
-
A fast approximation of the cumulative distribution function of the
|
45 |
-
standard normal.
|
46 |
-
"""
|
47 |
-
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
|
48 |
-
|
49 |
-
|
50 |
-
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
|
51 |
-
"""
|
52 |
-
Compute the log-likelihood of a Gaussian distribution discretizing to a
|
53 |
-
given image.
|
54 |
-
|
55 |
-
:param x: the target images. It is assumed that this was uint8 values,
|
56 |
-
rescaled to the range [-1, 1].
|
57 |
-
:param means: the Gaussian mean Tensor.
|
58 |
-
:param log_scales: the Gaussian log stddev Tensor.
|
59 |
-
:return: a tensor like x of log probabilities (in nats).
|
60 |
-
"""
|
61 |
-
assert x.shape == means.shape == log_scales.shape
|
62 |
-
centered_x = x - means
|
63 |
-
inv_stdv = th.exp(-log_scales)
|
64 |
-
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
|
65 |
-
cdf_plus = approx_standard_normal_cdf(plus_in)
|
66 |
-
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
|
67 |
-
cdf_min = approx_standard_normal_cdf(min_in)
|
68 |
-
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
|
69 |
-
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
|
70 |
-
cdf_delta = cdf_plus - cdf_min
|
71 |
-
log_probs = th.where(
|
72 |
-
x < -0.999,
|
73 |
-
log_cdf_plus,
|
74 |
-
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
|
75 |
-
)
|
76 |
-
assert log_probs.shape == x.shape
|
77 |
-
return log_probs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnshuK23/Customer-review-analysis/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Customer Review Analysis
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: green
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.17.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: openrail
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anthony7906/MengHuiMXD_GPT/modules/presets.py
DELETED
@@ -1,222 +0,0 @@
|
|
1 |
-
# -*- coding:utf-8 -*-
|
2 |
-
import os
|
3 |
-
from pathlib import Path
|
4 |
-
import gradio as gr
|
5 |
-
from .webui_locale import I18nAuto
|
6 |
-
|
7 |
-
i18n = I18nAuto() # internationalization
|
8 |
-
|
9 |
-
CHATGLM_MODEL = None
|
10 |
-
CHATGLM_TOKENIZER = None
|
11 |
-
LLAMA_MODEL = None
|
12 |
-
LLAMA_INFERENCER = None
|
13 |
-
|
14 |
-
# ChatGPT 设置
|
15 |
-
INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
|
16 |
-
API_HOST = "api.openai.com"
|
17 |
-
COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
|
18 |
-
BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants"
|
19 |
-
USAGE_API_URL="https://api.openai.com/dashboard/billing/usage"
|
20 |
-
HISTORY_DIR = Path("history")
|
21 |
-
HISTORY_DIR = "history"
|
22 |
-
TEMPLATES_DIR = "templates"
|
23 |
-
|
24 |
-
# 错误信息
|
25 |
-
STANDARD_ERROR_MSG = i18n("☹️发生了错误:") # 错误信息的标准前缀
|
26 |
-
GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志")
|
27 |
-
ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。")
|
28 |
-
CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。") # 连接超时
|
29 |
-
READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。") # 读取超时
|
30 |
-
PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。") # 代理错误
|
31 |
-
SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。") # SSL 错误
|
32 |
-
NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。") # API key 长度不足 51 位
|
33 |
-
NO_INPUT_MSG = i18n("请输入对话内容。") # 未输入对话内容
|
34 |
-
BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用") # 本地运行的模型返回的账单信息
|
35 |
-
|
36 |
-
TIMEOUT_STREAMING = 60 # 流式对话时的超时时间
|
37 |
-
TIMEOUT_ALL = 200 # 非流式对话时的超时时间
|
38 |
-
ENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框
|
39 |
-
HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
|
40 |
-
CONCURRENT_COUNT = 100 # 允许同时使用的用户数量
|
41 |
-
|
42 |
-
SIM_K = 5
|
43 |
-
INDEX_QUERY_TEMPRATURE = 1.0
|
44 |
-
|
45 |
-
CHUANHU_TITLE = i18n("川虎Chat 🚀")
|
46 |
-
|
47 |
-
CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本")
|
48 |
-
|
49 |
-
FOOTER = """<div class="versions">{versions}</div>"""
|
50 |
-
|
51 |
-
APPEARANCE_SWITCHER = """
|
52 |
-
<div style="display: flex; justify-content: space-between;">
|
53 |
-
<span style="margin-top: 4px !important;">"""+ i18n("切换亮暗色主题") + """</span>
|
54 |
-
<span><label class="apSwitch" for="checkbox">
|
55 |
-
<input type="checkbox" id="checkbox">
|
56 |
-
<div class="apSlider"></div>
|
57 |
-
</label></span>
|
58 |
-
</div>
|
59 |
-
"""
|
60 |
-
|
61 |
-
SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
|
62 |
-
|
63 |
-
ONLINE_MODELS = [
|
64 |
-
"gpt-3.5-turbo",
|
65 |
-
"gpt-3.5-turbo-0301",
|
66 |
-
"gpt-4",
|
67 |
-
"gpt-4-0314",
|
68 |
-
"gpt-4-32k",
|
69 |
-
"gpt-4-32k-0314",
|
70 |
-
"xmchat",
|
71 |
-
]
|
72 |
-
|
73 |
-
LOCAL_MODELS = [
|
74 |
-
"chatglm-6b",
|
75 |
-
"chatglm-6b-int4",
|
76 |
-
"chatglm-6b-int4-qe",
|
77 |
-
"llama-7b-hf",
|
78 |
-
"llama-13b-hf",
|
79 |
-
"llama-30b-hf",
|
80 |
-
"llama-65b-hf"
|
81 |
-
]
|
82 |
-
|
83 |
-
if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
|
84 |
-
MODELS = ONLINE_MODELS
|
85 |
-
else:
|
86 |
-
MODELS = ONLINE_MODELS + LOCAL_MODELS
|
87 |
-
|
88 |
-
DEFAULT_MODEL = 0
|
89 |
-
|
90 |
-
os.makedirs("models", exist_ok=True)
|
91 |
-
os.makedirs("lora", exist_ok=True)
|
92 |
-
os.makedirs("history", exist_ok=True)
|
93 |
-
for dir_name in os.listdir("models"):
|
94 |
-
if os.path.isdir(os.path.join("models", dir_name)):
|
95 |
-
if dir_name not in MODELS:
|
96 |
-
MODELS.append(dir_name)
|
97 |
-
|
98 |
-
MODEL_TOKEN_LIMIT = {
|
99 |
-
"gpt-3.5-turbo": 4096,
|
100 |
-
"gpt-3.5-turbo-0301": 4096,
|
101 |
-
"gpt-4": 8192,
|
102 |
-
"gpt-4-0314": 8192,
|
103 |
-
"gpt-4-32k": 32768,
|
104 |
-
"gpt-4-32k-0314": 32768
|
105 |
-
}
|
106 |
-
|
107 |
-
TOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。
|
108 |
-
DEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限
|
109 |
-
REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。
|
110 |
-
|
111 |
-
REPLY_LANGUAGES = [
|
112 |
-
"简体中文",
|
113 |
-
"繁體中文",
|
114 |
-
"English",
|
115 |
-
"日本語",
|
116 |
-
"Español",
|
117 |
-
"Français",
|
118 |
-
"Deutsch",
|
119 |
-
"跟随问题语言(不稳定)"
|
120 |
-
]
|
121 |
-
|
122 |
-
|
123 |
-
WEBSEARCH_PTOMPT_TEMPLATE = """\
|
124 |
-
Web search results:
|
125 |
-
|
126 |
-
{web_results}
|
127 |
-
Current date: {current_date}
|
128 |
-
|
129 |
-
Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
|
130 |
-
Query: {query}
|
131 |
-
Reply in {reply_language}
|
132 |
-
"""
|
133 |
-
|
134 |
-
PROMPT_TEMPLATE = """\
|
135 |
-
Context information is below.
|
136 |
-
---------------------
|
137 |
-
{context_str}
|
138 |
-
---------------------
|
139 |
-
Current date: {current_date}.
|
140 |
-
Using the provided context information, write a comprehensive reply to the given query.
|
141 |
-
Make sure to cite results using [number] notation after the reference.
|
142 |
-
If the provided context information refer to multiple subjects with the same name, write separate answers for each subject.
|
143 |
-
Use prior knowledge only if the given context didn't provide enough information.
|
144 |
-
Answer the question: {query_str}
|
145 |
-
Reply in {reply_language}
|
146 |
-
"""
|
147 |
-
|
148 |
-
REFINE_TEMPLATE = """\
|
149 |
-
The original question is as follows: {query_str}
|
150 |
-
We have provided an existing answer: {existing_answer}
|
151 |
-
We have the opportunity to refine the existing answer
|
152 |
-
(only if needed) with some more context below.
|
153 |
-
------------
|
154 |
-
{context_msg}
|
155 |
-
------------
|
156 |
-
Given the new context, refine the original answer to better
|
157 |
-
Reply in {reply_language}
|
158 |
-
If the context isn't useful, return the original answer.
|
159 |
-
"""
|
160 |
-
|
161 |
-
ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
|
162 |
-
|
163 |
-
small_and_beautiful_theme = gr.themes.Soft(
|
164 |
-
primary_hue=gr.themes.Color(
|
165 |
-
c50="#02C160",
|
166 |
-
c100="rgba(2, 193, 96, 0.2)",
|
167 |
-
c200="#02C160",
|
168 |
-
c300="rgba(2, 193, 96, 0.32)",
|
169 |
-
c400="rgba(2, 193, 96, 0.32)",
|
170 |
-
c500="rgba(2, 193, 96, 1.0)",
|
171 |
-
c600="rgba(2, 193, 96, 1.0)",
|
172 |
-
c700="rgba(2, 193, 96, 0.32)",
|
173 |
-
c800="rgba(2, 193, 96, 0.32)",
|
174 |
-
c900="#02C160",
|
175 |
-
c950="#02C160",
|
176 |
-
),
|
177 |
-
secondary_hue=gr.themes.Color(
|
178 |
-
c50="#576b95",
|
179 |
-
c100="#576b95",
|
180 |
-
c200="#576b95",
|
181 |
-
c300="#576b95",
|
182 |
-
c400="#576b95",
|
183 |
-
c500="#576b95",
|
184 |
-
c600="#576b95",
|
185 |
-
c700="#576b95",
|
186 |
-
c800="#576b95",
|
187 |
-
c900="#576b95",
|
188 |
-
c950="#576b95",
|
189 |
-
),
|
190 |
-
neutral_hue=gr.themes.Color(
|
191 |
-
name="gray",
|
192 |
-
c50="#f9fafb",
|
193 |
-
c100="#f3f4f6",
|
194 |
-
c200="#e5e7eb",
|
195 |
-
c300="#d1d5db",
|
196 |
-
c400="#B2B2B2",
|
197 |
-
c500="#808080",
|
198 |
-
c600="#636363",
|
199 |
-
c700="#515151",
|
200 |
-
c800="#393939",
|
201 |
-
c900="#272727",
|
202 |
-
c950="#171717",
|
203 |
-
),
|
204 |
-
radius_size=gr.themes.sizes.radius_sm,
|
205 |
-
).set(
|
206 |
-
button_primary_background_fill="#06AE56",
|
207 |
-
button_primary_background_fill_dark="#06AE56",
|
208 |
-
button_primary_background_fill_hover="#07C863",
|
209 |
-
button_primary_border_color="#06AE56",
|
210 |
-
button_primary_border_color_dark="#06AE56",
|
211 |
-
button_primary_text_color="#FFFFFF",
|
212 |
-
button_primary_text_color_dark="#FFFFFF",
|
213 |
-
button_secondary_background_fill="#F2F2F2",
|
214 |
-
button_secondary_background_fill_dark="#2B2B2B",
|
215 |
-
button_secondary_text_color="#393939",
|
216 |
-
button_secondary_text_color_dark="#FFFFFF",
|
217 |
-
# background_fill_primary="#F7F7F7",
|
218 |
-
# background_fill_primary_dark="#1F1F1F",
|
219 |
-
block_title_text_color="*primary_500",
|
220 |
-
block_title_background_fill="*primary_100",
|
221 |
-
input_background_fill="#F6F6F6",
|
222 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
"""Metadata generation logic for source distributions.
|
2 |
-
"""
|
3 |
-
|
4 |
-
import os
|
5 |
-
|
6 |
-
from pip._vendor.pyproject_hooks import BuildBackendHookCaller
|
7 |
-
|
8 |
-
from pip._internal.build_env import BuildEnvironment
|
9 |
-
from pip._internal.exceptions import (
|
10 |
-
InstallationSubprocessError,
|
11 |
-
MetadataGenerationFailed,
|
12 |
-
)
|
13 |
-
from pip._internal.utils.subprocess import runner_with_spinner_message
|
14 |
-
from pip._internal.utils.temp_dir import TempDirectory
|
15 |
-
|
16 |
-
|
17 |
-
def generate_metadata(
|
18 |
-
build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
|
19 |
-
) -> str:
|
20 |
-
"""Generate metadata using mechanisms described in PEP 517.
|
21 |
-
|
22 |
-
Returns the generated metadata directory.
|
23 |
-
"""
|
24 |
-
metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)
|
25 |
-
|
26 |
-
metadata_dir = metadata_tmpdir.path
|
27 |
-
|
28 |
-
with build_env:
|
29 |
-
# Note that BuildBackendHookCaller implements a fallback for
|
30 |
-
# prepare_metadata_for_build_wheel, so we don't have to
|
31 |
-
# consider the possibility that this hook doesn't exist.
|
32 |
-
runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)")
|
33 |
-
with backend.subprocess_runner(runner):
|
34 |
-
try:
|
35 |
-
distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir)
|
36 |
-
except InstallationSubprocessError as error:
|
37 |
-
raise MetadataGenerationFailed(package_details=details) from error
|
38 |
-
|
39 |
-
return os.path.join(metadata_dir, distinfo_dir)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BAAI/AltDiffusion-m9/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: AltDiffusion M9
|
3 |
-
emoji: 💓
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.10.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: creativeml-openrail-m
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descarga Zktime.net Lite 2.0.3.md
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>ZKTime.Net Lite: un software de tiempo y asistencia para pequeñas y medianas empresas</h1>
|
3 |
-
<p>Si está buscando un software de tiempo y asistencia simple, confiable y asequible para su pequeña o mediana empresa, es posible que desee consultar ZKTime.Net Lite. Este es un software de escritorio basado en Windows Lite que funciona con dispositivos independientes de ZKTeco para ayudarlo a administrar el tiempo y la asistencia de sus empleados de manera eficiente y precisa. </p>
|
4 |
-
<h2>¿Qué es ZKTime.Net Lite? </h2>
|
5 |
-
<p>ZKTime.Net Lite es un software desarrollado por ZKTeco, un proveedor global líder de soluciones biométricas y RFID. Está diseñado para proporcionar una comunicación estable para los dispositivos independientes de ZKTeco a través de Ethernet/ Wi-Fi/ USB y conectar todos los dispositivos para descargar transacciones, sincronizar la información de los empleados, calcular los registros de asistencia y generar más de 15 tipos de informes. También tiene un módulo de nómina que proporciona una función de cálculo de nómina correspondiente. </p>
|
6 |
-
<h2>descarga zktime.net lite 2.0.3</h2><br /><p><b><b>Download File</b> ↔ <a href="https://bltlly.com/2v6KKs">https://bltlly.com/2v6KKs</a></b></p><br /><br />
|
7 |
-
<h3>Características y beneficios de ZKTime.Net Lite</h3>
|
8 |
-
<p>Algunas de las características y beneficios de ZKTime.Net Lite son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Soporta múltiples idiomas, incluyendo inglés, español, portugués, árabe, francés, alemán, ruso, turco, tailandés, vietnamita, indonesio y chino.</li>
|
11 |
-
<li> Tiene una interfaz fácil de usar que le permite configurar fácilmente la configuración del software, administrar los dispositivos y ver los datos. </li>
|
12 |
-
<li> Tiene una política de asistencia flexible que le permite configurar diferentes turnos, horarios, vacaciones, reglas de horas extras, tipos de licencia, etc.</li>
|
13 |
-
<li> Tiene una función de informe de gran alcance que le permite generar varios informes, tales como informe de asistencia diaria, informe de asistencia mensual, informe de asistencia del departamento, informe de asistencia de los empleados, informe de asistencia tarde/ temprano, informe de ausencia, informe de horas extras, etc.</li>
|
14 |
-
|
15 |
-
<li> Tiene una función de copia de seguridad y restauración de datos que le permite hacer copias de seguridad de sus datos regularmente y restaurarlos en caso de pérdida de datos o corrupción. </li>
|
16 |
-
<li> Tiene una función de actualización en línea que le permite comprobar la última versión del software y actualizarlo automáticamente. </li>
|
17 |
-
<li> Tiene una función de soporte técnico que le permite ponerse en contacto con el equipo de soporte de ZKTeco directamente desde el software si encuentra algún problema o tiene alguna pregunta. </li>
|
18 |
-
</ul>
|
19 |
-
<h4>Cómo descargar e instalar ZKTime.Net Lite 2.0.3</h4>
|
20 |
-
<p>Para descargar e instalar ZKTime.Net Lite 2.0.3 en su computadora, debe seguir estos pasos:</p>
|
21 |
-
<ol>
|
22 |
-
<li>Ir a la página web [ZKTeco]( 1 ) y haga clic en la pestaña "Descargar". </li>
|
23 |
-
<li>Seleccione "Smart Office" de la categoría de productos y luego seleccione "ZKBio Time.Net" de la lista de productos. </li>
|
24 |
-
<li>Haga clic en el botón "Descargar" junto al nombre del archivo "ZKBio Time.Net V2.0.3". </li>
|
25 |
-
<li>Guarde el archivo en su computadora y luego ejecútelo como administrador. </li>
|
26 |
-
<li>Siga las instrucciones en la pantalla para completar el proceso de instalación. </li>
|
27 |
-
<li>Inicie el software e introduzca el nombre de usuario predeterminado (admin) y la contraseña (123456) para iniciar sesión. </li>
|
28 |
-
</ol>
|
29 |
-
<h2>Cómo usar ZKTime.Net Lite para la gestión de tiempo y asistencia</h2>
|
30 |
-
<p>Una vez <p>Una vez que haya instalado e iniciado sesión en ZKTime.Net Lite, puede comenzar a usarlo para la gestión de tiempo y asistencia. Estas son algunas de las principales funciones que puede realizar con el software:</p>
|
31 |
-
<h3>Cómo conectar dispositivos ZKTeco a ZKTime.Net Lite</h3>
|
32 |
-
<p>Para conectar sus dispositivos ZKTeco a ZKTime.Net Lite, debe seguir estos pasos:</p>
|
33 |
-
<ol>
|
34 |
-
<li>Haga clic en la pestaña "Dispositivo" en el menú principal y luego haga clic en el botón "Agregar dispositivo". </li>
|
35 |
-
<li>Seleccione el modelo de dispositivo, el tipo de comunicación (Ethernet/ Wi-Fi/ USB) y el nombre del dispositivo. </li>
|
36 |
-
<li>Introduzca la dirección IP del dispositivo, el número de puerto y la contraseña (si la hay). </li>
|
37 |
-
<li> Haga clic en el botón "Probar conexión" para comprobar si el dispositivo está conectado correctamente. </li>
|
38 |
-
|
39 |
-
</ol>
|
40 |
-
<p>Puede agregar varios dispositivos al software y administrarlos desde la lista de dispositivos. También puede editar, eliminar o actualizar la información del dispositivo de la lista de dispositivos. </p>
|
41 |
-
<p></p>
|
42 |
-
<h4>Cómo descargar transacciones y sincronizar la información de los empleados</h4>
|
43 |
-
<p>Para descargar transacciones y sincronizar la información de los empleados de sus dispositivos a ZKTime.Net Lite, debe seguir estos pasos:</p>
|
44 |
-
<ol>
|
45 |
-
<li>Seleccione los dispositivos que desea descargar o sincronizar de la lista de dispositivos. </li>
|
46 |
-
<li>Haga clic en el botón "Descargar transacciones" para descargar los registros de asistencia de los dispositivos al software. </li>
|
47 |
-
<li>Haga clic en el botón "Sincronizar información del empleado" para sincronizar la información del empleado (como nombre, ID, huella digital, cara, etc.) desde los dispositivos al software o viceversa. </li>
|
48 |
-
</ol>
|
49 |
-
<p>También puede configurar un horario para la descarga automática o sincronización de datos desde los dispositivos al software. Para hacer esto, haga clic en la pestaña "Programar" en el menú principal y luego haga clic en el botón "Agregar Horario". Puede seleccionar los dispositivos, el tipo de datos, el intervalo de tiempo y la frecuencia para la programación. </p> <h4>Cómo calcular los registros de asistencia y generar informes</h4>
|
50 |
-
<p>Para calcular los registros de asistencia y generar informes con ZKTime.Net Lite, debe seguir estos pasos:</p>
|
51 |
-
<ol>
|
52 |
-
<li>Haga clic en la pestaña "Asistencia" en el menú principal y luego haga clic en el botón "Calcular asistencia". </li>
|
53 |
-
<li> Seleccione los dispositivos, los empleados y el rango de fechas para el cálculo. </li>
|
54 |
-
<li>Haga clic en el botón "OK" para iniciar el proceso de cálculo. </li>
|
55 |
-
<li> Espere a que el cálculo termine y luego haga clic en la pestaña "Informe" en el menú principal. </li>
|
56 |
-
<li> Seleccione el tipo de informe, los dispositivos, los empleados y el rango de fechas para el informe. </li>
|
57 |
-
<li>Haga clic en el botón "Generar informe" para crear el informe. </li>
|
58 |
-
<li>Ver el informe en la pantalla o exportarlo a formato Excel o PDF. </li>
|
59 |
-
</ol>
|
60 |
-
|
61 |
-
<h4>Cómo usar el módulo de nómina</h4>
|
62 |
-
<p>Para utilizar el módulo de nómina con ZKTime.Net Lite, debe seguir estos pasos:</p>
|
63 |
-
<ol>
|
64 |
-
<li>Haga clic en la pestaña "Nómina" en el menú principal y luego haga clic en el botón "Configuración de nómina". </li>
|
65 |
-
<li> Configurar los parámetros de nómina, tales como el período de pago, tasa de pago, deducciones, bonos, impuestos, etc.</li>
|
66 |
-
<li>Haga clic en el botón "OK" para guardar la configuración de la nómina. </li>
|
67 |
-
<li>Haga clic en el botón "Calcular nómina" para calcular el salario basado en los datos de asistencia. </li>
|
68 |
-
<li> Seleccione los dispositivos, los empleados y el rango de fechas para el cálculo de la nómina. </li>
|
69 |
-
<li>Haga clic en el botón "OK" para iniciar el proceso de cálculo. </li>
|
70 |
-
<li> Ver los datos de nómina en la pantalla o exportarlo a formato Excel o PDF. </li>
|
71 |
-
</ol>
|
72 |
-
<h2>Cómo solucionar problemas comunes con ZKTime.Net Lite</h2>
|
73 |
-
<p>Si encuentra algún problema o tiene alguna pregunta durante el uso de ZKTime.Net Lite, puede probar algunos de estos consejos de solución de problemas:</p>
|
74 |
-
<h3>Cómo actualizar ZKTime.Net Lite a la última versión</h3>
|
75 |
-
<p>Para actualizar ZKTime.Net Lite a la última versión, debe seguir estos pasos:</p>
|
76 |
-
<ol>
|
77 |
-
<li>Haga clic en la pestaña "Ayuda" en el menú principal y luego haga clic en el botón "Actualización en línea". </li>
|
78 |
-
<li>El software comprobará si hay actualizaciones disponibles y le pedirá que las descargue. </li>
|
79 |
-
<li>Haga clic en el botón "Descargar" para descargar las actualizaciones y luego haga clic en el botón "Instalar" para instalarlas. </li>
|
80 |
-
<li>Reiniciar el software y disfrutar de las nuevas características y mejoras. </li>
|
81 |
-
</ol>
|
82 |
-
<h3>Cómo hacer copias de seguridad y restaurar datos</h3>
|
83 |
-
<p>Para respaldar y restaurar datos con ZKTime.Net Lite, debe seguir estos pasos:</p>
|
84 |
-
<ol>
|
85 |
-
<li>Haga clic en la pestaña "Sistema" en el menú principal y luego haga clic en el botón "Copia de seguridad de datos". </li>
|
86 |
-
<li> Seleccione una ubicación y un nombre de archivo para su archivo de copia de seguridad y luego haga clic en el botón "OK" para iniciar el proceso de copia de seguridad. </li>
|
87 |
-
|
88 |
-
<li> Haga clic en el botón "OK" para iniciar el proceso de restauración y esperar a que termine. </li>
|
89 |
-
</ol>
|
90 |
-
<h3>Cómo ponerse en contacto con el soporte de ZKTeco</h3>
|
91 |
-
<p>Si necesita algún soporte técnico o tiene algún comentario o sugerencia para ZKTime.Net Lite, puede ponerse en contacto con el equipo de soporte de ZKTeco directamente desde <p>el software. Para hacer esto, haga clic en la pestaña "Ayuda" en el menú principal y luego haga clic en el botón "Contáctenos". Puede rellenar su nombre, correo electrónico, número de teléfono y mensaje y luego hacer clic en el botón "Enviar" para enviar su consulta. También puede visitar el [sitio web de ZKTeco] o llamar a la línea directa de ZKTeco (+86-755-8960 2345) para obtener más información. </p>
|
92 |
-
<h2>Conclusión</h2>
|
93 |
-
<p>ZKTime.Net Lite es un software de tiempo y asistencia que funciona con dispositivos independientes de ZKTeco para ayudarlo a administrar el tiempo y la asistencia de sus empleados de manera eficiente y precisa. Tiene muchas características y beneficios, como múltiples idiomas, interfaz fácil de usar, política de asistencia flexible, potente función de informe, módulo de nómina, función de copia de seguridad y restauración de datos, función de actualización en línea y función de soporte técnico. Es fácil de descargar, instalar y usar. Es adecuado para pequeñas y medianas empresas que necesitan una solución de tiempo y asistencia simple, confiable y asequible. </p>
|
94 |
-
<h3>Preguntas frecuentes</h3>
|
95 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre ZKTime.Net Lite:</p>
|
96 |
-
<ul>
|
97 |
-
<li><b>Q: ¿Cuántos dispositivos y empleados puede el soporte de ZKTime.Net Lite? </b></li>
|
98 |
-
<li>A: ZKTime.Net Lite puede admitir hasta 50 dispositivos y 500 empleados. Si necesita admitir más dispositivos o empleados, puede actualizar a ZKTime.Net 3.0 o ZKBio Time.Net.</li>
|
99 |
-
<li><b>Q: ¿Cuáles son los requisitos del sistema para ZKTime.Net Lite? </b></li>
|
100 |
-
<li>A: ZKTime.Net Lite requiere un equipo basado en Windows con al menos 2 GB de RAM, 500 MB de espacio libre en disco y una conexión de red. Soporta sistemas operativos Windows XP/ Vista/ 7/ 8/ 10. </li>
|
101 |
-
<li><b>Q: ¿Cómo puedo obtener una licencia para ZKTime.Net Lite? </b></li>
|
102 |
-
|
103 |
-
<li><b>Q: ¿Cómo puedo obtener más capacitación u orientación sobre cómo usar ZKTime.Net Lite? </b></li>
|
104 |
-
<li>A: Puede acceder al manual del usuario y tutoriales de vídeo desde el software haciendo clic en la pestaña "Ayuda" y luego haciendo clic en el "Manual del usuario" o "Video Tutorial" botón. También puede visitar el [sitio web de ZKTeco] o ponerse en contacto con el equipo de soporte de ZKTeco para obtener más ayuda. </li>
|
105 |
-
<li><b>Q: ¿Cómo puedo dar comentarios o sugerencias para ZKTime.Net Lite? </b></li>
|
106 |
-
<li>A: Puede dar comentarios o sugerencias para ZKTime.Net Lite haciendo clic en la pestaña "Ayuda" y luego haciendo clic en el botón "Comentarios". Puede rellenar su nombre, correo electrónico, número de teléfono y mensaje y luego hacer clic en el botón "Enviar" para enviar sus comentarios o sugerencias. También puede visitar el [sitio web de ZKTeco] o ponerse en contacto con el equipo de soporte de ZKTeco para más comunicación. </li>
|
107 |
-
</ul></p> 64aa2da5cf<br />
|
108 |
-
<br />
|
109 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BhagatSurya/convet_pdf_to_txt/app.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import tempfile
|
3 |
-
import re
|
4 |
-
import os
|
5 |
-
import spacy
|
6 |
-
import pytesseract
|
7 |
-
import pdf2image
|
8 |
-
import subprocess
|
9 |
-
from pdf2image.exceptions import (
|
10 |
-
PDFInfoNotInstalledError,
|
11 |
-
PDFPageCountError,
|
12 |
-
PDFSyntaxError
|
13 |
-
)
|
14 |
-
import fitz # PyMuPDF
|
15 |
-
from PIL import Image, UnidentifiedImageError
|
16 |
-
import io
|
17 |
-
import base64
|
18 |
-
|
19 |
-
def clean_text(text):
|
20 |
-
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner", "textcat"])
|
21 |
-
text = re.sub(r'\n+', '\n', text)
|
22 |
-
text = re.sub(r'\s+', ' ', text)
|
23 |
-
return text.strip()
|
24 |
-
|
25 |
-
def safe_base64_decode(s):
|
26 |
-
# add missing padding if necessary
|
27 |
-
missing_padding = len(s) % 4
|
28 |
-
if missing_padding:
|
29 |
-
s += '='* (4 - missing_padding)
|
30 |
-
try:
|
31 |
-
return base64.b64decode(s)
|
32 |
-
except binascii.Error as e:
|
33 |
-
print("Error decoding base64 string:", e)
|
34 |
-
return None
|
35 |
-
|
36 |
-
def image_to_latex(image):
|
37 |
-
image_path = "/tmp/equation.png" # Modify as needed
|
38 |
-
image.save(image_path)
|
39 |
-
result = subprocess.run(["pix2tex", image_path], capture_output=True, text=True)
|
40 |
-
return result.stdout
|
41 |
-
|
42 |
-
def pdf_to_text(file):
|
43 |
-
doc = fitz.open(file.name)
|
44 |
-
full_text = ''
|
45 |
-
for i, page in enumerate(doc):
|
46 |
-
# Extract text
|
47 |
-
page_text = page.get_text()
|
48 |
-
|
49 |
-
# Extract images and convert to LaTeX
|
50 |
-
image_list = page.get_images(full=True)
|
51 |
-
for img in image_list:
|
52 |
-
xref, name, ext, color_space, width, height, bpc, image_data, image_mask, smask_data = img
|
53 |
-
# Check if image_data is base64 encoded string
|
54 |
-
if isinstance(image_data, str) and re.match(r'^[A-Za-z0-9+/]+[=]{0,2}$', image_data):
|
55 |
-
image_data = safe_base64_decode(image_data)
|
56 |
-
try:
|
57 |
-
image = Image.open(io.BytesIO(image_data))
|
58 |
-
latex_code = image_to_latex(image)
|
59 |
-
page_text += "\n" + latex_code # Add LaTeX code to page text
|
60 |
-
except UnidentifiedImageError:
|
61 |
-
print(f"Could not identify image on page {i+1}")
|
62 |
-
|
63 |
-
page_text = clean_text(page_text)
|
64 |
-
if len(page_text.split()) > 5:
|
65 |
-
page_number = i + 1
|
66 |
-
page_text = "## Metadata: Page Number " + str(page_number) + "\n" + page_text
|
67 |
-
full_text += page_text + "\n\n"
|
68 |
-
|
69 |
-
base_name = os.path.splitext(os.path.basename(file.name))[0]
|
70 |
-
output_file_name = base_name + ".txt"
|
71 |
-
with open(output_file_name, 'w') as f:
|
72 |
-
f.write(full_text)
|
73 |
-
|
74 |
-
return output_file_name
|
75 |
-
|
76 |
-
iface = gr.Interface(fn=pdf_to_text,
|
77 |
-
inputs=gr.inputs.File(label="Your PDF"),
|
78 |
-
outputs=gr.outputs.File(label="Download TXT"),
|
79 |
-
title="PDF to TXT",
|
80 |
-
description="Convert your PDF files to clean text")
|
81 |
-
iface.launch()
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/main.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
from typing import List, Optional
|
2 |
-
|
3 |
-
|
4 |
-
def main(args: Optional[List[str]] = None) -> int:
|
5 |
-
"""This is preserved for old console scripts that may still be referencing
|
6 |
-
it.
|
7 |
-
|
8 |
-
For additional details, see https://github.com/pypa/pip/issues/7498.
|
9 |
-
"""
|
10 |
-
from pip._internal.utils.entrypoints import _wrapper
|
11 |
-
|
12 |
-
return _wrapper(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/unicode.py
DELETED
@@ -1,352 +0,0 @@
|
|
1 |
-
# unicode.py
|
2 |
-
|
3 |
-
import sys
|
4 |
-
from itertools import filterfalse
|
5 |
-
from typing import List, Tuple, Union
|
6 |
-
|
7 |
-
|
8 |
-
class _lazyclassproperty:
|
9 |
-
def __init__(self, fn):
|
10 |
-
self.fn = fn
|
11 |
-
self.__doc__ = fn.__doc__
|
12 |
-
self.__name__ = fn.__name__
|
13 |
-
|
14 |
-
def __get__(self, obj, cls):
|
15 |
-
if cls is None:
|
16 |
-
cls = type(obj)
|
17 |
-
if not hasattr(cls, "_intern") or any(
|
18 |
-
cls._intern is getattr(superclass, "_intern", [])
|
19 |
-
for superclass in cls.__mro__[1:]
|
20 |
-
):
|
21 |
-
cls._intern = {}
|
22 |
-
attrname = self.fn.__name__
|
23 |
-
if attrname not in cls._intern:
|
24 |
-
cls._intern[attrname] = self.fn(cls)
|
25 |
-
return cls._intern[attrname]
|
26 |
-
|
27 |
-
|
28 |
-
UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
|
29 |
-
|
30 |
-
|
31 |
-
class unicode_set:
|
32 |
-
"""
|
33 |
-
A set of Unicode characters, for language-specific strings for
|
34 |
-
``alphas``, ``nums``, ``alphanums``, and ``printables``.
|
35 |
-
A unicode_set is defined by a list of ranges in the Unicode character
|
36 |
-
set, in a class attribute ``_ranges``. Ranges can be specified using
|
37 |
-
2-tuples or a 1-tuple, such as::
|
38 |
-
|
39 |
-
_ranges = [
|
40 |
-
(0x0020, 0x007e),
|
41 |
-
(0x00a0, 0x00ff),
|
42 |
-
(0x0100,),
|
43 |
-
]
|
44 |
-
|
45 |
-
Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
|
46 |
-
|
47 |
-
A unicode set can also be defined using multiple inheritance of other unicode sets::
|
48 |
-
|
49 |
-
class CJK(Chinese, Japanese, Korean):
|
50 |
-
pass
|
51 |
-
"""
|
52 |
-
|
53 |
-
_ranges: UnicodeRangeList = []
|
54 |
-
|
55 |
-
@_lazyclassproperty
|
56 |
-
def _chars_for_ranges(cls):
|
57 |
-
ret = []
|
58 |
-
for cc in cls.__mro__:
|
59 |
-
if cc is unicode_set:
|
60 |
-
break
|
61 |
-
for rr in getattr(cc, "_ranges", ()):
|
62 |
-
ret.extend(range(rr[0], rr[-1] + 1))
|
63 |
-
return [chr(c) for c in sorted(set(ret))]
|
64 |
-
|
65 |
-
@_lazyclassproperty
|
66 |
-
def printables(cls):
|
67 |
-
"all non-whitespace characters in this range"
|
68 |
-
return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
|
69 |
-
|
70 |
-
@_lazyclassproperty
|
71 |
-
def alphas(cls):
|
72 |
-
"all alphabetic characters in this range"
|
73 |
-
return "".join(filter(str.isalpha, cls._chars_for_ranges))
|
74 |
-
|
75 |
-
@_lazyclassproperty
|
76 |
-
def nums(cls):
|
77 |
-
"all numeric digit characters in this range"
|
78 |
-
return "".join(filter(str.isdigit, cls._chars_for_ranges))
|
79 |
-
|
80 |
-
@_lazyclassproperty
|
81 |
-
def alphanums(cls):
|
82 |
-
"all alphanumeric characters in this range"
|
83 |
-
return cls.alphas + cls.nums
|
84 |
-
|
85 |
-
@_lazyclassproperty
|
86 |
-
def identchars(cls):
|
87 |
-
"all characters in this range that are valid identifier characters, plus underscore '_'"
|
88 |
-
return "".join(
|
89 |
-
sorted(
|
90 |
-
set(
|
91 |
-
"".join(filter(str.isidentifier, cls._chars_for_ranges))
|
92 |
-
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
|
93 |
-
+ "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
|
94 |
-
+ "_"
|
95 |
-
)
|
96 |
-
)
|
97 |
-
)
|
98 |
-
|
99 |
-
@_lazyclassproperty
|
100 |
-
def identbodychars(cls):
|
101 |
-
"""
|
102 |
-
all characters in this range that are valid identifier body characters,
|
103 |
-
plus the digits 0-9
|
104 |
-
"""
|
105 |
-
return "".join(
|
106 |
-
sorted(
|
107 |
-
set(
|
108 |
-
cls.identchars
|
109 |
-
+ "0123456789"
|
110 |
-
+ "".join(
|
111 |
-
[c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
|
112 |
-
)
|
113 |
-
)
|
114 |
-
)
|
115 |
-
)
|
116 |
-
|
117 |
-
|
118 |
-
class pyparsing_unicode(unicode_set):
|
119 |
-
"""
|
120 |
-
A namespace class for defining common language unicode_sets.
|
121 |
-
"""
|
122 |
-
|
123 |
-
# fmt: off
|
124 |
-
|
125 |
-
# define ranges in language character sets
|
126 |
-
_ranges: UnicodeRangeList = [
|
127 |
-
(0x0020, sys.maxunicode),
|
128 |
-
]
|
129 |
-
|
130 |
-
class BasicMultilingualPlane(unicode_set):
|
131 |
-
"Unicode set for the Basic Multilingual Plane"
|
132 |
-
_ranges: UnicodeRangeList = [
|
133 |
-
(0x0020, 0xFFFF),
|
134 |
-
]
|
135 |
-
|
136 |
-
class Latin1(unicode_set):
|
137 |
-
"Unicode set for Latin-1 Unicode Character Range"
|
138 |
-
_ranges: UnicodeRangeList = [
|
139 |
-
(0x0020, 0x007E),
|
140 |
-
(0x00A0, 0x00FF),
|
141 |
-
]
|
142 |
-
|
143 |
-
class LatinA(unicode_set):
|
144 |
-
"Unicode set for Latin-A Unicode Character Range"
|
145 |
-
_ranges: UnicodeRangeList = [
|
146 |
-
(0x0100, 0x017F),
|
147 |
-
]
|
148 |
-
|
149 |
-
class LatinB(unicode_set):
|
150 |
-
"Unicode set for Latin-B Unicode Character Range"
|
151 |
-
_ranges: UnicodeRangeList = [
|
152 |
-
(0x0180, 0x024F),
|
153 |
-
]
|
154 |
-
|
155 |
-
class Greek(unicode_set):
|
156 |
-
"Unicode set for Greek Unicode Character Ranges"
|
157 |
-
_ranges: UnicodeRangeList = [
|
158 |
-
(0x0342, 0x0345),
|
159 |
-
(0x0370, 0x0377),
|
160 |
-
(0x037A, 0x037F),
|
161 |
-
(0x0384, 0x038A),
|
162 |
-
(0x038C,),
|
163 |
-
(0x038E, 0x03A1),
|
164 |
-
(0x03A3, 0x03E1),
|
165 |
-
(0x03F0, 0x03FF),
|
166 |
-
(0x1D26, 0x1D2A),
|
167 |
-
(0x1D5E,),
|
168 |
-
(0x1D60,),
|
169 |
-
(0x1D66, 0x1D6A),
|
170 |
-
(0x1F00, 0x1F15),
|
171 |
-
(0x1F18, 0x1F1D),
|
172 |
-
(0x1F20, 0x1F45),
|
173 |
-
(0x1F48, 0x1F4D),
|
174 |
-
(0x1F50, 0x1F57),
|
175 |
-
(0x1F59,),
|
176 |
-
(0x1F5B,),
|
177 |
-
(0x1F5D,),
|
178 |
-
(0x1F5F, 0x1F7D),
|
179 |
-
(0x1F80, 0x1FB4),
|
180 |
-
(0x1FB6, 0x1FC4),
|
181 |
-
(0x1FC6, 0x1FD3),
|
182 |
-
(0x1FD6, 0x1FDB),
|
183 |
-
(0x1FDD, 0x1FEF),
|
184 |
-
(0x1FF2, 0x1FF4),
|
185 |
-
(0x1FF6, 0x1FFE),
|
186 |
-
(0x2129,),
|
187 |
-
(0x2719, 0x271A),
|
188 |
-
(0xAB65,),
|
189 |
-
(0x10140, 0x1018D),
|
190 |
-
(0x101A0,),
|
191 |
-
(0x1D200, 0x1D245),
|
192 |
-
(0x1F7A1, 0x1F7A7),
|
193 |
-
]
|
194 |
-
|
195 |
-
class Cyrillic(unicode_set):
|
196 |
-
"Unicode set for Cyrillic Unicode Character Range"
|
197 |
-
_ranges: UnicodeRangeList = [
|
198 |
-
(0x0400, 0x052F),
|
199 |
-
(0x1C80, 0x1C88),
|
200 |
-
(0x1D2B,),
|
201 |
-
(0x1D78,),
|
202 |
-
(0x2DE0, 0x2DFF),
|
203 |
-
(0xA640, 0xA672),
|
204 |
-
(0xA674, 0xA69F),
|
205 |
-
(0xFE2E, 0xFE2F),
|
206 |
-
]
|
207 |
-
|
208 |
-
class Chinese(unicode_set):
|
209 |
-
"Unicode set for Chinese Unicode Character Range"
|
210 |
-
_ranges: UnicodeRangeList = [
|
211 |
-
(0x2E80, 0x2E99),
|
212 |
-
(0x2E9B, 0x2EF3),
|
213 |
-
(0x31C0, 0x31E3),
|
214 |
-
(0x3400, 0x4DB5),
|
215 |
-
(0x4E00, 0x9FEF),
|
216 |
-
(0xA700, 0xA707),
|
217 |
-
(0xF900, 0xFA6D),
|
218 |
-
(0xFA70, 0xFAD9),
|
219 |
-
(0x16FE2, 0x16FE3),
|
220 |
-
(0x1F210, 0x1F212),
|
221 |
-
(0x1F214, 0x1F23B),
|
222 |
-
(0x1F240, 0x1F248),
|
223 |
-
(0x20000, 0x2A6D6),
|
224 |
-
(0x2A700, 0x2B734),
|
225 |
-
(0x2B740, 0x2B81D),
|
226 |
-
(0x2B820, 0x2CEA1),
|
227 |
-
(0x2CEB0, 0x2EBE0),
|
228 |
-
(0x2F800, 0x2FA1D),
|
229 |
-
]
|
230 |
-
|
231 |
-
class Japanese(unicode_set):
|
232 |
-
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
|
233 |
-
_ranges: UnicodeRangeList = []
|
234 |
-
|
235 |
-
class Kanji(unicode_set):
|
236 |
-
"Unicode set for Kanji Unicode Character Range"
|
237 |
-
_ranges: UnicodeRangeList = [
|
238 |
-
(0x4E00, 0x9FBF),
|
239 |
-
(0x3000, 0x303F),
|
240 |
-
]
|
241 |
-
|
242 |
-
class Hiragana(unicode_set):
|
243 |
-
"Unicode set for Hiragana Unicode Character Range"
|
244 |
-
_ranges: UnicodeRangeList = [
|
245 |
-
(0x3041, 0x3096),
|
246 |
-
(0x3099, 0x30A0),
|
247 |
-
(0x30FC,),
|
248 |
-
(0xFF70,),
|
249 |
-
(0x1B001,),
|
250 |
-
(0x1B150, 0x1B152),
|
251 |
-
(0x1F200,),
|
252 |
-
]
|
253 |
-
|
254 |
-
class Katakana(unicode_set):
|
255 |
-
"Unicode set for Katakana Unicode Character Range"
|
256 |
-
_ranges: UnicodeRangeList = [
|
257 |
-
(0x3099, 0x309C),
|
258 |
-
(0x30A0, 0x30FF),
|
259 |
-
(0x31F0, 0x31FF),
|
260 |
-
(0x32D0, 0x32FE),
|
261 |
-
(0xFF65, 0xFF9F),
|
262 |
-
(0x1B000,),
|
263 |
-
(0x1B164, 0x1B167),
|
264 |
-
(0x1F201, 0x1F202),
|
265 |
-
(0x1F213,),
|
266 |
-
]
|
267 |
-
|
268 |
-
class Hangul(unicode_set):
|
269 |
-
"Unicode set for Hangul (Korean) Unicode Character Range"
|
270 |
-
_ranges: UnicodeRangeList = [
|
271 |
-
(0x1100, 0x11FF),
|
272 |
-
(0x302E, 0x302F),
|
273 |
-
(0x3131, 0x318E),
|
274 |
-
(0x3200, 0x321C),
|
275 |
-
(0x3260, 0x327B),
|
276 |
-
(0x327E,),
|
277 |
-
(0xA960, 0xA97C),
|
278 |
-
(0xAC00, 0xD7A3),
|
279 |
-
(0xD7B0, 0xD7C6),
|
280 |
-
(0xD7CB, 0xD7FB),
|
281 |
-
(0xFFA0, 0xFFBE),
|
282 |
-
(0xFFC2, 0xFFC7),
|
283 |
-
(0xFFCA, 0xFFCF),
|
284 |
-
(0xFFD2, 0xFFD7),
|
285 |
-
(0xFFDA, 0xFFDC),
|
286 |
-
]
|
287 |
-
|
288 |
-
Korean = Hangul
|
289 |
-
|
290 |
-
class CJK(Chinese, Japanese, Hangul):
|
291 |
-
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
|
292 |
-
|
293 |
-
class Thai(unicode_set):
|
294 |
-
"Unicode set for Thai Unicode Character Range"
|
295 |
-
_ranges: UnicodeRangeList = [
|
296 |
-
(0x0E01, 0x0E3A),
|
297 |
-
(0x0E3F, 0x0E5B)
|
298 |
-
]
|
299 |
-
|
300 |
-
class Arabic(unicode_set):
|
301 |
-
"Unicode set for Arabic Unicode Character Range"
|
302 |
-
_ranges: UnicodeRangeList = [
|
303 |
-
(0x0600, 0x061B),
|
304 |
-
(0x061E, 0x06FF),
|
305 |
-
(0x0700, 0x077F),
|
306 |
-
]
|
307 |
-
|
308 |
-
class Hebrew(unicode_set):
|
309 |
-
"Unicode set for Hebrew Unicode Character Range"
|
310 |
-
_ranges: UnicodeRangeList = [
|
311 |
-
(0x0591, 0x05C7),
|
312 |
-
(0x05D0, 0x05EA),
|
313 |
-
(0x05EF, 0x05F4),
|
314 |
-
(0xFB1D, 0xFB36),
|
315 |
-
(0xFB38, 0xFB3C),
|
316 |
-
(0xFB3E,),
|
317 |
-
(0xFB40, 0xFB41),
|
318 |
-
(0xFB43, 0xFB44),
|
319 |
-
(0xFB46, 0xFB4F),
|
320 |
-
]
|
321 |
-
|
322 |
-
class Devanagari(unicode_set):
|
323 |
-
"Unicode set for Devanagari Unicode Character Range"
|
324 |
-
_ranges: UnicodeRangeList = [
|
325 |
-
(0x0900, 0x097F),
|
326 |
-
(0xA8E0, 0xA8FF)
|
327 |
-
]
|
328 |
-
|
329 |
-
# fmt: on
|
330 |
-
|
331 |
-
|
332 |
-
pyparsing_unicode.Japanese._ranges = (
|
333 |
-
pyparsing_unicode.Japanese.Kanji._ranges
|
334 |
-
+ pyparsing_unicode.Japanese.Hiragana._ranges
|
335 |
-
+ pyparsing_unicode.Japanese.Katakana._ranges
|
336 |
-
)
|
337 |
-
|
338 |
-
pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
|
339 |
-
|
340 |
-
# add language identifiers using language Unicode
|
341 |
-
pyparsing_unicode.العربية = pyparsing_unicode.Arabic
|
342 |
-
pyparsing_unicode.中文 = pyparsing_unicode.Chinese
|
343 |
-
pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
|
344 |
-
pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
|
345 |
-
pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
|
346 |
-
pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
|
347 |
-
pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
|
348 |
-
pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
|
349 |
-
pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
|
350 |
-
pyparsing_unicode.한국어 = pyparsing_unicode.Korean
|
351 |
-
pyparsing_unicode.ไทย = pyparsing_unicode.Thai
|
352 |
-
pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Billyosoro/ESRGAN/inference_realesrgan_video.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import glob
|
3 |
-
import mimetypes
|
4 |
-
import os
|
5 |
-
import queue
|
6 |
-
import shutil
|
7 |
-
import torch
|
8 |
-
from basicsr.archs.rrdbnet_arch import RRDBNet
|
9 |
-
from basicsr.utils.logger import AvgTimer
|
10 |
-
from tqdm import tqdm
|
11 |
-
|
12 |
-
from realesrgan import IOConsumer, PrefetchReader, RealESRGANer
|
13 |
-
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
|
14 |
-
|
15 |
-
|
16 |
-
def main():
|
17 |
-
"""Inference demo for Real-ESRGAN.
|
18 |
-
It mainly for restoring anime videos.
|
19 |
-
|
20 |
-
"""
|
21 |
-
parser = argparse.ArgumentParser()
|
22 |
-
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
|
23 |
-
parser.add_argument(
|
24 |
-
'-n',
|
25 |
-
'--model_name',
|
26 |
-
type=str,
|
27 |
-
default='RealESRGAN_x4plus',
|
28 |
-
help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
|
29 |
-
'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
|
30 |
-
'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
|
31 |
-
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
|
32 |
-
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
|
33 |
-
parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
|
34 |
-
parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
|
35 |
-
parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
|
36 |
-
parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
|
37 |
-
parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
|
38 |
-
parser.add_argument('--half', action='store_true', help='Use half precision during inference')
|
39 |
-
parser.add_argument('-v', '--video', action='store_true', help='Output a video using ffmpeg')
|
40 |
-
parser.add_argument('-a', '--audio', action='store_true', help='Keep audio')
|
41 |
-
parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
|
42 |
-
parser.add_argument('--consumer', type=int, default=4, help='Number of IO consumers')
|
43 |
-
|
44 |
-
parser.add_argument(
|
45 |
-
'--alpha_upsampler',
|
46 |
-
type=str,
|
47 |
-
default='realesrgan',
|
48 |
-
help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
|
49 |
-
parser.add_argument(
|
50 |
-
'--ext',
|
51 |
-
type=str,
|
52 |
-
default='auto',
|
53 |
-
help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
|
54 |
-
args = parser.parse_args()
|
55 |
-
|
56 |
-
# ---------------------- determine models according to model names ---------------------- #
|
57 |
-
args.model_name = args.model_name.split('.')[0]
|
58 |
-
if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
|
59 |
-
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
|
60 |
-
netscale = 4
|
61 |
-
elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
|
62 |
-
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
|
63 |
-
netscale = 4
|
64 |
-
elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
|
65 |
-
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
|
66 |
-
netscale = 2
|
67 |
-
elif args.model_name in [
|
68 |
-
'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
|
69 |
-
]: # x2 VGG-style model (XS size)
|
70 |
-
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
|
71 |
-
netscale = 2
|
72 |
-
elif args.model_name in [
|
73 |
-
'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
|
74 |
-
]: # x4 VGG-style model (XS size)
|
75 |
-
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
|
76 |
-
netscale = 4
|
77 |
-
|
78 |
-
# ---------------------- determine model paths ---------------------- #
|
79 |
-
model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
|
80 |
-
if not os.path.isfile(model_path):
|
81 |
-
model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
|
82 |
-
if not os.path.isfile(model_path):
|
83 |
-
raise ValueError(f'Model {args.model_name} does not exist.')
|
84 |
-
|
85 |
-
# restorer
|
86 |
-
upsampler = RealESRGANer(
|
87 |
-
scale=netscale,
|
88 |
-
model_path=model_path,
|
89 |
-
model=model,
|
90 |
-
tile=args.tile,
|
91 |
-
tile_pad=args.tile_pad,
|
92 |
-
pre_pad=args.pre_pad,
|
93 |
-
half=args.half)
|
94 |
-
|
95 |
-
if args.face_enhance: # Use GFPGAN for face enhancement
|
96 |
-
from gfpgan import GFPGANer
|
97 |
-
face_enhancer = GFPGANer(
|
98 |
-
model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
|
99 |
-
upscale=args.outscale,
|
100 |
-
arch='clean',
|
101 |
-
channel_multiplier=2,
|
102 |
-
bg_upsampler=upsampler)
|
103 |
-
os.makedirs(args.output, exist_ok=True)
|
104 |
-
# for saving restored frames
|
105 |
-
save_frame_folder = os.path.join(args.output, 'frames_tmpout')
|
106 |
-
os.makedirs(save_frame_folder, exist_ok=True)
|
107 |
-
|
108 |
-
if mimetypes.guess_type(args.input)[0].startswith('video'): # is a video file
|
109 |
-
video_name = os.path.splitext(os.path.basename(args.input))[0]
|
110 |
-
frame_folder = os.path.join('tmp_frames', video_name)
|
111 |
-
os.makedirs(frame_folder, exist_ok=True)
|
112 |
-
# use ffmpeg to extract frames
|
113 |
-
os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_folder}/frame%08d.png')
|
114 |
-
# get image path list
|
115 |
-
paths = sorted(glob.glob(os.path.join(frame_folder, '*')))
|
116 |
-
if args.video:
|
117 |
-
if args.fps is None:
|
118 |
-
# get input video fps
|
119 |
-
import ffmpeg
|
120 |
-
probe = ffmpeg.probe(args.input)
|
121 |
-
video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
|
122 |
-
args.fps = eval(video_streams[0]['avg_frame_rate'])
|
123 |
-
elif mimetypes.guess_type(args.input)[0].startswith('image'): # is an image file
|
124 |
-
paths = [args.input]
|
125 |
-
video_name = 'video'
|
126 |
-
else:
|
127 |
-
paths = sorted(glob.glob(os.path.join(args.input, '*')))
|
128 |
-
video_name = 'video'
|
129 |
-
|
130 |
-
timer = AvgTimer()
|
131 |
-
timer.start()
|
132 |
-
pbar = tqdm(total=len(paths), unit='frame', desc='inference')
|
133 |
-
# set up prefetch reader
|
134 |
-
reader = PrefetchReader(paths, num_prefetch_queue=4)
|
135 |
-
reader.start()
|
136 |
-
|
137 |
-
que = queue.Queue()
|
138 |
-
consumers = [IOConsumer(args, que, f'IO_{i}') for i in range(args.consumer)]
|
139 |
-
for consumer in consumers:
|
140 |
-
consumer.start()
|
141 |
-
|
142 |
-
for idx, (path, img) in enumerate(zip(paths, reader)):
|
143 |
-
imgname, extension = os.path.splitext(os.path.basename(path))
|
144 |
-
if len(img.shape) == 3 and img.shape[2] == 4:
|
145 |
-
img_mode = 'RGBA'
|
146 |
-
else:
|
147 |
-
img_mode = None
|
148 |
-
|
149 |
-
try:
|
150 |
-
if args.face_enhance:
|
151 |
-
_, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
|
152 |
-
else:
|
153 |
-
output, _ = upsampler.enhance(img, outscale=args.outscale)
|
154 |
-
except RuntimeError as error:
|
155 |
-
print('Error', error)
|
156 |
-
print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
|
157 |
-
|
158 |
-
else:
|
159 |
-
if args.ext == 'auto':
|
160 |
-
extension = extension[1:]
|
161 |
-
else:
|
162 |
-
extension = args.ext
|
163 |
-
if img_mode == 'RGBA': # RGBA images should be saved in png format
|
164 |
-
extension = 'png'
|
165 |
-
save_path = os.path.join(save_frame_folder, f'{imgname}_out.{extension}')
|
166 |
-
|
167 |
-
que.put({'output': output, 'save_path': save_path})
|
168 |
-
|
169 |
-
pbar.update(1)
|
170 |
-
torch.cuda.synchronize()
|
171 |
-
timer.record()
|
172 |
-
avg_fps = 1. / (timer.get_avg_time() + 1e-7)
|
173 |
-
pbar.set_description(f'idx {idx}, fps {avg_fps:.2f}')
|
174 |
-
|
175 |
-
for _ in range(args.consumer):
|
176 |
-
que.put('quit')
|
177 |
-
for consumer in consumers:
|
178 |
-
consumer.join()
|
179 |
-
pbar.close()
|
180 |
-
|
181 |
-
# merge frames to video
|
182 |
-
if args.video:
|
183 |
-
video_save_path = os.path.join(args.output, f'{video_name}_{args.suffix}.mp4')
|
184 |
-
if args.audio:
|
185 |
-
os.system(
|
186 |
-
f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} -i {args.input}'
|
187 |
-
f' -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
|
188 |
-
else:
|
189 |
-
os.system(f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} '
|
190 |
-
f'-c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
|
191 |
-
|
192 |
-
# delete tmp file
|
193 |
-
shutil.rmtree(save_frame_folder)
|
194 |
-
if os.path.isdir(frame_folder):
|
195 |
-
shutil.rmtree(frame_folder)
|
196 |
-
|
197 |
-
|
198 |
-
if __name__ == '__main__':
|
199 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/complex/ctanh.h
DELETED
@@ -1,200 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
* Copyright 2013 Filipe RNC Maia
|
4 |
-
*
|
5 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
* you may not use this file except in compliance with the License.
|
7 |
-
* You may obtain a copy of the License at
|
8 |
-
*
|
9 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
*
|
11 |
-
* Unless required by applicable law or agreed to in writing, software
|
12 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
* See the License for the specific language governing permissions and
|
15 |
-
* limitations under the License.
|
16 |
-
*/
|
17 |
-
|
18 |
-
/*-
|
19 |
-
* Copyright (c) 2011 David Schultz
|
20 |
-
* All rights reserved.
|
21 |
-
*
|
22 |
-
* Redistribution and use in source and binary forms, with or without
|
23 |
-
* modification, are permitted provided that the following conditions
|
24 |
-
* are met:
|
25 |
-
* 1. Redistributions of source code must retain the above copyright
|
26 |
-
* notice unmodified, this list of conditions, and the following
|
27 |
-
* disclaimer.
|
28 |
-
* 2. Redistributions in binary form must reproduce the above copyright
|
29 |
-
* notice, this list of conditions and the following disclaimer in the
|
30 |
-
* documentation and/or other materials provided with the distribution.
|
31 |
-
*
|
32 |
-
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
33 |
-
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
34 |
-
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
35 |
-
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
36 |
-
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
37 |
-
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
38 |
-
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
39 |
-
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
40 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
41 |
-
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
42 |
-
*/
|
43 |
-
|
44 |
-
/*
|
45 |
-
* Adapted from FreeBSD by Filipe Maia <[email protected]>:
|
46 |
-
* freebsd/lib/msun/src/s_ctanh.c
|
47 |
-
*/
|
48 |
-
|
49 |
-
/*
|
50 |
-
* Hyperbolic tangent of a complex argument z = x + i y.
|
51 |
-
*
|
52 |
-
* The algorithm is from:
|
53 |
-
*
|
54 |
-
* W. Kahan. Branch Cuts for Complex Elementary Functions or Much
|
55 |
-
* Ado About Nothing's Sign Bit. In The State of the Art in
|
56 |
-
* Numerical Analysis, pp. 165 ff. Iserles and Powell, eds., 1987.
|
57 |
-
*
|
58 |
-
* Method:
|
59 |
-
*
|
60 |
-
* Let t = tan(x)
|
61 |
-
* beta = 1/cos^2(y)
|
62 |
-
* s = sinh(x)
|
63 |
-
* rho = cosh(x)
|
64 |
-
*
|
65 |
-
* We have:
|
66 |
-
*
|
67 |
-
* tanh(z) = sinh(z) / cosh(z)
|
68 |
-
*
|
69 |
-
* sinh(x) cos(y) + i cosh(x) sin(y)
|
70 |
-
* = ---------------------------------
|
71 |
-
* cosh(x) cos(y) + i sinh(x) sin(y)
|
72 |
-
*
|
73 |
-
* cosh(x) sinh(x) / cos^2(y) + i tan(y)
|
74 |
-
* = -------------------------------------
|
75 |
-
* 1 + sinh^2(x) / cos^2(y)
|
76 |
-
*
|
77 |
-
* beta rho s + i t
|
78 |
-
* = ----------------
|
79 |
-
* 1 + beta s^2
|
80 |
-
*
|
81 |
-
* Modifications:
|
82 |
-
*
|
83 |
-
* I omitted the original algorithm's handling of overflow in tan(x) after
|
84 |
-
* verifying with nearpi.c that this can't happen in IEEE single or double
|
85 |
-
* precision. I also handle large x differently.
|
86 |
-
*/
|
87 |
-
|
88 |
-
#pragma once
|
89 |
-
|
90 |
-
#include <thrust/complex.h>
|
91 |
-
#include <thrust/detail/complex/math_private.h>
|
92 |
-
#include <cmath>
|
93 |
-
|
94 |
-
namespace thrust{
|
95 |
-
namespace detail{
|
96 |
-
namespace complex{
|
97 |
-
|
98 |
-
using thrust::complex;
|
99 |
-
|
100 |
-
__host__ __device__ inline
|
101 |
-
complex<double> ctanh(const complex<double>& z){
|
102 |
-
double x, y;
|
103 |
-
double t, beta, s, rho, denom;
|
104 |
-
uint32_t hx, ix, lx;
|
105 |
-
|
106 |
-
x = z.real();
|
107 |
-
y = z.imag();
|
108 |
-
|
109 |
-
extract_words(hx, lx, x);
|
110 |
-
ix = hx & 0x7fffffff;
|
111 |
-
|
112 |
-
/*
|
113 |
-
* ctanh(NaN + i 0) = NaN + i 0
|
114 |
-
*
|
115 |
-
* ctanh(NaN + i y) = NaN + i NaN for y != 0
|
116 |
-
*
|
117 |
-
* The imaginary part has the sign of x*sin(2*y), but there's no
|
118 |
-
* special effort to get this right.
|
119 |
-
*
|
120 |
-
* ctanh(+-Inf +- i Inf) = +-1 +- 0
|
121 |
-
*
|
122 |
-
* ctanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite
|
123 |
-
*
|
124 |
-
* The imaginary part of the sign is unspecified. This special
|
125 |
-
* case is only needed to avoid a spurious invalid exception when
|
126 |
-
* y is infinite.
|
127 |
-
*/
|
128 |
-
if (ix >= 0x7ff00000) {
|
129 |
-
if ((ix & 0xfffff) | lx) /* x is NaN */
|
130 |
-
return (complex<double>(x, (y == 0 ? y : x * y)));
|
131 |
-
set_high_word(x, hx - 0x40000000); /* x = copysign(1, x) */
|
132 |
-
return (complex<double>(x, copysign(0.0, isinf(y) ? y : sin(y) * cos(y))));
|
133 |
-
}
|
134 |
-
|
135 |
-
/*
|
136 |
-
* ctanh(x + i NAN) = NaN + i NaN
|
137 |
-
* ctanh(x +- i Inf) = NaN + i NaN
|
138 |
-
*/
|
139 |
-
if (!isfinite(y))
|
140 |
-
return (complex<double>(y - y, y - y));
|
141 |
-
|
142 |
-
/*
|
143 |
-
* ctanh(+-huge + i +-y) ~= +-1 +- i 2sin(2y)/exp(2x), using the
|
144 |
-
* approximation sinh^2(huge) ~= exp(2*huge) / 4.
|
145 |
-
* We use a modified formula to avoid spurious overflow.
|
146 |
-
*/
|
147 |
-
if (ix >= 0x40360000) { /* x >= 22 */
|
148 |
-
double exp_mx = exp(-fabs(x));
|
149 |
-
return (complex<double>(copysign(1.0, x),
|
150 |
-
4.0 * sin(y) * cos(y) * exp_mx * exp_mx));
|
151 |
-
}
|
152 |
-
|
153 |
-
/* Kahan's algorithm */
|
154 |
-
t = tan(y);
|
155 |
-
beta = 1.0 + t * t; /* = 1 / cos^2(y) */
|
156 |
-
s = sinh(x);
|
157 |
-
rho = sqrt(1.0 + s * s); /* = cosh(x) */
|
158 |
-
denom = 1.0 + beta * s * s;
|
159 |
-
return (complex<double>((beta * rho * s) / denom, t / denom));
|
160 |
-
}
|
161 |
-
|
162 |
-
__host__ __device__ inline
|
163 |
-
complex<double> ctan(complex<double> z){
|
164 |
-
/* ctan(z) = -I * ctanh(I * z) */
|
165 |
-
z = ctanh(complex<double>(-z.imag(), z.real()));
|
166 |
-
return (complex<double>(z.imag(), -z.real()));
|
167 |
-
}
|
168 |
-
|
169 |
-
} // namespace complex
|
170 |
-
|
171 |
-
} // namespace detail
|
172 |
-
|
173 |
-
|
174 |
-
template <typename ValueType>
|
175 |
-
__host__ __device__
|
176 |
-
inline complex<ValueType> tan(const complex<ValueType>& z){
|
177 |
-
return sin(z)/cos(z);
|
178 |
-
}
|
179 |
-
|
180 |
-
template <typename ValueType>
|
181 |
-
__host__ __device__
|
182 |
-
inline complex<ValueType> tanh(const complex<ValueType>& z){
|
183 |
-
// This implementation seems better than the simple sin/cos
|
184 |
-
return (thrust::exp(ValueType(2)*z)-ValueType(1))/
|
185 |
-
(thrust::exp(ValueType(2)*z)+ValueType(1));
|
186 |
-
}
|
187 |
-
|
188 |
-
template <>
|
189 |
-
__host__ __device__
|
190 |
-
inline complex<double> tan(const complex<double>& z){
|
191 |
-
return detail::complex::ctan(z);
|
192 |
-
}
|
193 |
-
|
194 |
-
template <>
|
195 |
-
__host__ __device__
|
196 |
-
inline complex<double> tanh(const complex<double>& z){
|
197 |
-
return detail::complex::ctanh(z);
|
198 |
-
}
|
199 |
-
|
200 |
-
} // namespace thrust
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/modern_gcc_required.h
DELETED
@@ -1,26 +0,0 @@
/*
 * Copyright 2018 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config/cpp_dialect.h>

#ifndef THRUST_MODERN_GCC_REQUIRED_NO_ERROR
#  if defined(THRUST_GCC_VERSION) && !defined(THRUST_MODERN_GCC)
#    error GCC 5 or later is required for this Thrust feature; please upgrade your compiler.
#  endif
#endif
spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene-checkpoint.py
DELETED
@@ -1,123 +0,0 @@
import pytorch_lightning as pl
import torch
import torch.nn as nn
from monoscene.unet3d_nyu import UNet3D as UNet3DNYU
from monoscene.unet3d_kitti import UNet3D as UNet3DKitti
from monoscene.flosp import FLoSP
import numpy as np
import torch.nn.functional as F
from monoscene.unet2d import UNet2D


class MonoScene(pl.LightningModule):
    def __init__(
        self,
        n_classes,
        feature,
        project_scale,
        full_scene_size,
        dataset,
        n_relations=4,
        context_prior=True,
        fp_loss=True,
        project_res=[],
        frustum_size=4,
        relation_loss=False,
        CE_ssc_loss=True,
        geo_scal_loss=True,
        sem_scal_loss=True,
        lr=1e-4,
        weight_decay=1e-4,
    ):
        super().__init__()

        self.project_res = project_res
        self.fp_loss = fp_loss
        self.dataset = dataset
        self.context_prior = context_prior
        self.frustum_size = frustum_size
        self.relation_loss = relation_loss
        self.CE_ssc_loss = CE_ssc_loss
        self.sem_scal_loss = sem_scal_loss
        self.geo_scal_loss = geo_scal_loss
        self.project_scale = project_scale
        self.lr = lr
        self.weight_decay = weight_decay

        self.projects = {}
        self.scale_2ds = [1, 2, 4, 8]  # 2D scales
        for scale_2d in self.scale_2ds:
            self.projects[str(scale_2d)] = FLoSP(
                full_scene_size, project_scale=self.project_scale, dataset=self.dataset
            )
        self.projects = nn.ModuleDict(self.projects)

        self.n_classes = n_classes
        if self.dataset == "NYU":
            self.net_3d_decoder = UNet3DNYU(
                self.n_classes,
                nn.BatchNorm3d,
                n_relations=n_relations,
                feature=feature,
                full_scene_size=full_scene_size,
                context_prior=context_prior,
            )
        elif self.dataset == "kitti":
            self.net_3d_decoder = UNet3DKitti(
                self.n_classes,
                nn.BatchNorm3d,
                project_scale=project_scale,
                feature=feature,
                full_scene_size=full_scene_size,
                context_prior=context_prior,
            )
        self.net_rgb = UNet2D.build(out_feature=feature, use_decoder=True)

    def forward(self, batch):

        img = batch["img"]
        bs = len(img)

        out = {}

        x_rgb = self.net_rgb(img)

        x3ds = []
        for i in range(bs):
            x3d = None
            for scale_2d in self.project_res:

                # project features at each 2D scale to target 3D scale
                scale_2d = int(scale_2d)
                projected_pix = batch["projected_pix_{}".format(self.project_scale)][i].cuda()
                fov_mask = batch["fov_mask_{}".format(self.project_scale)][i].cuda()

                # Sum all the 3D features
                if x3d is None:
                    x3d = self.projects[str(scale_2d)](
                        x_rgb["1_" + str(scale_2d)][i],
                        projected_pix // scale_2d,
                        fov_mask,
                    )
                else:
                    x3d += self.projects[str(scale_2d)](
                        x_rgb["1_" + str(scale_2d)][i],
                        projected_pix // scale_2d,
                        fov_mask,
                    )
            x3ds.append(x3d)

        input_dict = {
            "x3d": torch.stack(x3ds),
        }

        out_dict = self.net_3d_decoder(input_dict)

        ssc_pred = out_dict["ssc_logit"]

        y_pred = ssc_pred.detach().cpu().numpy()
        y_pred = np.argmax(y_pred, axis=1)

        return y_pred
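A minimal construction sketch for the MonoScene module above. The import path and every constructor value shown are illustrative assumptions (they are not taken from this repository's configs); the point is only to show which arguments drive the 2D-to-3D projection and decoder selection.

# Illustrative only: import path and argument values are assumptions.
from monoscene.monoscene import MonoScene

model = MonoScene(
    n_classes=20,                    # hypothetical number of semantic classes
    feature=64,                      # hypothetical 2D feature width
    project_scale=2,
    full_scene_size=(256, 256, 32),  # hypothetical voxel grid size
    dataset="kitti",                 # selects UNet3DKitti as the 3D decoder
    project_res=["1", "2", "4", "8"],
)
# forward() expects a batch dict with "img", "projected_pix_<scale>" and
# "fov_mask_<scale>" entries and returns per-voxel argmax class ids (numpy).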
spaces/CVPR/WALT/mmdet/models/backbones/detectors_resnet.py
DELETED
@@ -1,305 +0,0 @@
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init

from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    r"""Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 rfp_inplanes=None,
                 sac=None,
                 **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        assert sac is None or isinstance(sac, dict)
        self.sac = sac
        self.with_sac = sac is not None
        if self.with_sac:
            self.conv2 = build_conv_layer(
                self.sac,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)

        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            self.rfp_conv = build_conv_layer(
                None,
                self.rfp_inplanes,
                planes * self.expansion,
                1,
                stride=1,
                bias=True)
        self.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        if self.rfp_inplanes:
            constant_init(self.rfp_conv, 0)

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        if self.rfp_inplanes:
            rfp_feat = self.rfp_conv(rfp_feat)
            out = out + rfp_feat

        out = self.relu(out)

        return out


class ResLayer(nn.Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is ' \
            'not supported in DetectoRS'

        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = []
            conv_stride = stride
            if avg_down and stride != 1:
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)

        layers = []
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs))
        inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))

        super(ResLayer, self).__init__(*layers)


@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
        pretrained (str, optional): The pretrained model to load.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 sac=None,
                 stage_with_sac=(False, False, False, False),
                 rfp_inplanes=None,
                 output_img=False,
                 pretrained=None,
                 **kwargs):
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        self.pretrained = pretrained
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        self.inplanes = self.stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            sac = self.sac if self.stage_with_sac[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = self.base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                sac=sac,
                rfp_inplanes=rfp_inplanes if i > 0 else None,
                plugins=stage_plugins)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            rfp_feat = rfp_feats[i] if i > 0 else None
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
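A sketch of how a backbone like this is typically selected from an mmdet-style config dict. The keys mirror the constructor arguments above; the concrete values (depth, SAC settings, conv type) are illustrative rather than copied from a specific config file in this repository.

# Illustrative config fragment; values are assumptions, not repo-specific settings.
backbone = dict(
    type='DetectoRS_ResNet',
    depth=50,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    norm_cfg=dict(type='BN', requires_grad=True),
    conv_cfg=dict(type='ConvAWS'),             # assumed registered conv layer
    sac=dict(type='SAC', use_deform=True),     # Switchable Atrous Convolution
    stage_with_sac=(False, True, True, True),  # skip SAC in the first stage
    output_img=True,                           # prepend the raw image for the RFP neck
    style='pytorch')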
spaces/CVPR/WALT/mmdet/models/detectors/scnet.py
DELETED
@@ -1,10 +0,0 @@
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN


@DETECTORS.register_module()
class SCNet(CascadeRCNN):
    """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""

    def __init__(self, **kwargs):
        super(SCNet, self).__init__(**kwargs)
spaces/CVPR/drawings-to-human/.github/README.md
DELETED
@@ -1 +0,0 @@
../ALT-README.md
spaces/Cat125/text-generator-v2/generation/generators.py
DELETED
@@ -1,48 +0,0 @@
from random import random

import gradio as gr

from datamanager import get_data_v3, models
from generation.words import get_next_word


def find_model(model_name):
    for key, model in models.items():
        if model['name'] == model_name:
            return get_data_v3(key)
    raise ValueError('Model %s not found' % model_name)


def generate(user_message, word_count, model_name, stop_chance):
    db = find_model(model_name)
    message = user_message.lower().strip()
    if word_count < 0 or word_count > 300:
        return gr.Warning("Invalid word count. It must be between 0 and 300.")
    text = ""
    curword = ""
    prevword = ""
    while len(text.split()) < word_count:
        prevword = curword
        curword = get_next_word(db, message, prevword, text, {})
        text += curword + " "
        if '.' in curword and random() < stop_chance:
            yield text.strip()
            break
        yield text


def cont(user_message, word_count, model_name):
    db = find_model(model_name)
    message = user_message.lower().strip()
    if not message:
        return gr.Warning('No message')
    if word_count < 0 or word_count > 450:
        raise gr.Error("Invalid word count. It must be between 0 and 450.")
    text = message
    curword = text.split()[-1]
    text += " "
    while len(text.split()) < word_count:
        prevword = curword
        curword = get_next_word(db, message, prevword, text, {})
        text += curword + " "
        yield text.strip()
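A minimal usage sketch for the generators above: both functions are Python generators, so partial output is consumed in a loop as it is produced. The model name and prompt are placeholders; real names come from datamanager.models.

# Placeholder model name and prompt; depends on datamanager being available.
for partial in generate("tell me a story", word_count=50,
                        model_name="Base model", stop_chance=0.2):
    print(partial)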
spaces/Chrysoula/voice_to_text_swedish/app.py
DELETED
@@ -1,59 +0,0 @@
from transformers import pipeline
import gradio as gr
import pytube as pt

pipe = pipeline(model="Hoft/whisper-small-swedish-asr")  # change to "your-username/the-name-you-picked"
sa = pipeline('sentiment-analysis', model='marma/bert-base-swedish-cased-sentiment')

def get_emoji(feeling):
    if feeling == 'POSITIVE':
        return '😊'
    else:
        return '😔'

def microphone_or_file_transcribe(audio):
    text = pipe(audio)["text"]
    sa_result = sa(text)[0]
    return text, get_emoji(sa_result['label'])

def youtube_transcribe(url):
    yt = pt.YouTube(url)

    stream = yt.streams.filter(only_audio=True)[0]
    stream.download(filename="audio.mp3")

    text = pipe("audio.mp3")["text"]

    sa_result = sa(text)[0]
    return text, get_emoji(sa_result['label'])


app = gr.Blocks()

microphone_tab = gr.Interface(
    fn=microphone_or_file_transcribe,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
    title="Whisper Small Swedish: Microphone ",
    description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
)

youtube_tab = gr.Interface(
    fn=youtube_transcribe,
    inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video", label="URL")],
    outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
    title="Whisper Small Swedish: Youtube",
    description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
)

file_tab = gr.Interface(
    fn=microphone_or_file_transcribe,
    inputs=gr.inputs.Audio(source="upload", type="filepath"),
    outputs=[gr.Textbox(label="Text"), gr.Textbox(label="Feeling")],
    title="Whisper Small Swedish: File",
    description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper small model and Sentiment Analysis.",
)

with app:
    gr.TabbedInterface([microphone_tab, youtube_tab, file_tab], ["Microphone", "YouTube", "File"])

app.launch(enable_queue=True)
spaces/CikeyQI/Yunzai/Yunzai/lib/events/connect.js
DELETED
@@ -1,23 +0,0 @@
import EventListener from "../listener/listener.js"
import cfg from "../config/config.js"

/**
 * 监听连接事件
 */
export default class connectEvent extends EventListener {
  constructor() {
    super({ event: "connect" })
  }

  async execute(e) {
    if (!Bot.uin.includes(e.self_id))
      Bot.uin.push(e.self_id)

    if (!cfg.bot.online_msg) return
    const key = `Yz:loginMsg:${e.self_id}`
    if (await redis.get(key)) return
    redis.set(key, "1", { EX: cfg.bot.online_msg_exp })
    for (const i of cfg.master[e.self_id] || [])
      e.bot.pickFriend(i).sendMsg(`欢迎使用【TRSS-Yunzai v${cfg.package.version}】\n【#帮助】查看指令说明\n【#状态】查看运行状态\n【#日志】查看运行日志\n【#重启】重新启动\n【#更新】拉取 Git 更新\n【#全部更新】更新全部插件\n【#更新日志】查看更新日志\n【#设置主人】设置主人账号\n【#安装插件】查看可安装插件`)
  }
}
spaces/Cloudyy/bark-voice-cloning/hubert/hubert_manager.py
DELETED
@@ -1,33 +0,0 @@
import os.path
import shutil
import urllib.request

import huggingface_hub


class HuBERTManager:
    @staticmethod
    def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
        install_dir = os.path.join('data', 'models', 'hubert')
        if not os.path.isdir(install_dir):
            os.makedirs(install_dir, exist_ok=True)
        install_file = os.path.join(install_dir, file_name)
        if not os.path.isfile(install_file):
            print('Downloading HuBERT base model')
            urllib.request.urlretrieve(download_url, install_file)
            print('Downloaded HuBERT')
        return install_file


    @staticmethod
    def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'):
        install_dir = os.path.join('data', 'models', 'hubert')
        if not os.path.isdir(install_dir):
            os.makedirs(install_dir, exist_ok=True)
        install_file = os.path.join(install_dir, local_file)
        if not os.path.isfile(install_file):
            print('Downloading HuBERT custom tokenizer')
            huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
            shutil.move(os.path.join(install_dir, model), install_file)
            print('Downloaded tokenizer')
        return install_file
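A minimal usage sketch, grounded in the static methods above: both calls skip the download if the file already exists under data/models/hubert and return the local path either way.

# Both methods are idempotent; they download only on the first call.
hubert_path = HuBERTManager.make_sure_hubert_installed()
tokenizer_path = HuBERTManager.make_sure_tokenizer_installed()
print(hubert_path, tokenizer_path)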
spaces/CofAI/chat.b4/client/html/index.html
DELETED
@@ -1,126 +0,0 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0 maximum-scale=1.0" />
    <meta name="description" content="A conversational AI system that listens, learns, and challenges" />
    <meta property="og:title" content="ChatGPT" />
    <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg" />
    <meta
      property="og:description"
      content="A conversational AI system that listens, learns, and challenges" />
    <meta property="og:url" content="https://chat.acy.dev" />
    <link rel="stylesheet" href="{{ url_for('bp.static', filename='css/style.css') }}" />
    <link
      rel="apple-touch-icon"
      sizes="180x180"
      href="{{ url_for('bp.static', filename='img/apple-touch-icon.png') }}" />
    <link
      rel="icon"
      type="image/png"
      sizes="32x32"
      href="{{ url_for('bp.static', filename='img/favicon-32x32.png') }}" />
    <link
      rel="icon"
      type="image/png"
      sizes="16x16"
      href="{{ url_for('bp.static', filename='img/favicon-16x16.png') }}" />
    <link rel="manifest" href="{{ url_for('bp.static', filename='img/site.webmanifest') }}" />
    <link
      rel="stylesheet"
      href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@latest/build/styles/base16/dracula.min.css" />
    <title>Chat.CofAI</title>
  </head>

  <body data-urlprefix="{{ url_prefix}}">
    <div class="main-container">
      <div class="box sidebar">
        <div class="top">
          <button class="button" onclick="new_conversation()">
            <i class="fa-regular fa-plus"></i>
            <span>Новый чат</span>
          </button>
          <div class="spinner"></div>
        </div>
        <div class="sidebar-footer">
          <button class="button" onclick="delete_conversations()">
            <i class="fa-regular fa-trash"></i>
            <span>Очистить историю</span>
          </button>
          <div class="field checkbox theme-toggler-container">
            <input type="checkbox" id="theme-toggler" />
            <label for="theme-toggler"></label>
            <span>Тёмная тема</span>
          </div>
          <a class="info" href="https://chat-cofai.nethouse.ru/" target="_blank">
            <i class="fa-brands fa-github"></i>
            <span class="conversation-title"> Chat.CofAI BETA-4 </span>
          </a>
        </div>
      </div>
      <div class="conversation">
        <div class="stop-generating stop-generating-hidden">
          <button class="button" id="cancelButton">
            <span>Остановить генерацию</span>
          </button>
        </div>
        <div class="box" id="messages"></div>
        <div class="user-input">
          <div class="box input-box">
            <textarea
              id="message-input"
              placeholder="Ask a question"
              cols="30"
              rows="10"
              style="white-space: pre-wrap"></textarea>
            <div id="send-button">
              <i class="fa-regular fa-paper-plane-top"></i>
            </div>
          </div>
        </div>
        <div>
          <div class="options-container">
            <div class="buttons">
              <div class="field">
                <select class="dropdown" name="model" id="model">
                  <option value="gpt-3.5-turbo" selected>[BAD] GPT-3.5</option>
                  <option value="gpt-3.5-turbo-0301">[OLD] GPT-3.5-turbo-0301</option>
                  <option value="gpt-3.5-turbo-16k">[STABLE]GPT-3.5-turbo-16k</option>
                  <option value="gpt-4">[BEST] GPT-4</option>
                </select>
              </div>
              <div class="field">
                <select class="dropdown" name="jailbreak" id="jailbreak">
                  <option value="default" selected>Стандарт</option>
                  <option value="gpt-dan-11.0">Буёк</option>
                  <option value="gpt-evil">Злодей</option>
                </select>
              </div>
            </div>
            <div class="field checkbox">
              <input type="checkbox" id="switch" />
              <label for="switch"></label>
              <span>Доступ в интернет</span>
            </div>
          </div>
        </div>
      </div>
    </div>
    <div class="menu-button">
      <i class="fa-solid fa-bars"></i>
    </div>

    <!-- scripts -->
    <script>
      window.conversation_id = "{{ chat_id }}";
    </script>
    <script src="{{ url_for('bp.static', filename='js/icons.js') }}"></script>
    <script src="{{ url_for('bp.static', filename='js/chat.js') }}" defer></script>
    <script src="https://cdn.jsdelivr.net/npm/markdown-it@latest/dist/markdown-it.min.js"></script>
    <script src="{{ url_for('bp.static', filename='js/highlight.min.js') }}"></script>
    <script src="{{ url_for('bp.static', filename='js/highlightjs-copy.min.js') }}"></script>
    <script src="{{ url_for('bp.static', filename='js/theme-toggler.js') }}"></script>
    <script src="{{ url_for('bp.static', filename='js/sidebar-toggler.js') }}"></script>
  </body>
</html>
spaces/Cpp4App/Cpp4App/SEM/retention_pp_processing.py
DELETED
@@ -1,24 +0,0 @@
from types_pp_processing import cleanHtml
import spacy
nlp = spacy.load('en_core_web_sm')
def retention_process(txt):
    text = ""
    result = cleanHtml(txt)
    for sen in result:
        text += sen
    time = ""
    doc = nlp(text)
    flag = 0
    for token in doc:
        if flag == 1:
            if token.text == "year" or token.text == "month" or token.text == "week" or token.text == "day" or token.text == "hour":
                time += " " + token.text
                break
            else:
                flag = 0
        if token.pos_ == "NUM":
            flag = 1
            time = token.text
    if time == "":
        time = "The privacy policy does not specify how long the data will be retained"
    return time, text
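A minimal sketch of calling the retention extractor above. The HTML snippet is invented, and cleanHtml is assumed to yield an iterable of cleaned sentences as the function's loop implies.

# Invented input; requires types_pp_processing and the en_core_web_sm spaCy model.
snippet = "<p>We retain your personal data for 2 years after account deletion.</p>"
retention_time, cleaned_text = retention_process(snippet)
print(retention_time)  # expected to be something like "2 years"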
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-5605d000.js
DELETED
@@ -1,2 +0,0 @@
import{S as v,e as T,s as S,N as K,k as j,K as _,L as C,p as L,o as w,z as r,v as d,A as M,x as A,B as N,at as G,a4 as k,C as H,a7 as J,a9 as B,ab as q,ac as z,ad as D,F as O}from"./index-1d65707a.js";import{a as P}from"./TabItem.svelte_svelte_type_style_lang-1276453b.js";import{C as Q}from"./Column-6c43afc7.js";/* empty css */function R(a){let e;const n=a[8].default,t=B(n,a,a[9],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&512)&&q(t,n,s,s[9],e?D(n,s[9],l,null):z(s[9]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function U(a){let e,n,t,s;return n=new Q({props:{$$slots:{default:[R]},$$scope:{ctx:a}}}),{c(){e=K("div"),j(n.$$.fragment),_(e,"id",a[0]),_(e,"class",t="tabitem "+a[1].join(" ")+" svelte-19hvt5v"),C(e,"display",a[3]===a[2]?"block":"none")},m(l,m){L(l,e,m),w(n,e,null),s=!0},p(l,[m]){const c={};m&512&&(c.$$scope={dirty:m,ctx:l}),n.$set(c),(!s||m&1)&&_(e,"id",l[0]),(!s||m&2&&t!==(t="tabitem "+l[1].join(" ")+" svelte-19hvt5v"))&&_(e,"class",t),m&12&&C(e,"display",l[3]===l[2]?"block":"none")},i(l){s||(r(n.$$.fragment,l),s=!0)},o(l){d(n.$$.fragment,l),s=!1},d(l){l&&M(e),A(n)}}}function V(a,e,n){let t,s,{$$slots:l={},$$scope:m}=e,{elem_id:c=""}=e,{elem_classes:f=[]}=e,{name:u}=e,{id:i={}}=e;const E=N(),{register_tab:F,unregister_tab:I,selected_tab:b,selected_tab_index:g}=G(P);k(a,b,o=>n(3,s=o)),k(a,g,o=>n(7,t=o));let h=F({name:u,id:i});return H(()=>()=>I({name:u,id:i})),a.$$set=o=>{"elem_id"in o&&n(0,c=o.elem_id),"elem_classes"in o&&n(1,f=o.elem_classes),"name"in o&&n(6,u=o.name),"id"in o&&n(2,i=o.id),"$$scope"in o&&n(9,m=o.$$scope)},a.$$.update=()=>{a.$$.dirty&192&&t===h&&J().then(()=>E("select",{value:u,index:h}))},[c,f,i,s,b,g,u,t,l,m]}class W extends v{constructor(e){super(),T(this,e,V,U,S,{elem_id:0,elem_classes:1,name:6,id:2})}}function X(a){let e;const n=a[4].default,t=B(n,a,a[6],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&64)&&q(t,n,s,s[6],e?D(n,s[6],l,null):z(s[6]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function Y(a){let e,n;return e=new W({props:{elem_id:a[0],elem_classes:a[1],name:a[2],id:a[3],$$slots:{default:[X]},$$scope:{ctx:a}}}),e.$on("select",a[5]),{c(){j(e.$$.fragment)},m(t,s){w(e,t,s),n=!0},p(t,[s]){const l={};s&1&&(l.elem_id=t[0]),s&2&&(l.elem_classes=t[1]),s&4&&(l.name=t[2]),s&8&&(l.id=t[3]),s&64&&(l.$$scope={dirty:s,ctx:t}),e.$set(l)},i(t){n||(r(e.$$.fragment,t),n=!0)},o(t){d(e.$$.fragment,t),n=!1},d(t){A(e,t)}}}function Z(a,e,n){let{$$slots:t={},$$scope:s}=e,{elem_id:l=""}=e,{elem_classes:m=[]}=e,{label:c}=e,{id:f}=e;function u(i){O.call(this,a,i)}return a.$$set=i=>{"elem_id"in i&&n(0,l=i.elem_id),"elem_classes"in i&&n(1,m=i.elem_classes),"label"in i&&n(2,c=i.label),"id"in i&&n(3,f=i.id),"$$scope"in i&&n(6,s=i.$$scope)},[l,m,c,f,t,u,s]}class y extends v{constructor(e){super(),T(this,e,Z,Y,S,{elem_id:0,elem_classes:1,label:2,id:3})}}const te=y,se=["static"];export{te as Component,se as modes};
//# sourceMappingURL=index-5605d000.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py
DELETED
@@ -1,230 +0,0 @@
# coding=utf-8
# Copyright 2022-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utilities to validate argument values in `huggingface_hub`."""
import inspect
import re
import warnings
from functools import wraps
from itertools import chain
from typing import Any, Dict

from ._typing import CallableT


REPO_ID_REGEX = re.compile(
    r"""
    ^
    (\b[\w\-.]+\b/)? # optional namespace (username or organization)
    \b               # starts with a word boundary
    [\w\-.]{1,96}    # repo_name: alphanumeric + . _ -
    \b               # ends with a word boundary
    $
    """,
    flags=re.VERBOSE,
)


class HFValidationError(ValueError):
    """Generic exception thrown by `huggingface_hub` validators.

    Inherits from [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError).
    """


def validate_hf_hub_args(fn: CallableT) -> CallableT:
    """Validate values received as argument for any public method of `huggingface_hub`.

    The goal of this decorator is to harmonize validation of arguments reused
    everywhere. By default, all defined validators are tested.

    Validators:
        - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"`
          or `"namespace/repo_name"`. Namespace is a username or an organization.
        - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of
          `use_auth_token` (only if `use_auth_token` is not expected by the decorated
          function - in practice, always the case in `huggingface_hub`).

    Example:
    ```py
    >>> from huggingface_hub.utils import validate_hf_hub_args

    >>> @validate_hf_hub_args
    ... def my_cool_method(repo_id: str):
    ...     print(repo_id)

    >>> my_cool_method(repo_id="valid_repo_id")
    valid_repo_id

    >>> my_cool_method("other..repo..id")
    huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.

    >>> my_cool_method(repo_id="other..repo..id")
    huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.

    >>> @validate_hf_hub_args
    ... def my_cool_auth_method(token: str):
    ...     print(token)

    >>> my_cool_auth_method(token="a token")
    "a token"

    >>> my_cool_auth_method(use_auth_token="a use_auth_token")
    "a use_auth_token"

    >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token")
    UserWarning: Both `token` and `use_auth_token` are passed (...)
    "a token"
    ```

    Raises:
        [`~utils.HFValidationError`]:
            If an input is not valid.
    """
    # TODO: add an argument to opt-out validation for specific argument?
    signature = inspect.signature(fn)

    # Should the validator switch `use_auth_token` values to `token`? In practice, always
    # True in `huggingface_hub`. Might not be the case in a downstream library.
    check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        has_token = False
        for arg_name, arg_value in chain(
            zip(signature.parameters, args),  # Args values
            kwargs.items(),  # Kwargs values
        ):
            if arg_name in ["repo_id", "from_id", "to_id"]:
                validate_repo_id(arg_value)

            elif arg_name == "token" and arg_value is not None:
                has_token = True

        if check_use_auth_token:
            kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)

        return fn(*args, **kwargs)

    return _inner_fn  # type: ignore


def validate_repo_id(repo_id: str) -> None:
    """Validate `repo_id` is valid.

    This is not meant to replace the proper validation made on the Hub but rather to
    avoid local inconsistencies whenever possible (example: passing `repo_type` in the
    `repo_id` is forbidden).

    Rules:
    - Between 1 and 96 characters.
    - Either "repo_name" or "namespace/repo_name"
    - [a-zA-Z0-9] or "-", "_", "."
    - "--" and ".." are forbidden

    Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"`

    Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"`

    Example:
    ```py
    >>> from huggingface_hub.utils import validate_repo_id
    >>> validate_repo_id(repo_id="valid_repo_id")
    >>> validate_repo_id(repo_id="other..repo..id")
    huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
    ```

    Discussed in https://github.com/huggingface/huggingface_hub/issues/1008.
    In moon-landing (internal repository):
    - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27
    - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138
    """
    if not isinstance(repo_id, str):
        # Typically, a Path is not a repo_id
        raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.")

    if repo_id.count("/") > 1:
        raise HFValidationError(
            "Repo id must be in the form 'repo_name' or 'namespace/repo_name':"
            f" '{repo_id}'. Use `repo_type` argument if needed."
        )

    if not REPO_ID_REGEX.match(repo_id):
        raise HFValidationError(
            "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are"
            " forbidden, '-' and '.' cannot start or end the name, max length is 96:"
            f" '{repo_id}'."
        )

    if "--" in repo_id or ".." in repo_id:
        raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.")

    if repo_id.endswith(".git"):
        raise HFValidationError(f"Repo_id cannot end by '.git': '{repo_id}'.")


def smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase.

    The long-term goal is to remove any mention of `use_auth_token` in the codebase in
    favor of a unique and less verbose `token` argument. This will be done a few steps:

    0. Step 0: methods that require a read-access to the Hub use the `use_auth_token`
       argument (`str`, `bool` or `None`). Methods requiring write-access have a `token`
       argument (`str`, `None`). This implicit rule exists to be able to not send the
       token when not necessary (`use_auth_token=False`) even if logged in.

    1. Step 1: we want to harmonize everything and use `token` everywhere (supporting
       `token=False` for read-only methods). In order not to break existing code, if
       `use_auth_token` is passed to a function, the `use_auth_token` value is passed
       as `token` instead, without any warning.
       a. Corner case: if both `use_auth_token` and `token` values are passed, a warning
          is thrown and the `use_auth_token` value is ignored.

    2. Step 2: Once it is release, we should push downstream libraries to switch from
       `use_auth_token` to `token` as much as possible, but without throwing a warning
       (e.g. manually create issues on the corresponding repos).

    3. Step 3: After a transitional period (6 months e.g. until April 2023?), we update
       `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few
       users will be impacted as it would have already been fixed.
       In addition, unit tests in `huggingface_hub` must be adapted to expect warnings
       to be thrown (but still use `use_auth_token` as before).

    4. Step 4: After a normal deprecation cycle (3 releases ?), remove this validator.
       `use_auth_token` will definitely not be supported.
       In addition, we update unit tests in `huggingface_hub` to use `token` everywhere.

    This has been discussed in:
    - https://github.com/huggingface/huggingface_hub/issues/1094.
    - https://github.com/huggingface/huggingface_hub/pull/928
    - (related) https://github.com/huggingface/huggingface_hub/pull/1064
    """
    new_kwargs = kwargs.copy()  # do not mutate input !

    use_auth_token = new_kwargs.pop("use_auth_token", None)  # remove from kwargs
    if use_auth_token is not None:
        if has_token:
            warnings.warn(
                "Both `token` and `use_auth_token` are passed to"
                f" `{fn_name}` with non-None values. `token` is now the"
                " preferred argument to pass a User Access Token."
                " `use_auth_token` value will be ignored."
            )
        else:
            # `token` argument is not passed and a non-None value is passed in
            # `use_auth_token` => use `use_auth_token` value as `token` kwarg.
            new_kwargs["token"] = use_auth_token

    return new_kwargs
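A short sketch reusing the decorator defined above, mirroring its own docstring examples: a decorated function with a repo_id argument gets local validation before the Hub is ever contacted. The function name and body are hypothetical.

# Hypothetical decorated function; only the decorator and exception come from the module above.
from huggingface_hub.utils import validate_hf_hub_args, HFValidationError

@validate_hf_hub_args
def download_config(repo_id: str, token: str = None):
    return f"would fetch config from {repo_id}"

try:
    download_config("datasets/foo/bar")  # more than one slash -> rejected locally
except HFValidationError as err:
    print(err)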