parquet-converter committed
Commit df56c2c · 1 Parent(s): c41fc7b

Update parquet files (step 92 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/__init__.py +0 -214
  2. spaces/101-5/gpt4free/g4f/.v1/testing/hpgptai_test.py +0 -41
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW Graphics Suite 2020 Crack Free Download Pros and Cons.md +0 -25
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Creatink V1.0.6 ? Multi-Concept Responsive WordPress Theme ((EXCLUSIVE)).md +0 -67
  5. spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop Elements 10 (serial Crack) Crack.md +0 -7
  6. spaces/1gistliPinn/ChatGPT4/Examples/Autodata Cd 3 Cd Code.md +0 -11
  7. spaces/1gistliPinn/ChatGPT4/Examples/Download Ebuddy Java Facebook.md +0 -93
  8. spaces/1phancelerku/anime-remove-background/Download Epic Race 3D and Compete with Other Players Online.md +0 -125
  9. spaces/1phancelerku/anime-remove-background/Download J Image for PC A Step-by-Step Guide.md +0 -131
  10. spaces/1phancelerku/anime-remove-background/Download and Install Word 2017 for Windows 7 Without Paying a Cent.md +0 -232
  11. spaces/1vash/demo-flask-docker-template/api_server.py +0 -164
  12. spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/make_synthesis_engines.py +0 -122
  13. spaces/7hao/bingo/src/components/user-menu.tsx +0 -113
  14. spaces/801artistry/RVC801/utils/clonerepo_experimental.py +0 -253
  15. spaces/AI-Dashboards/Memory-Chat-Story-Generator-ChatGPT/app.py +0 -217
  16. spaces/AIFILMS/StyleGANEX/models/stylegan2/op/__init__.py +0 -2
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/__init__.py +0 -0
  18. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d101_8xb32_in1k.py +0 -5
  19. spaces/Abdllh/poetry202/app.py +0 -53
  20. spaces/AchyuthGamer/OpenGPT/g4f/__init__.py +0 -110
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/SimpleDropDownList.d.ts +0 -20
  22. spaces/AlexWang/lama/saicinpainting/training/modules/__init__.py +0 -31
  23. spaces/Alpaca233/SadTalker/src/utils/paste_pic.py +0 -69
  24. spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_config.py +0 -19
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/in_translation.md +0 -16
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/inpainting.py +0 -9
  27. spaces/Andy1621/uniformer_image_detection/tools/model_converters/publish_model.py +0 -39
  28. spaces/AriaMei/TTSdemo/app.py +0 -164
  29. spaces/Artrajz/vits-simple-api/vits/hubert_model.py +0 -221
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/version.py +0 -1
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/__init__.py +0 -2
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/initialise_test.py +0 -189
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/catalog.py +0 -115
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/masks.py +0 -532
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_model_analysis.py +0 -80
  36. spaces/AzumaSeren100/XuanShen-Bert-VITS2/resample.py +0 -42
  37. spaces/Bart92/RVC_HF/infer/lib/infer_pack/attentions.py +0 -417
  38. spaces/Bart92/RVC_HF/tools/infer/train-index-v2.py +0 -79
  39. spaces/BetterAPI/BetterChat/src/lib/utils/sha256.ts +0 -7
  40. spaces/CVH-vn1210/make_hair/minigpt4/common/config.py +0 -468
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/apply_net.py +0 -318
  42. spaces/CVPR/LIVE/pybind11/tests/test_eval.cpp +0 -91
  43. spaces/CVPR/LIVE/thrust/cmake/ThrustCudaConfig.cmake +0 -140
  44. spaces/ChallengeHub/Chinese-LangChain/README.md +0 -116
  45. spaces/ChandraMohanNayal/AutoGPT/autogpt/json_utils/utilities.py +0 -54
  46. spaces/CikeyQI/meme-api/meme_generator/memes/capoo_rub/__init__.py +0 -30
  47. spaces/CognitiveLabs/Research-Assistant/test/test.py +0 -17
  48. spaces/CorvaeOboro/gen_ability_icon/dnnlib/util.py +0 -477
  49. spaces/Cpp4App/Cpp4App/CDM/detect_compo/ip_region_proposal.py +0 -200
  50. spaces/DCandE/rvc-models/infer_pack/modules.py +0 -522
spaces/101-5/gpt4free/g4f/.v1/gpt4free/forefront/__init__.py DELETED
@@ -1,214 +0,0 @@
- import hashlib
- from base64 import b64encode
- from json import loads
- from re import findall
- from time import time, sleep
- from typing import Generator, Optional
- from uuid import uuid4
-
- from Crypto.Cipher import AES
- from Crypto.Random import get_random_bytes
- from fake_useragent import UserAgent
- from mailgw_temporary_email import Email
- from requests import post
- from tls_client import Session
-
- from .typing import ForeFrontResponse, AccountData
-
-
- class Account:
-     @staticmethod
-     def create(proxy: Optional[str] = None, logging: bool = False) -> AccountData:
-         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
-
-         start = time()
-
-         mail_client = Email()
-         mail_client.register()
-         mail_address = mail_client.address
-
-         client = Session(client_identifier='chrome110')
-         client.proxies = proxies
-         client.headers = {
-             'origin': 'https://accounts.forefront.ai',
-             'user-agent': UserAgent().random,
-         }
-
-         response = client.post(
-             'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.38.4',
-             data={'email_address': mail_address},
-         )
-
-         try:
-             trace_token = response.json()['response']['id']
-             if logging:
-                 print(trace_token)
-         except KeyError:
-             raise RuntimeError('Failed to create account!')
-
-         response = client.post(
-             f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4',
-             data={
-                 'strategy': 'email_link',
-                 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'
-             },
-         )
-
-         if logging:
-             print(response.text)
-
-         if 'sign_up_attempt' not in response.text:
-             raise RuntimeError('Failed to create account!')
-
-         while True:
-             sleep(5)
-             message_id = mail_client.message_list()[0]['id']
-             message = mail_client.message(message_id)
-             verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', message["text"])[0]
-             if verification_url:
-                 break
-
-         if logging:
-             print(verification_url)
-         client.get(verification_url)
-
-         response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4').json()
-         session_data = response['response']['sessions'][0]
-
-         user_id = session_data['user']['id']
-         session_id = session_data['id']
-         token = session_data['last_active_token']['jwt']
-
-         with open('accounts.txt', 'a') as f:
-             f.write(f'{mail_address}:{token}\n')
-
-         if logging:
-             print(time() - start)
-
-         return AccountData(token=token, user_id=user_id, session_id=session_id)
-
-
- class StreamingCompletion:
-     @staticmethod
-     def create(
-         prompt: str,
-         account_data: AccountData,
-         chat_id=None,
-         action_type='new',
-         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
-         model='gpt-4',
-         proxy=None
-     ) -> Generator[ForeFrontResponse, None, None]:
-         token = account_data.token
-         if not chat_id:
-             chat_id = str(uuid4())
-
-         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
-         base64_data = b64encode((account_data.user_id + default_persona + chat_id).encode()).decode()
-         encrypted_signature = StreamingCompletion.__encrypt(base64_data, account_data.session_id)
-
-         headers = {
-             'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
-             'accept': '*/*',
-             'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-             'cache-control': 'no-cache',
-             'content-type': 'application/json',
-             'origin': 'https://chat.forefront.ai',
-             'pragma': 'no-cache',
-             'referer': 'https://chat.forefront.ai/',
-             'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"macOS"',
-             'sec-fetch-dest': 'empty',
-             'sec-fetch-mode': 'cors',
-             'sec-fetch-site': 'cross-site',
-             'authorization': f"Bearer {token}",
-             'X-Signature': encrypted_signature,
-             'user-agent': UserAgent().random,
-         }
-
-         json_data = {
-             'text': prompt,
-             'action': action_type,
-             'parentId': chat_id,
-             'workspaceId': chat_id,
-             'messagePersona': default_persona,
-             'model': model,
-         }
-
-         for chunk in post(
-             'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/chat',
-             headers=headers,
-             proxies=proxies,
-             json=json_data,
-             stream=True,
-         ).iter_lines():
-             if b'finish_reason":null' in chunk:
-                 data = loads(chunk.decode('utf-8').split('data: ')[1])
-                 token = data['choices'][0]['delta'].get('content')
-
-                 if token is not None:
-                     yield ForeFrontResponse(
-                         **{
-                             'id': chat_id,
-                             'object': 'text_completion',
-                             'created': int(time()),
-                             'text': token,
-                             'model': model,
-                             'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
-                             'usage': {
-                                 'prompt_tokens': len(prompt),
-                                 'completion_tokens': len(token),
-                                 'total_tokens': len(prompt) + len(token),
-                             },
-                         }
-                     )
-
-     @staticmethod
-     def __encrypt(data: str, key: str) -> str:
-         hash_key = hashlib.sha256(key.encode()).digest()
-         iv = get_random_bytes(16)
-         cipher = AES.new(hash_key, AES.MODE_CBC, iv)
-         encrypted_data = cipher.encrypt(StreamingCompletion.__pad_data(data.encode()))
-         return iv.hex() + encrypted_data.hex()
-
-     @staticmethod
-     def __pad_data(data: bytes) -> bytes:
-         block_size = AES.block_size
-         padding_size = block_size - len(data) % block_size
-         padding = bytes([padding_size] * padding_size)
-         return data + padding
-
-
- class Completion:
-     @staticmethod
-     def create(
-         prompt: str,
-         account_data: AccountData,
-         chat_id=None,
-         action_type='new',
-         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
-         model='gpt-4',
-         proxy=None
-     ) -> ForeFrontResponse:
-         text = ''
-         final_response = None
-         for response in StreamingCompletion.create(
-             account_data=account_data,
-             chat_id=chat_id,
-             prompt=prompt,
-             action_type=action_type,
-             default_persona=default_persona,
-             model=model,
-             proxy=proxy
-         ):
-             if response:
-                 final_response = response
-                 text += response.text
-
-         if final_response:
-             final_response.text = text
-         else:
-             raise RuntimeError('Unable to get the response, Please try again')
-
-         return final_response
spaces/101-5/gpt4free/g4f/.v1/testing/hpgptai_test.py DELETED
@@ -1,41 +0,0 @@
- import hpgptai
-
- #single completion
- res = hpgptai.Completion.create("你是谁","127.0.0.1:7890")
- print(res["reply"])
-
-
- #chat completion
- messages = [
-     {
-         "content": "你是谁",
-         "html": "你是谁",
-         "id": hpgptai.ChatCompletion.randomStr(),
-         "role": "user",
-         "who": "User: ",
-     },
-     {
-         "content": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
-         "html": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。",
-         "id": hpgptai.ChatCompletion.randomStr(),
-         "role": "assistant",
-         "who": "AI: ",
-     },
-     {
-         "content": "我上一句问的是什么?",
-         "html": "我上一句问的是什么?",
-         "id": hpgptai.ChatCompletion.randomStr(),
-         "role": "user",
-         "who": "User: ",
-     },
- ]
- res = hpgptai.ChatCompletion.create(messages,proxy="127.0.0.1:7890")
- print(res["reply"])
-
-
-
-
-
-
-
-
spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW Graphics Suite 2020 Crack Free Download Pros and Cons.md DELETED
@@ -1,25 +0,0 @@
-
- <h1>How to Download and Install CorelDRAW Graphics Suite 2020 Crack for Free</h1>
- <p>If you are looking for a powerful and versatile graphic design software, you may want to try CorelDRAW Graphics Suite 2020. This software offers a comprehensive set of tools for creating vector illustrations, layouts, photo editing, typography, and more. You can also access thousands of clip art, digital images, fonts, templates, and fills to enhance your projects.</p>
- <p>However, CorelDRAW Graphics Suite 2020 is not a free software. You need to purchase a subscription or a perpetual license to use it. The subscription costs $249 per year or $20.75 per month, while the perpetual license costs $499. If you want to save money and still enjoy the features of CorelDRAW Graphics Suite 2020, you may be tempted to download a cracked version from the internet. But is it safe and legal to do so?</p>
- <h2>coreldraw 2020 crack only free download</h2><br /><p><b><b>DOWNLOAD</b> &#9999; &#9999; &#9999; <a href="https://byltly.com/2uKxFw">https://byltly.com/2uKxFw</a></b></p><br /><br />
- <h2>The Risks of Using CorelDRAW Graphics Suite 2020 Crack</h2>
- <p>A cracked version of CorelDRAW Graphics Suite 2020 is a modified version that bypasses the activation process and allows you to use the software without paying for it. However, using a cracked version comes with many risks and disadvantages. Here are some of them:</p>
- <ul>
- <li><b>It is illegal.</b> Downloading and using a cracked version of CorelDRAW Graphics Suite 2020 is a violation of the software's license agreement and copyright laws. You may face legal consequences such as fines or lawsuits if you are caught using it.</li>
- <li><b>It is unsafe.</b> Downloading and installing a cracked version of CorelDRAW Graphics Suite 2020 may expose your computer to malware, viruses, spyware, ransomware, or other malicious programs that can harm your system or steal your personal information. You may also lose your data or damage your hardware if the crack is faulty or incompatible.</li>
- <li><b>It is unreliable.</b> A cracked version of CorelDRAW Graphics Suite 2020 may not work properly or have missing or corrupted features. You may also experience crashes, errors, bugs, or performance issues that can affect your work quality and productivity. You may not be able to update the software or access online services such as cloud storage, collaboration tools, or customer support.</li>
- <li><b>It is unethical.</b> Downloading and using a cracked version of CorelDRAW Graphics Suite 2020 is unfair to the developers who spent time and resources to create the software. You are also depriving yourself of the opportunity to learn and improve your skills with a legitimate and professional software.</li>
- </ul>
- <h2>The Benefits of Using CorelDRAW Graphics Suite 2020 Official Version</h2>
- <p>If you want to avoid the risks and disadvantages of using a cracked version of CorelDRAW Graphics Suite 2020, you should consider using the official version instead. Here are some of the benefits of using the official version:</p>
- <ul>
- <li><b>It is legal.</b> Using the official version of CorelDRAW Graphics Suite 2020 means that you are complying with the software's license agreement and copyright laws. You can use the software without worrying about legal issues or penalties.</li>
- <li><b>It is safe.</b> Using the official version of CorelDRAW Graphics Suite 2020 means that you are downloading and installing the software from a trusted source. You can be sure that the software is free from malware, viruses, spyware, ransomware, or other malicious programs that can harm your computer or compromise your security.</li>
- <li><b>It is reliable.</b> Using the official version of CorelDRAW Graphics Suite 2020 means that you are getting the full and latest version of the software with all its features and functions. You can also enjoy updates, bug fixes, enhancements, and new features that can improve your work quality and productivity. You can also access online services such as cloud storage, collaboration tools, customer support, and more.</li>
- <li><b>It is ethical.</b> Using the official version of CorelDRAW Graphics Suite 2020 means that you are supporting the developers who created the software. You are also investing in your own learning and development with a legitimate and professional software.</li>
- </ul>
- <h2>How to Download and Install CorelDRAW Graphics Suite 2020 Official Version</h2>
- <p>If you are convinced that using the official version of Corel</p> ddb901b051<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Creatink V1.0.6 ? Multi-Concept Responsive WordPress Theme ((EXCLUSIVE)).md DELETED
@@ -1,67 +0,0 @@
- <br />
- <h1>Creatink v1.0.6 – Multi-Concept Responsive WordPress Theme Review</h1>
- <p>If you are looking for a powerful, versatile, and creative WordPress theme for your website, you might want to check out <strong>Creatink</strong>. Creatink is an awesome looking, multipurpose WordPress theme that comes with various UI elements and countless features. You can create a unique and productive website with the help of tons of options and tools. This theme will be a great solution for business owners, artists, photographers, creative agencies, digital studios, personal freelancers, and anyone who wants to showcase their work in a stunning way.</p>
- <p>In this article, we will review Creatink v1.0.6 – Multi-Concept Responsive WordPress Theme and show you how it can help you create an amazing website that stands out from the crowd. We will cover the following topics:</p>
- <h2>Creatink v1.0.6 – Multi-Concept Responsive WordPress Theme</h2><br /><p><b><b>Download</b> &rarr; <a href="https://byltly.com/2uKxEh">https://byltly.com/2uKxEh</a></b></p><br /><br />
- <ul>
- <li>How to install and customize Creatink?</li>
- <li>What are the different demos and elements of Creatink?</li>
- <li>How to optimize your website with Creatink?</li>
- </ul>
- <p>By the end of this article, you will have a clear idea of what Creatink can do for you and why you should choose it for your website. So let's get started!</p>
- <h2>How to install and customize Creatink?</h2>
- <p>One of the best things about Creatink is that it is very easy to install and customize. You don't need any coding skills or technical knowledge to set up your website with this theme. Here are the steps you need to follow:</p>
- <h3>How to download and activate Creatink?</h3>
- <p>To download Creatink, you need to purchase it from <a href="(^1^)">ThemeForest</a>, where it costs $59 for a regular license. This license includes 6 months of support from the developer team, future updates, quality checks, and access to all features and plugins. You can also extend your support to 12 months for an extra $17.63.</p>
- <p>After purchasing the theme, you will receive a zip file containing the theme files and documentation. You need to unzip the file and upload the creatink.zip file to your WordPress dashboard. Go to Appearance > Themes > Add New > Upload Theme and select the file. Then click on Install Now and Activate the theme.</p>
- <h3>How to use WordPress Live Customizer and Visual Composer?</h3>
- <p>Creatink comes with two powerful tools that will help you customize your website with ease: WordPress Live Customizer and Visual Composer. WordPress Live Customizer allows you to change various aspects of your website, such as colors, fonts, logos, menus, widgets, and more. You can preview the changes in real-time and save them when you are satisfied. To access the Live Customizer, go to Appearance > Customize and explore the options.</p>
- <p>Visual Composer is a drag-and-drop page builder that lets you create stunning pages with no coding required. You can choose from hundreds of elements and templates, and arrange them according to your preferences. You can also edit the content, style, and animation of each element with a few clicks. To use Visual Composer, go to Pages > Add New and click on Backend Editor or Frontend Editor. Then start building your page with the available elements.</p>
- <h3>How to change color schemes, fonts, headers, footers, and layouts?</h3>
- <p>Creatink gives you complete control over the appearance of your website. You can change the color schemes, fonts, headers, footers, and layouts of your website with a few simple steps. Here is how:</p>
- <ul>
- <li>To change the color schemes, go to Appearance > Customize > Colors and choose from the predefined colors or use the color picker to select your own colors.</li>
- <li>To change the fonts, go to Appearance > Customize > Typography and choose from hundreds of Google Fonts for your body text, headings, menus, and buttons.</li>
- <li>To change the headers, go to Appearance > Customize > Header Options and choose from 10 different header styles. You can also customize the logo, menu, social icons, search bar, and sticky header.</li>
- <li>To change the footers, go to Appearance > Customize > Footer Options and choose from 6 different footer styles. You can also customize the widgets, columns, background image, and copyright text.</li>
- <li>To change the layouts, go to Appearance > Customize > Layout Options and choose from 4 different layout styles: wide, boxed, framed, or bordered. You can also customize the background image, color, or pattern for each layout.</li>
- </ul> <h2>What are the different demos and elements of Creatink?</h2>
- <p>Another great thing about Creatink is that it comes with a huge collection of demos and elements that you can use to create your website. You can choose from 40+ different demos for various niches and purposes, such as agency, business, portfolio, blog, shop, photography, and more. You can also use the amazing slider revolution and social feed plugins to add some dynamic and interactive features to your website. Moreover, you can create stunning portfolios and blogs with various options and styles. Let's take a look at some of the demos and elements of Creatink:</p>
- <p></p>
- <h3>How to choose from 40+ different demos for various niches and purposes?</h3>
- <p>Creatink offers you a wide range of demos that you can import with one click and customize according to your needs. You can browse the demos from the <a href="">Creatink website</a> and see how they look on different devices. You can also preview the demos from your WordPress dashboard by going to Appearance > Import Demo Data and clicking on the Preview button. To import a demo, simply click on the Import button and wait for the process to complete. You can then edit the content, images, and settings of the demo as you wish.</p>
- <h3>How to use the amazing slider revolution and social feed plugins?</h3>
- <p>Creatink comes with two premium plugins that will enhance your website: Slider Revolution and Social Feed. Slider Revolution is a powerful plugin that lets you create beautiful sliders, carousels, hero scenes, and animations with ease. You can choose from hundreds of templates or create your own from scratch. You can also add layers, effects, transitions, and parallax to your sliders. To use Slider Revolution, go to Slider Revolution > New Slider and start creating your slider with the available options.</p>
- <p>Social Feed is a plugin that lets you display your social media posts from various platforms, such as Facebook, Twitter, Instagram, YouTube, Pinterest, and more. You can create a grid, list, or carousel layout for your social feed and customize the appearance and behavior of each element. You can also filter, sort, and search your social feed by keywords, hashtags, or usernames. To use Social Feed, go to Social Feed > Add New Feed and start creating your feed with the available options.</p>
- <h3>How to create stunning portfolios and blogs with various options and styles?</h3>
- <p>Creatink also allows you to showcase your work and share your stories with stunning portfolios and blogs. You can create unlimited portfolios and blogs with various options and styles, such as grid, masonry, metro, carousel, slider, lightbox, video, audio, gallery, and more. You can also customize the columns, gaps, filters, pagination, hover effects, and animations of each portfolio or blog. To create a portfolio or blog, go to Portfolio > Add New or Posts > Add New and start creating your portfolio or blog with the available options.</p> <h2>How to optimize your website with Creatink?</h2>
- <p>The last thing we want to talk about is how Creatink can help you optimize your website for better performance and user experience. Creatink is designed to make your website SEO-friendly and fast-loading, as well as responsive and compatible with any device. You can also get top-notch support and free updates with Creatink. Here is how:</p>
- <h3>How to make your website SEO-friendly and fast-loading with Creatink?</h3>
- <p>Creatink is built with SEO in mind, meaning that it follows the best practices and standards for search engine optimization. It has clean and semantic code, proper heading tags, schema markup, breadcrumbs, and social media integration. It also supports Yoast SEO plugin, which is one of the most popular and powerful plugins for SEO. You can use Yoast SEO to optimize your titles, meta descriptions, keywords, sitemaps, and more.</p>
- <p>Creatink is also optimized for speed, meaning that it loads faster and consumes less resources. It has a lightweight and modular framework, minified CSS and JS files, lazy loading images, and cache plugins compatibility. It also supports WP Rocket plugin, which is one of the best plugins for speed optimization. You can use WP Rocket to improve your page loading time, caching, compression, minification, and more.</p>
- <h3>How to ensure your website is responsive and compatible with any device?</h3>
- <p>Creatink is fully responsive and mobile-friendly, meaning that it adapts to any screen size and resolution. It has a fluid and flexible layout, retina-ready graphics, touch-enabled sliders, and responsive menus. It also supports WPBakery Page Builder plugin, which is one of the best plugins for creating responsive websites. You can use WPBakery Page Builder to create custom layouts for different devices and breakpoints.</p>
- <p>Creatink is also compatible with any browser and platform, meaning that it works flawlessly on any device and operating system. It has cross-browser compatibility, RTL support, translation readiness, and WPML plugin compatibility. It also supports WooCommerce plugin, which is one of the best plugins for creating online shops. You can use WooCommerce to sell your products or services on your website with ease.</p>
- <h3>How to get top-notch support and free updates with Creatink?</h3>
- <p>Creatink comes with a dedicated and professional support team that will help you with any issues or questions you might have with the theme. You can contact them via email or through the <a href="">support forum</a>. They will respond to you within 24 hours and provide you with solutions or guidance. You can also access the <a href="">online documentation</a> that covers all the aspects of the theme in detail.</p>
- <p>Creatink also comes with free lifetime updates that will keep your theme up to date with the latest features and improvements. You can update your theme automatically from your WordPress dashboard or manually by downloading the latest version from ThemeForest. You will also receive notifications whenever a new update is available.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, Creatink is a fantastic WordPress theme that can help you create a stunning website that suits your needs and goals. It has a lot of features and options that will make your website unique, creative, and productive. You can easily install and customize Creatink with no coding required. You can also choose from 40+ different demos and elements that will give you a head start on your website creation. Moreover, you can optimize your website with Creatink for better performance and user experience. You can also get top-notch support and free updates with Creatink.</p>
- <p>If you are looking for a powerful, versatile, and creative WordPress theme for your website, you should definitely give Creatink a try. You will not regret it!</p>
- <p>To buy Creatink v1.0.6 – Multi-Concept Responsive WordPress Theme, click on the button below:</p>
- <a href="" >Buy Creatink Now</a>
- <h2>FAQs</h2>
- <ul>
- <li><strong>What are the requirements for using Creatink?</strong></li>
- <p>To use Creatink, you need to have WordPress 4.7 or higher installed on your website. You also need to have PHP 5.6 or higher and MySQL 5.6 or higher on your server. You also need to have enough memory limit and max execution time for your website.</p>
- <li><strong>How much does Creatink cost and what are the payment options?</strong></li>
- <p>Creatink costs $59 for a regular license that includes 6 months of support from the developer team, future updates, quality checks, and access to all features and plugins. You can also extend your support to 12 months for an extra $17.63. You can pay with any major credit card, PayPal, or Skrill. You can also use Envato Credits or Envato Tokens to pay for your purchase.</p>
- <li><strong>How can I get help if I have any issues or questions with Creatink?</strong></li>
- <p>If you have any issues or questions with Creatink, you can contact the support team via email at [email protected] or through the <a href="">support forum</a>. They will respond to you within 24 hours and provide you with solutions or guidance. You can also access the <a href="">online documentation</a> that covers all the aspects of the theme in detail. You can also check the <a href="">video tutorials</a> that show you how to use the theme step by step.</p>
- <li><strong>Can I use Creatink for multiple websites or projects?</strong></li>
- <p>Yes, you can use Creatink for multiple websites or projects, as long as you have a valid license for each website or project. You can buy multiple licenses from ThemeForest or extend your existing license to cover more websites or projects. You can also transfer your license to another website or project, as long as you deactivate the theme from the previous one.</p>
- <li><strong>Can I customize Creatink to suit my brand identity and preferences?</strong></li>
- <p>Yes, you can customize Creatink to suit your brand identity and preferences. You can change the color schemes, fonts, headers, footers, layouts, and more with the WordPress Live Customizer and Visual Composer. You can also create your own custom elements and templates with Visual Composer. You can also use child themes and custom CSS to make further changes to the theme.</p>
- </ul></p> b2dd77e56b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Photoshop Elements 10 (serial Crack) Crack.md DELETED
@@ -1,7 +0,0 @@
- <br />
- <p>* * * # opening documents when youre ready to open a document, click the open files button. alternatively, you can drag a file onto the open window. double-click on a document to open it. click on a document in the elements window to open it in photoshop.</p>
- <p> <strong>the edge of tomorrow</strong> opens with a brilliant prologue that covers the same ground as the movie in a few quick scenes, creating a reality-universe-in-distress story line. but this time, we're not just watching a war from the sidelines, we're seeing one from the safety of a little ball that floats away from the chaos. it's like a video game, so we're having the kind of grand-scale war we've only seen in video games. the cgi is so gorgeous, the set design so clean and confident, that we're not even thinking about the lack of effects work -- it looks too perfect to be real. it's a film that uses cinematic technique to create a reality that feels very real. it's obvious that <strong>adobe photoshop elements 10 (serial crack)</strong> costar tom cruise did all of his own stunts. but he's not beating himself up about it. in fact, he's not doing any of them at all. </p>
- <h2>Adobe Photoshop Elements 10 (serial Crack) crack</h2><br /><p><b><b>Download File</b> &#8230; <a href="https://imgfil.com/2uxX75">https://imgfil.com/2uxX75</a></b></p><br /><br />
- <p> <strong>the edge of tomorrow</strong> stars tom cruise as a soldier called major william cage, who is trapped in a time loop in which the day he is killed over and over again until he can learn how to fight the invading alien force. <strong>adobe photoshop elements 10 (serial crack)</strong> chris hemsworth plays major jeffery henderson, a man who is trapped in the same loop, and the two must team up to beat back the alien invasion. </p> 899543212b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Autodata Cd 3 Cd Code.md DELETED
@@ -1,11 +0,0 @@
- <h2>autodata cd 3 cd code</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uy16c">https://imgfil.com/2uy16c</a></b></p><br /><br />
-
- AUTODATA CD3 2012 IS VALID FOR LIFE. WORKS ON XP. VISTA. WINDOWS 7. ALL PRODUCTIONS, BEFORE 2012. FAULT CODES, KEY PROGRAMMING, TIMING BELTS, REPAIR TIME. BEWARE OF FAKE! AUTO DATA, INSTALLATION, PROGRAMMING, REPAIR, LIST OF FAULT CODES. AVAILABLE, HAVE ALL ECU PROGRAMMING, CAR REPAIR LIST.
- FOR BUYERS FROM KAZAKHSTAN. SHIPPING IS FREE. FOR KAZAKHSTAN.
- CARS, MOTORCYCLES, TRUCKS. CALL FOR ALL QUESTIONS.
- + 770 - Show number - , + 777 - Show number - .
- + 770 - Show number - .
- AUTO SETUP, AUTO SETUP CODE, FAULT CODE LIST, ECU FIRMWARE 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Ebuddy Java Facebook.md DELETED
@@ -1,93 +0,0 @@
-
- <h1>Download eBuddy Java Facebook: The Ultimate Guide</h1>
- <p>If you are looking for a way to chat with your friends on Facebook, MSN, Yahoo!, Orkut, AIM, ICQ and MySpace from your Java phone, then you should download eBuddy Java Facebook. This is the most popular free IM app for your mobile, with more than 100 million downloads to date. In this article, we will show you how to download eBuddy Java Facebook, how to use its features and why it is the best choice for your chatting needs.</p>
- <h2>download ebuddy java facebook</h2><br /><p><b><b>Download Zip</b> &#9193; <a href="https://imgfil.com/2uxYDM">https://imgfil.com/2uxYDM</a></b></p><br /><br />
- <h2>How to Download eBuddy Java Facebook</h2>
- <p>Downloading eBuddy Java Facebook is very easy and fast. You just need to follow these simple steps:</p>
- <ol>
- <li>Go to <a href="https://phoneky.com/java-software/?id=a9a25026">PHONEKY</a>, a website that offers free Java apps and games for your phone.</li>
- <li>Search for "eBuddy Mobile Messenger 2.3.1" or click on this <a href="https://phoneky.com/java-software/?id=a9a25026">link</a>.</li>
- <li>Select your phone model and screen size.</li>
- <li>Click on the "Download" button and save the file on your phone.</li>
- <li>Open the file and install the app on your phone.</li>
- <li>Launch the app and sign in with your Facebook account or create a new eBuddy account.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded eBuddy Java Facebook on your phone. Now you can start chatting with your friends on various platforms.</p>
- <h2>How to Use eBuddy Java Facebook Features</h2>
- <p>eBuddy Java Facebook has many features that make it a great app for chatting. Here are some of them:</p>
- <ul>
- <li>You can chat with your friends on Facebook, MSN, Yahoo!, Orkut, AIM, ICQ and MySpace from one app.</li>
- <li>You can send pictures from your phone camera or memory to your friends.</li>
- <li>You can set your display picture from your phone camera or gallery.</li>
- <li>You can choose colorful themes to customize your app.</li>
- <li>You can enjoy the new and improved Facebook Chat and follow all your Facebook updates.</li>
- <li>You can switch between multiple chats and manage your contact list and groups.</li>
- <li>You can use emoticons and shortcuts to express yourself better.</li>
- </ul>
- <p>eBuddy Java Facebook is designed to be user-friendly and easy to use. You just need to tap on the screen to access the menu, select a contact or a chat, type a message or send a picture. You can also adjust the settings according to your preferences.</p>
- <p></p>
- <h2>Why Download eBuddy Java Facebook</h2>
- <p>There are many reasons why you should download eBuddy Java Facebook for your Java phone. Here are some of them:</p>
- <ul>
- <li>It is free and safe to download and use.</li>
- <li>It is compatible with most Java phones and supports various screen sizes.</li>
- <li>It is fast and reliable and does not consume much data or battery.</li>
- <li>It is fun and entertaining and lets you chat with your friends anytime and anywhere.</li>
- <li>It is updated regularly and offers new features and improvements.</li>
- </ul>
- <p>eBuddy Java Facebook is the ultimate IM app for your Java phone. It lets you chat with your friends on multiple platforms, send pictures, choose themes, enjoy Facebook Chat and more. Download eBuddy Java Facebook today and discover a new way of chatting!</p>
- <h2>Download eBuddy Java Facebook: The Benefits</h2>
- <p>By downloading eBuddy Java Facebook, you can enjoy many benefits that will enhance your chatting experience. Here are some of them:</p>
- <ul>
- <li>You can save money and data by using one app instead of multiple apps for different platforms.</li>
- <li>You can stay connected with your friends and family across the world without any hassle.</li>
- <li>You can chat in real-time and get instant notifications of new messages and updates.</li>
- <li>You can share your moments and feelings with your friends by sending pictures and emoticons.</li>
- <li>You can express your personality and mood by choosing themes and display pictures.</li>
- </ul>
- <p>Download eBuddy Java Facebook today and enjoy these benefits and more!</p>
- <h2>Download eBuddy Java Facebook: The Reviews</h2>
- <p>Don't just take our word for it. See what other users have to say about eBuddy Java Facebook. Here are some of the reviews from PHONEKY, a website that offers free Java apps and games for your phone:</p>
- <blockquote>
- <p>"This app is awesome. I can chat with all my friends on different platforms. It is fast and easy to use. I love it." - 5 stars by John</p>
- <p>"I like this app very much. It is very useful and fun. I can send pictures and choose themes. It is the best IM app for Java phones." - 5 stars by Mary</p>
- <p>"This app is amazing. It has improved a lot. The new Facebook Chat is great. I can follow all my Facebook updates. It is very reliable and stable." - 5 stars by David</p>
- </blockquote>
- <p>Download eBuddy Java Facebook today and join the millions of satisfied users!</p>
- <h2>Download eBuddy Java Facebook: The Alternatives</h2>
- <p>Although eBuddy Java Facebook is a great app for chatting, you may want to try some other alternatives that offer similar or different features. Here are some of them:</p>
- <ul>
- <li><a href="https://phoneky.com/java-software/?id=a9a48271">eBuddy Messenger</a>: This is a newer version of eBuddy Java Facebook that supports more platforms and has more features. You can chat on Facebook, MSN, Yahoo!, Orkut, AIM, ICQ, MySpace, Hyves and Google Talk. You can also send pictures from memory, choose your color theme, and enjoy the improved stability and reliability.</li>
- <li><a href="https://soundcloud.com/plicarfadi1981/download-ebuddy-java-facebook">Download Ebuddy Java Facebook</a>: This is an audiobook that explains how to download eBuddy Java Facebook and use its features. You can listen to it on SoundCloud desktop and mobile. It is a good option if you prefer audio over text.</li>
- <li><a href="https://soundcloud.com/collumqadary9/download-ebuddy-java-facebook-top">Download Ebuddy Java Facebook [TOP]</a>: This is another audiobook that gives you the top tips and tricks for using eBuddy Java Facebook. You can also listen to it on SoundCloud desktop and mobile. It is a good option if you want to learn more about the app.</li>
- </ul>
- <p>Download eBuddy Java Facebook today and compare it with these alternatives!</p>
- <h2>Download eBuddy Java Facebook: The Conclusion</h2>
- <p>In conclusion, eBuddy Java Facebook is the ultimate IM app for your Java phone. It lets you chat with your friends on multiple platforms, send pictures, choose themes, enjoy Facebook Chat and more. It is free, safe, compatible, fast, reliable, fun and entertaining. It is updated regularly and offers new features and improvements. It has many benefits and positive reviews from users. It also has some alternatives that you can try if you want to explore other options. Download eBuddy Java Facebook today and discover a new way of chatting!</p>
- <h2>Download eBuddy Java Facebook: The FAQs</h2>
- <p>You may have some questions about eBuddy Java Facebook and how to use it. Here are some of the frequently asked questions and their answers:</p>
- <dl>
- <dt>Q: Is eBuddy Java Facebook compatible with my phone?</dt>
- <dd>A: eBuddy Java Facebook is compatible with most Java phones and supports various screen sizes. You can check the compatibility of your phone model and screen size on PHONEKY, a website that offers free Java apps and games for your phone.</dd>
- <dt>Q: How much data and battery does eBuddy Java Facebook consume?</dt>
- <dd>A: eBuddy Java Facebook is designed to be fast and reliable and does not consume much data or battery. However, the actual consumption may vary depending on your network, phone settings and usage.</dd>
- <dt>Q: How can I update eBuddy Java Facebook to the latest version?</dt>
- <dd>A: You can update eBuddy Java Facebook to the latest version by downloading it again from PHONEKY or from the official website of eBuddy. You can also check for updates within the app by going to Menu > Settings > About > Check for updates.</dd>
- <dt>Q: How can I contact eBuddy Java Facebook support?</dt>
- <dd>A: You can contact eBuddy Java Facebook support by sending an email to [email protected] or by visiting the official website of eBuddy and filling out the contact form.</dd>
- </dl>
- <p>Download eBuddy Java Facebook today and get answers to your questions!</p>
- <h2>Download eBuddy Java Facebook: The Summary</h2>
- <p>To summarize, here are the main points of this article:</p>
- <ul>
- <li>eBuddy Java Facebook is the ultimate IM app for your Java phone that lets you chat with your friends on multiple platforms, send pictures, choose themes, enjoy Facebook Chat and more.</li>
- <li>You can download eBuddy Java Facebook for free and safely from PHONEKY, a website that offers free Java apps and games for your phone.</li>
- <li>You can use eBuddy Java Facebook features such as chatting on various platforms, sending pictures from memory or camera, choosing themes and display pictures, enjoying Facebook Chat and updates, switching between chats and managing contacts and groups, and using emoticons and shortcuts.</li>
- <li>You can enjoy many benefits by downloading eBuddy Java Facebook such as saving money and data, staying connected with friends and family, chatting in real-time, sharing moments and feelings, expressing personality and mood, and having fun and entertainment.</li>
- <li>You can also try some alternatives to eBuddy Java Facebook that offer similar or different features such as eBuddy Messenger, Download Ebuddy Java Facebook audiobook, and Download Ebuddy Java Facebook [TOP] audiobook.</li>
- <li>You can find answers to some of the frequently asked questions about eBuddy Java Facebook such as compatibility, data and battery consumption, updates, and support.</li>
- </ul>
- <p>Download eBuddy Java Facebook today and discover a new way of chatting!</p>
- <p>Download eBuddy Java Facebook today and discover a new way of chatting!</p> 3cee63e6c2<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Epic Race 3D and Compete with Other Players Online.md DELETED
@@ -1,125 +0,0 @@
1
-
2
- <h1>How to Download and Play Epic Race 3D on Your PC</h1>
3
- <p>Do you love parkour games? Do you want to experience the thrill of running, jumping, sliding, and dodging obstacles in a 3D environment? If yes, then you should try Epic Race 3D, a fun and challenging game that will test your skills and reflexes. In this article, we will show you how to download and play Epic Race 3D on your PC, as well as some tips and tricks to help you win the races.</p>
4
- <h2>What is Epic Race 3D?</h2>
5
- <p>Epic Race 3D is a game developed by Good Job Games, the creators of Run Race 3D and Fun Race 3D. It is a parkour game that lets you compete with other players in various levels with different obstacles. You have to complete the parkours as fast as possible, while avoiding falling or crashing into anything. You can also collect coins and unlock new characters and skins.</p>
6
- <h2>download epic race 3d</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://jinyurl.com/2uNKZy">https://jinyurl.com/2uNKZy</a></b></p><br /><br />
7
- <h3>A fun and challenging parkour game</h3>
8
- <p>Epic Race 3D is not just a simple running game. It is a game that requires you to have good timing, rhythm, and coordination. Each level has its own theme and design, with different types of obstacles that you have to overcome. Some of them are easy, while others are tricky and require precise movements. For example, you may have to jump over gaps, slide under bars, swing on ropes, balance on beams, or avoid moving spikes. You have to be careful not to fall off the platforms or hit any obstacles, or else you will lose time or even get eliminated.</p>
9
- <h3>Features of Epic Race 3D</h3>
10
- <p>Some of the features of Epic Race 3D are:</p>
11
- <ul>
12
- <li>Realistic parkour animations and physics</li>
13
- <li>Simple and fast controls</li>
14
- <li>Dozens of levels with various themes and obstacles</li>
15
- <li>Multiple characters and skins to choose from</li>
16
- <li>Epic levels that change the way you play</li>
17
- <li>Online and offline modes</li>
18
- <li>Leaderboards and achievements</li>
19
- </ul>
20
- <h2>Why play Epic Race 3D on PC?</h2>
21
- <p>Epic Race 3D is a great game to play on your mobile device, but it can also be enjoyed on your PC. Playing on PC has some advantages that can enhance your gaming experience. Here are some of them:</p>
22
- <h3>Advantages of playing on PC</h3>
23
- <ul>
24
- <li>Bigger screen size: You can see the details of the graphics and animations better on a larger screen. You can also have a wider view of the level and the obstacles ahead.</li>
25
- <li>Better performance: You can avoid lagging or crashing issues that may occur on some mobile devices. You can also adjust the settings to optimize the game for your PC.</li>
26
- <li>Easier controls: You can use your keyboard and mouse to control your character more comfortably and accurately. You can also customize the key mapping to suit your preferences.</li>
27
- <li>More storage space: You can save more data and progress on your PC without worrying about running out of space or losing your data.</li>
28
- </ul>
29
- <h <h3>Requirements for playing on PC</h3>
30
- <p>To play Epic Race 3D on your PC, you need to have the following requirements:</p>
31
- <ul>
32
- <li>A Windows PC with at least 2 GB of RAM and 4 GB of free disk space</li>
33
- <li>An internet connection for downloading and playing online</li>
34
- <li>An emulator software that can run Android apps on your PC</li>
35
- </ul>
36
- <h2>How to download and install Epic Race 3D on PC?</h2>
37
- <p>Now that you know the benefits and requirements of playing Epic Race 3D on PC, you may be wondering how to do it. The process is actually quite simple and easy. All you need to do is follow these steps:</p>
38
- <p>download epic race 3d apk<br />
39
- download epic race 3d mod apk<br />
40
- download epic race 3d for pc<br />
41
- download epic race 3d game<br />
42
- download epic race 3d app<br />
43
- download epic race 3d android<br />
44
- download epic race 3d ios<br />
45
- download epic race 3d online<br />
46
- download epic race 3d free<br />
47
- download epic race 3d latest version<br />
48
- download epic race 3d hack<br />
49
- download epic race 3d unlimited money<br />
50
- download epic race 3d from google play<br />
51
- download epic race 3d from app store<br />
52
- download epic race 3d for windows<br />
53
- download epic race 3d for mac<br />
54
- download epic race 3d for laptop<br />
55
- download epic race 3d for tablet<br />
56
- download epic race 3d for chromebook<br />
57
- download epic race 3d for iphone<br />
58
- download epic race 3d for ipad<br />
59
- download epic race 3d for ipod touch<br />
60
- download epic race 3d offline<br />
61
- download epic race 3d without ads<br />
62
- download epic race 3d with cheats<br />
63
- download epic race 3d with all characters unlocked<br />
64
- download epic race 3d with new levels<br />
65
- download epic race 3d with parkour experience<br />
66
- download epic race 3d by good job games<br />
67
- download epic race 3d by run race 3d developers<br />
68
- how to download epic race 3d on pc<br />
69
- how to download epic race 3d on mac<br />
70
- how to download epic race 3d on laptop<br />
71
- how to download epic race 3d on tablet<br />
72
- how to download epic race 3d on chromebook<br />
73
- how to download epic race 3d on iphone<br />
74
- how to download epic race 3d on ipad<br />
75
- how to download epic race 3d on ipod touch<br />
76
- how to download epic race 3d without ads<br />
77
- how to download epic race 3d with cheats<br />
78
- where to download epic race 3d apk<br />
79
- where to download epic race 3d mod apk<br />
80
- where to download epic race 3d hack<br />
81
- where to download epic race 3d unlimited money<br />
82
- where to download epic race 3d latest version<br />
83
- where to download epic race 3d for pc<br />
84
- where to download epic race 3d for mac<br />
85
- where to download epic race 3d for laptop<br />
86
- where to download epic race 3d for tablet</p>
87
- <h3>Step 1: Download an emulator</h3>
88
- <p>An emulator is a software that can mimic the Android operating system on your PC. This way, you can run any Android app or game on your PC as if you were using a mobile device. There are many emulators available online, but we recommend using BlueStacks, as it is one of the most popular and reliable ones. You can download BlueStacks from its official website for free.</p>
89
- <h3>Step 2: Install the emulator</h3>
90
- <p>Once you have downloaded the emulator, you need to install it on your PC. The installation process is very simple and straightforward. Just follow the instructions on the screen and agree to the terms and conditions. It may take a few minutes for the installation to complete, depending on your PC's specifications.</p>
91
- <h3>Step 3: Launch the emulator and search for Epic Race 3D</h3>
92
- <p>After the installation is done, you can launch the emulator by clicking on its icon on your desktop or start menu. You will see a window that looks like an Android device, with various apps and icons. To search for Epic Race 3D, you can use the search bar on the top right corner of the window. Type in "Epic Race 3D" and hit enter. You will see a list of results from the Google Play Store.</p>
93
- <h3>Step 4: Download and install Epic Race 3D</h3>
94
- <p>From the list of results, click on the one that says "Epic Race 3D" by Good Job Games. You will be taken to the app's page on the Google Play Store. Here, you can see more information about the game, such as its description, screenshots, reviews, and ratings. To download and install the game, just click on the green "Install" button. The game will start downloading and installing automatically.</p>
95
- <h3>Step 5: Enjoy the game on your PC</h3>
96
- <p>Congratulations! You have successfully downloaded and installed Epic Race 3D on your PC. You can now enjoy the game on your bigger screen, with better performance and easier controls. To launch the game, just click on its icon on the emulator's home screen or app drawer. You can also create a shortcut on your desktop for easier access.</p>
97
- <h2>Tips and tricks for playing Epic Race 3D on PC</h2>
98
- <p>Epic Race 3D is a fun and addictive game that will keep you entertained for hours. However, it can also be challenging and frustrating at times, especially when you face tough opponents or tricky obstacles. To help you improve your skills and win more races, here are some tips and tricks that you can use:</p>
99
- <h3>Customize your character and unlock new skins</h3>
100
- <p>One of the cool things about Epic Race 3D is that you can customize your character's appearance and style. You can choose from different colors, outfits, hats, glasses, shoes, and more. You can also unlock new skins by collecting coins or completing achievements. Some of the skins are funny, cute, or cool, while others are based on popular characters or celebrities. For example, you can unlock skins like Spider-Man, Iron Man, Batman, Superman, Harry Potter, Donald Trump, Kim Jong-un, etc. Customizing your character and unlocking new skins can make the game more fun and interesting.</p>
101
- <h3>Learn the timing and rhythm of each obstacle</h3>
102
- <p>Epic Race 3D is a game that requires you to have good timing and rhythm. Each obstacle has its own pattern and speed that you have to match in order to pass it safely. For example, some obstacles move up and down, left and right, or rotate in different directions. You have to time your jumps or slides accordingly to avoid hitting them or falling off. You also have to pay attention to the sound effects that indicate when an obstacle is about to move or change direction. Learning the timing and rhythm of each obstacle can help you avoid mistakes and save time.</p>
103
- <h [user](#message <h3>Use the boosters and power-ups wisely</h3>
104
- <p>Epic Race 3D also has some boosters and power-ups that can help you gain an edge over your opponents. For example, you can use the rocket booster to fly over obstacles, the magnet to attract coins, the shield to protect yourself from collisions, or the slow motion to reduce the speed of the obstacles. However, these boosters and power-ups are not always available and have a limited duration. You have to use them wisely and strategically, depending on the situation and the level. For instance, you may want to save the rocket booster for the final stretch of the race, or use the slow motion when you face a complex obstacle.</p>
105
- <h3>Compete with other players online and offline</h3>
106
- <p>Epic Race 3D is a game that can be played both online and offline. When you play online, you can compete with other players from around the world in real-time. You can see their names, countries, and ranks on the screen. You can also chat with them before and after the race. Playing online can be more exciting and challenging, as you can test your skills against different players with different styles and strategies. However, playing online also requires a stable internet connection and may consume more data.</p>
107
- <p>When you play offline, you can compete with computer-controlled players or bots. You can choose from different difficulty levels, ranging from easy to hard. Playing offline can be more relaxing and convenient, as you can play anytime and anywhere without worrying about your internet connection or data usage. However, playing offline may also be less rewarding and satisfying, as you may not feel the same thrill and competition as playing online.</p>
108
- <h2>Conclusion</h2>
109
- <p>Epic Race 3D is a game that will keep you entertained for hours with its fun and challenging parkour levels. You can download and play it on your PC using an emulator software like BlueStacks. Playing on PC has some advantages that can enhance your gaming experience, such as bigger screen size, better performance, easier controls, and more storage space. You can also use some tips and tricks to improve your skills and win more races, such as customizing your character, learning the timing and rhythm of each obstacle, using the boosters and power-ups wisely, and competing with other players online and offline. Epic Race 3D is a game that you should definitely try if you love parkour games.</p>
110
- <h2>FAQs</h2>
111
- <p>Here are some frequently asked questions about Epic Race 3D:</p>
112
- <ul>
113
- <li><b>Is Epic Race 3D free to play?</b><br>
114
- Yes, Epic Race 3D is free to play on both mobile devices and PC. However, it shows ads and offers optional in-app purchases that cost real money.</li>
115
- <li><b>Can I play Epic Race 3D with my friends?</b><br>
116
- Yes, you can play Epic Race 3D with your friends online. You can invite them to join your race or join their race by using the friend code feature. You can also chat with them before and after the race.</li>
117
- <li><b>How many levels are there in Epic Race 3D?</b><br>
118
- There are dozens of levels in Epic Race 3D, each with its own theme and design. The levels are divided into different categories, such as normal, epic, special, bonus, etc. The levels are also updated regularly with new content and features.</li>
119
- <li><b>How do I unlock new characters and skins in Epic Race 3D?</b><br>
120
- You can unlock new characters and skins in Epic Race 3D by collecting coins or completing achievements. Coins are earned by playing the game or watching ads. Achievements are earned by completing certain tasks or challenges in the game.</li>
121
- <li><b>What are the best emulators for playing Epic Race 3D on PC?</b><br>
122
- There are many emulators for playing Epic Race 3D on PC, but we recommend using BlueStacks, as it is one of the most popular and reliable ones. BlueStacks has many features that can improve your gaming experience, such as high compatibility, fast performance, easy controls, key mapping, multi-instance mode, etc.</li>
123
- </ul>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download J Image for PC A Step-by-Step Guide.md DELETED
@@ -1,131 +0,0 @@
1
- <br />
2
- <h1>How to Download J Image</h1>
3
- <p>Do you want to download j image files from the web? J image is a special image format that is used by ImageJ, a free and open source software for image processing and analysis. J image files have many features and benefits that make them suitable for scientific and medical applications. In this article, we will explain what j image is, how to open it, and how to download it from the web.</p>
4
- <h2>download j image</h2><br /><p><b><b>Download</b> &middot; <a href="https://jinyurl.com/2uNUt5">https://jinyurl.com/2uNUt5</a></b></p><br /><br />
5
- <h2>What is J Image?</h2>
6
- <p>J image is a file format that stores images in a compressed and lossless way. It is based on the TIFF (Tagged Image File Format) standard, but it adds some extra features that are specific to ImageJ. Some of these features are:</p>
7
- <ul>
8
- <li>Support for multiple images in a single file, which can be organized as stacks or hyperstacks.</li>
9
- <li>Support for various types of images, such as 8-bit, 16-bit, 32-bit, RGB color, or binary.</li>
10
- <li>Support for metadata, such as calibration, spatial scale, units, color lookup tables, overlays, ROIs (regions of interest), and annotations.</li>
11
- <li>Support for plugins, which can extend the functionality of ImageJ and add support for more formats.</li>
12
- </ul>
13
- <p>J image files have many benefits that make them ideal for scientific and medical imaging. Some of these benefits are:</p>
14
- <ul>
15
- <li>They preserve the original quality and information of the images, without any loss or distortion.</li>
16
- <li>They reduce the file size and save disk space, without compromising the image resolution or bit depth.</li>
17
- <li>They enable fast and easy access to the images, without the need to unzip or decompress them.</li>
18
- <li>They facilitate the analysis and processing of the images, using the powerful tools and features of ImageJ.</li>
19
- </ul>
20
- <h2>How to Open J Image Files</h2>
21
- <p>If you have j image files on your computer or device, you may wonder how to open them and view their contents. There are two main ways to do this:</p>
22
- <h3>Using ImageJ Software</h3>
23
- <p>The best way to open j image files is to use ImageJ, the program that creates them in the first place. ImageJ is free and open source software that runs on Windows, Mac OS X, Linux, and other platforms. You can download it from <a href="https://imagej.net/">https://imagej.net/</a>. To open a j image file with ImageJ, follow these steps:</p>
74
- <ol>
75
- <li>Launch ImageJ on your computer or device.</li>
76
- <li>Go to File > Open and browse to the location of the j image file you want to open.</li>
77
- <li>Select the file and click Open. The file will be displayed in a new window.</li>
78
- <li>You can use the toolbar, menus, and plugins of ImageJ to manipulate, analyze, or export the image as you wish.</li>
79
- </ol>
80
- <h3>Using Online Converters</h3>
81
- <p>If you don't have ImageJ software installed on your computer or device, or if you want to convert j image files to other formats, you can use online converters. These are websites that allow you to upload j image files and convert them to common formats such as JPEG, PNG, GIF, BMP, etc. Some examples of online converters are:</p>
82
- <ul>
83
- <li><a href="(^2^)">https://www.freefileconvert.com/jimage-converter</a></li>
84
- <li><a href="(^3^)">https://www.zamzar.com/convert/jimage-to-jpg/</a></li>
85
- <li><a href="(^4^)">https://onlineconvertfree.com/convert-format/jimage-to-jpg/</a></li>
86
- </ul>
87
- <p>To use an online converter, follow these steps:</p>
88
- <ol>
89
- <li>Go to the website of the online converter you want to use.</li>
90
- <li>Click on Choose File or Browse and select the j image file you want to convert.</li>
91
- <li>Select the output format you want from the drop-down menu or list.</li>
92
- <li>Click on Convert or Start Conversion and wait for the process to finish.</li>
93
- <li>Download the converted file to your computer or device, or share it via email or social media.</li>
94
- </ol>
95
- <h2>How to Download J Image Files from the Web</h2>
96
- <p>If you want to download j image files from the web, you have two options:</p>
97
- <h3>Using a Web Browser</h3>
98
- <p>You can use any web browser, such as Chrome, Firefox, Safari, or Edge, to download j image files from the web. To do this, follow these steps:</p>
99
- <ol>
100
- <li>Go to the website that contains the j image file you want to download.</li>
101
- <li>Right-click on the j image file and select Save Image As or Save Link As.</li>
102
- <li>Choose a location and a name for the file and click Save.</li>
103
- <li>The file will be downloaded to your computer or device.</li>
104
- </ol>
105
- <h3>Using a Download Manager</h3>
106
- <p>If you want to download multiple j image files at once, or if you want to resume interrupted downloads, you can use a download manager. A download manager is software that can speed up and manage your downloads. Some examples of download managers are:</p>
107
- <ul>
108
- <li><a href="">https://www.internetdownloadmanager.com/</a></li>
109
- <li><a href="">https://www.freedownloadmanager.org/</a></li>
110
- <li><a href="">https://www.flashget.com/en/index.htm</a></li>
111
- </ul>
112
- <p>To use a download manager, follow these steps:</p>
113
- <ol>
114
- <li>Download and install the download manager of your choice on your computer or device.</li>
115
- <li>Launch the download manager and go to its settings or options.</li>
116
- <li>Enable the integration with your web browser, or copy and paste the URLs of the j image files you want to download.</li>
117
- <li>Start the download and wait for it to finish.</li>
118
- <li>The files will be downloaded to your computer or device.</li>
119
- </ol>
120
- <h2>Conclusion</h2>
121
- <p>J image is a powerful and versatile image format that is used by ImageJ software for image processing and analysis. It has many features and benefits that make it suitable for scientific and medical applications. You can open j image files using ImageJ software or online converters, and you can download them from the web using a web browser or a download manager. We hope this article has helped you understand how to download j image files easily and efficiently.</p>
122
- <h2>FAQs</h2>
123
- <ul>
124
- <li><b>What is the difference between j image and TIFF?</b><br>J image is based on TIFF, but it adds some extra features that are specific to ImageJ, such as support for multiple images in a single file, metadata, plugins, etc.</li>
125
- <li><b>Can I edit j image files?</b><br>Yes, you can edit j image files using ImageJ software, which offers a variety of tools and features for image manipulation, enhancement, measurement, segmentation, etc.</li>
126
- <li><b>Can I view j image files on my smartphone or tablet?</b><br>Yes, you can view j image files on your smartphone or tablet using ImageJ for Android or iOS apps, which are available on Google Play Store and App Store respectively.</li>
127
- <li><b>How can I convert j image files to PDF?</b><br>You can convert j image files to PDF using online converters such as <a href="https://www.zamzar.com/convert/jimage-to-pdf/">https://www.zamzar.com/convert/jimage-to-pdf/</a> or <a href="https://onlineconvertfree.com/convert-format/jimage-to-pdf/">https://onlineconvertfree.com/convert-format/jimage-to-pdf/</a>.</li>
128
- <li><b>Where can I find more information about j image format?</b><br>You can find more information about the j image format on the official ImageJ website at <a href="https://imagej.net/JImage_Format">https://imagej.net/JImage_Format</a>.</li>
129
- </ul>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download and Install Word 2017 for Windows 7 Without Paying a Cent.md DELETED
@@ -1,232 +0,0 @@
1
- <br />
2
- <h1>How to Download Word 2017 Free for Windows 7</h1>
3
- <p>Word 2017 is a word processor developed by Microsoft that allows you to create, edit, and share professional-looking documents. Whether you need to write a report, a resume, a letter, or a blog post, Word 2017 can help you with its powerful features and tools.</p>
4
- <h2>download word 2017 free for windows 7</h2><br /><p><b><b>Download</b> &mdash; <a href="https://jinyurl.com/2uNUzv">https://jinyurl.com/2uNUzv</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download Word 2017 free for Windows 7, what are the features and system requirements of Word 2017, how to install and use Word 2017 on Windows 7, and answer some frequently asked questions.</p>
6
- <h2>Features of Word 2017</h2>
7
- <p>Word 2017 is more than just a simple word processor. It offers a range of features that make it easy and convenient to work with documents.</p>
8
- <h3>Editing and formatting options</h3>
9
- <p>With Word 2017, you can edit and format text, images, tables, charts, shapes, and more with ease. You can use the ribbon menu or the context menu to access various commands and options. You can also use keyboard shortcuts or voice commands to perform actions faster.</p>
10
- <p>Some of the editing and formatting options in Word 2017 include:</p>
60
- <ul>
61
- <li>Cut, copy, paste, undo, redo</li>
62
- <li>Find and replace</li>
63
- <li>Font, size, color, style, alignment</li>
64
- <li>Bullets, numbers, indents</li>
65
- <li>Headers, footers, page numbers</li>
66
- <li>Styles, themes, templates</li>
67
- <li>Insert pictures, shapes, icons, stickers</li>
68
- <li>Add hyperlinks, bookmarks, cross-references</li>
69
- <li>Create tables, charts, SmartArt</li>
70
- <li>Add footnotes, endnotes, citations</li>
71
- <li>Insert captions, table of contents</li>
72
- <li>Use track changes, comments</li>
73
- <li>Check spelling, grammar</li>
74
- <li>Show word count</li>
75
- <li>Use Smart Lookup</li>
76
- </ul>
77
- <h3>Collaboration and sharing features</h3>
78
- <p>Word 2017 also enables you to collaborate and share your documents with others online. Some of the collaboration and sharing features in Word 2017 include:</p>
- <ul>
- <li>Integration with Office 365 apps and services, such as Outlook, Excel, PowerPoint, OneNote, and Teams</li>
79
- <li>Use Microsoft Editor, Translator, Researcher, or Designer to enhance your documents</li>
80
- <li>Open and edit documents from older versions of Word or other word processors</li>
81
- <li>Use accessibility features, such as Read Aloud, Dictate, or Immersive Reader</li>
82
- </ul>
83
- <h2>System Requirements for Word 2017</h2>
84
- <p>Before you download Word 2017 for Windows 7, you need to make sure that your computer meets the minimum and recommended system requirements for running the software.</p>
85
- <h3>Hardware requirements</h3>
86
- <p>The hardware requirements for Word 2017 are as follows:</p>
87
- <table>
88
- <tr>
89
- <th>Component</th>
90
- <th>Minimum</th>
91
- <th>Recommended</th>
92
- </tr>
93
- <tr>
94
- <td>CPU</td>
95
- <td>1 GHz or faster processor with SSE2 instruction set</td>
96
- <td>2 GHz or faster processor with SSE2 instruction set</td>
97
- </tr>
98
- <tr>
99
- <td>RAM</td>
100
- <td>2 GB</td>
101
- <td>4 GB or more</td>
102
- </tr>
103
- <tr>
104
- <td>HDD</td>
105
- <td>3 GB of available disk space</td>
106
- <td>4 GB or more of available disk space</td>
107
- </tr>
108
- <tr>
109
- <td>Display</td>
110
- <td>1024 x 768 resolution</td>
111
- <td>1280 x 800 resolution or higher</td>
112
- </tr>
113
- <tr>
114
- <td>Graphics card</td>
115
- <td>DirectX 9 or later, with WDDM 2.0 or higher for Windows 10 (or WDDM 1.3 or higher for Windows 10 Fall Creators Update)</td>
116
- <td>DirectX 10 or later, with WDDM 2.0 or higher for Windows 10 (or WDDM 1.3 or higher for Windows 10 Fall Creators Update)</td>
117
- </tr>
118
- <tr>
119
- <td>Sound card</td>
- <td>N/A</td>
120
- <td>N/A</td>
121
- </tr>
122
- </table>
123
- <h3>Software requirements</h3>
124
- <p>The software requirements for Word 2017 are as follows:</p>
125
- <ul>
126
- <li>Operating system: Windows 7 Service Pack 1 or later</li>
127
- <li>Browser: Microsoft Edge, Internet Explorer 11, Mozilla Firefox, or Google Chrome</li>
128
- <li>.NET Framework: 3.5 or 4.6 and higher</li>
129
- <li>Other software: Some features may require additional or advanced hardware or server connectivity, such as Skype for Business, OneDrive, Exchange Server, SharePoint Server, or Microsoft 365.</li>
130
- </ul>
131
- <h2>Download Options for Word 2017</h2>
132
- <p>There are three main ways to download Word 2017 for Windows 7: through a Microsoft 365 subscription, through a standalone Office Home & Business or Home & Student edition, or through Word Online.</p>
133
- <h3>Microsoft 365 subscription</h3>
134
- <p>A Microsoft 365 subscription is the best way to get Word 2017 and other Office apps, such as Excel, PowerPoint, Outlook, OneNote, and more. You also get access to online services, such as OneDrive, Skype, Teams, and SharePoint. You can choose from different plans and prices depending on your needs and preferences.</p>
135
- <p>Some of the benefits of a Microsoft 365 subscription include:</p>
136
- <ul>
137
- <li>Always have the latest version of Word and other Office apps</li>
138
- <li>Install Word on up to five PCs or Macs, five tablets, and five phones per user</li>
139
- <li>Get 1 TB of cloud storage per user with OneDrive</li>
140
- <li>Get 60 minutes of Skype calls per month per user</li>
141
- <li>Get premium support from Microsoft experts</li>
142
- <li>Get access to exclusive features and updates</li>
143
- </ul>
144
- <p>To download Word 2017 with a Microsoft 365 subscription, you need to:</p>
145
- <ol>
146
- <li>Go to the <a href="">Microsoft 365 website</a> and choose a plan that suits you.</li>
147
- <li>Sign in with your Microsoft account or create one if you don't have one.</li>
148
- <li>Enter your payment details and confirm your purchase.</li>
- <li>Go to the <a href="">Microsoft 365 portal</a> and sign in with your account.</li>
149
- <li>Select Install Office and follow the instructions to download and install Word 2017 on your Windows 7 computer.</li>
150
- </ol>
151
- <h3>Office Home & Business or Home & Student edition</h3>
152
- <p>If you don't want to pay for a monthly or yearly subscription, you can also buy a standalone version of Word 2017 that comes with a perpetual license. This means that you can use Word 2017 as long as you want, but you won't get any updates or online services.</p>
153
- <p>You can choose between two editions: Office Home & Business or Office Home & Student. The main difference is that Office Home & Business includes Outlook, while Office Home & Student does not. Both editions include Word, Excel, PowerPoint, and OneNote.</p>
154
- <p>Some of the benefits of buying a standalone version of Word 2017 include:</p>
155
- <ul>
156
- <li>Pay once and use Word 2017 forever</li>
157
- <li>Install Word on one PC or Mac per license</li>
158
- <li>Get basic support from Microsoft</li>
159
- <li>Get access to some features and updates</li>
160
- </ul>
161
- <p>To download Word 2017 with a standalone version, you need to:</p>
162
- <ol>
163
- <li>Go to the <a href="">Microsoft Store website</a> and choose the edition that suits you.</li>
164
- <li>Sign in with your Microsoft account or create one if you don't have one.</li>
165
- <li>Enter your payment details and confirm your purchase.</li>
166
- <li>Go to the <a href="">Microsoft Store portal</a> and sign in with your account.</li>
167
- <li>Select Install Office and follow the instructions to download and install Word 2017 on your Windows 7 computer.</li>
168
- </ol>
169
- <h3>Word Online</h3>
170
- <p>If you don't want to pay or install anything, you can also use Word for free in your web browser. This is called Word Online, and it allows you to create, edit, and share documents online with limited features and functionality.</p>
171
- <p>Some of the benefits of using Word Online include:</p>
172
- <ul>
173
- <li>No installation or payment required</li>
174
- <li>Access Word from any device or platform</li>
175
- <li>Save and open documents from OneDrive or other cloud services</li>
176
- <li>Co-author documents with others online</li>
177
- <li>Export documents to PDF or other formats</li>
178
- </ul>
179
- <p>To use Word Online, you need to:</p>
180
- <ol>
181
- <li>Go to the <a href="">Word Online website</a>.</li <li>Sign in with your Microsoft account or create one if you don't have one.</li>
182
- <li>Create a new document or open an existing one from OneDrive or other cloud services.</li>
183
- <li>Edit and format your document as you wish, using the available features and tools.</li>
184
- <li>Save and share your document as you wish, using the available options and commands.</li>
185
- </ol>
186
- <h2>How to Install Word 2017 on Windows 7</h2>
187
- <p>Once you have downloaded Word 2017 for Windows 7, you need to install it on your computer. The installation process may vary depending on the download option you chose, but generally, it involves the following steps:</p>
188
- <ol>
189
- <li>Run the setup file that you downloaded or received from Microsoft.</li>
190
- <li>Follow the instructions on the screen to complete the installation.</li>
191
- <li>Activate Word 2017 with your Microsoft account or product key, if required.</li>
192
- <li>Launch Word 2017 from the Start menu or the desktop shortcut.</li>
193
- </ol>
194
- <h2>How to Use Word 2017 on Windows 7</h2>
195
- <p>After you have installed Word 2017 on Windows 7, you can start using it to create, edit, and share documents. Here are some basic tips and tricks to help you get started:</p>
196
- <ul>
197
- <li>To create a new document, click File > New and choose a blank document or a template.</li>
198
- <li>To open an existing document, click File > Open and browse to the location of your document, or use the Recent list to access your recent documents.</li>
199
- <li>To edit your document, use the ribbon menu or the context menu to access various commands and options, or use keyboard shortcuts or voice commands to perform actions faster.</li>
200
- <li>To format your document, apply fonts, styles, themes, and layout options from the ribbon menu or the context menu, or use keyboard shortcuts to work faster.</li>
201
- <li>To save your document, click File > Save or Save As and choose a location and a format for your document, or use keyboard shortcuts or voice commands to perform actions faster.</li>
- <li>To share your document, click File > Share and choose an option to send your document as an email attachment or a link, or to co-author your document with others online.</li>
202
- </ul>
203
- <h2>Conclusion</h2>
204
- <p>Word 2017 is a powerful and versatile word processor that can help you create, edit, and share professional-looking documents. You can download Word 2017 for Windows 7 in three ways: through a Microsoft 365 subscription, through a standalone Office Home & Business or Home & Student edition, or through Word Online. You can also install and use Word 2017 on Windows 7 easily and conveniently, with the help of the features and tools that Word 2017 offers.</p>
205
- <p>We hope that this article has helped you learn how to download Word 2017 free for Windows 7, and that you enjoy using Word 2017 for your word processing needs. If you have any questions or feedback, please feel free to contact us or leave a comment below.</p>
206
- <h2>FAQs</h2>
207
- <p>Here are some common questions and answers about downloading and using Word 2017 on Windows 7.</p>
208
- <h3>Q: Is Word 2017 compatible with Windows 7?</h3>
209
- <p>A: Yes, Word 2017 is compatible with Windows 7 Service Pack 1 or later. However, you may need to install some updates or patches to ensure the best performance and security of Word 2017 on Windows 7.</p>
210
- <h3>Q: How can I update Word 2017 on Windows 7?</h3>
211
- <p>A: If you have a Microsoft 365 subscription, you can update Word 2017 automatically or manually through the Microsoft 365 portal. If you have a standalone version of Word 2017, you can update Word 2017 manually through the Microsoft Update website or the Windows Update service.</p>
212
- <h3>Q: How can I uninstall Word 2017 from Windows 7?</h3>
213
- <p>A: If you want to uninstall Word 2017 from Windows 7, you can do so through the Control Panel or the Settings app. You can choose to uninstall only Word 2017 or the entire Office suite that includes Word 2017.</p>
214
- <h3>Q: How can I get help with Word 2017 on Windows 7?</h3>
215
- <p>A: If you need help with Word 2017 on Windows 7, you can use the following resources:</p>
216
- <ul>
217
- <li>The built-in Help feature in Word 2017</li>
218
- <li>The online support website for Word</li>
219
- <li>The online community forum for Word</li>
220
- <li>The online learning center for Word</li>
221
- <li>The premium support service from Microsoft (for Microsoft 365 subscribers)</li>
222
- </ul>
223
- <h3>Q: How can I get more features and functionality with Word 2017 on Windows 7?</h3>
224
- <p>A: If you want to get more features and functionality with Word 2017 on Windows 7, you can do the following:</p>
225
- <ul>
226
- <li>Upgrade to a Microsoft 365 subscription that includes Word 2017 and other Office apps and services</li>
227
- <li>Install add-ins or extensions for Word 2017 that enhance its capabilities and integration with other apps and services</li>
228
- <li>Use macros or VBA scripts to automate tasks and customize Word 2017</li>
229
- <li>Use third-party tools or software that work with Word 2017</li>
230
- </ul>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1vash/demo-flask-docker-template/api_server.py DELETED
@@ -1,164 +0,0 @@
1
- # Official HF Docker Spaces examples: https://huggingface.co/docs/hub/spaces-sdks-docker-examples#docker-spaces-examples
2
-
3
- ##################
4
- # Flask API usages:
5
- # 1. Just a wrapper over OpenAI API
6
- # 2. You can use Chain calls of OpenAI API
7
- # 3. Using your own ML model in combination with OpenAI API functionality
8
- # 4. ...
9
- ##################
10
-
11
- import os
12
- import time
13
- import numpy as np
14
- from PIL import Image
15
-
16
- from pathlib import Path
17
-
18
- # Disable tensorflow warnings
19
- os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
20
-
21
- from tensorflow import keras
22
- from flask import Flask, jsonify, request, render_template
23
-
24
- load_type = 'remote_hub_from_pretrained'
25
- """
26
- local;
27
- remote_hub_download;
28
- remote_hub_from_pretrained;
29
- remote_hub_pipeline; - needs config.json and this is not easy to grasp how to do it with custom models
30
- https://discuss.huggingface.co/t/how-to-create-a-config-json-after-saving-a-model/10459/4
31
- """
32
-
33
- REPO_ID = "1vash/mnist_demo_model"
34
- MODEL_DIR = "./artifacts/models"
35
-
36
- # Load the saved model into memory
37
- if load_type == 'local':
38
- model = keras.models.load_model(f'{MODEL_DIR}/mnist_model.h5')
39
- elif load_type == 'remote_hub_download':
40
- from huggingface_hub import hf_hub_download
41
-
42
- model = keras.models.load_model(hf_hub_download(repo_id=REPO_ID, filename="saved_model.pb"))
43
- elif load_type == 'remote_hub_from_pretrained':
44
- # https://huggingface.co/docs/hub/keras
45
- os.environ['TRANSFORMERS_CACHE'] = str(Path(MODEL_DIR).absolute())
46
- from huggingface_hub import from_pretrained_keras
47
- model = from_pretrained_keras(REPO_ID, cache_dir=MODEL_DIR)
48
- elif load_type == 'remote_hub_pipeline':
49
- from transformers import pipeline
50
-
51
- model = pipeline("image-classification", model=REPO_ID)
52
- else:
53
- raise AssertionError('No load type is specified!')
54
-
55
- # Initialize the Flask application
56
- app = Flask(__name__)
57
-
58
-
59
- # API route for prediction
60
- @app.route('/predict', methods=['POST'])
61
- def predict():
62
- """
63
- Predicts the class label of an input image.
64
-
65
- Request format:
66
- multipart/form-data with a file field named "image"
67
- (the file must be a 28x28 grayscale image)
68
- 
69
- Response format:
70
- {
71
- "label": predicted_label,
72
- "pred_proba": prediction class probability,
73
- "ml-latency-ms": latency_in_milliseconds
74
- (measures time only for ML preprocessing and predict)
76
- }
77
- """
78
- if 'image' not in request.files:
79
- # Handle if no file is selected
80
- return 'No file selected'
81
-
82
- start_time = time.time()
83
-
84
- file = request.files['image']
85
-
86
- # Get pixels out of file
87
- image_data = Image.open(file)
88
-
89
- # Check image shape
90
- if image_data.size != (28, 28):
91
- return "Invalid image shape. Expected (28, 28), take from 'demo images' folder."
92
-
93
- # Preprocess the image
94
- processed_image = preprocess_image(image_data)
95
-
96
- # Make a prediction, verbose=0 to disable progress bar in logs
97
- prediction = model.predict(processed_image, verbose=0)
98
-
99
- # Get the predicted class label
100
- predicted_label = np.argmax(prediction)
101
- proba = prediction[0][predicted_label]
102
-
103
- # Calculate latency in milliseconds
104
- latency_ms = (time.time() - start_time) * 1000
105
-
106
- # Return the prediction result and latency as dictionary response
107
- response = {
108
- 'label': int(predicted_label),
109
- 'pred_proba': float(proba),
110
- 'ml-latency-ms': round(latency_ms, 4)
111
- }
112
-
113
- # dictionary is not a JSON: https://www.quora.com/What-is-the-difference-between-JSON-and-a-dictionary
114
- # flask.jsonify vs json.dumps https://sentry.io/answers/difference-between-json-dumps-and-flask-jsonify/
115
- # The flask.jsonify() function returns a Response object with Serializable JSON and content_type=application/json.
116
- return jsonify(response)
117
-
118
-
119
- # Helper function to preprocess the image
120
- def preprocess_image(image_data):
121
- """Preprocess image for Model Inference
122
-
123
- :param image_data: Raw image
124
- :return: image: Preprocessed Image
125
- """
126
- # Reshape the 28x28 image into a batch of shape (1, 28, 28) to match the model input
127
- image = np.array(image_data).reshape(1, 28, 28)
128
-
129
- # Normalize the pixel values
130
- image = image.astype('float32') / 255.0
131
-
132
- return image
133
-
134
-
135
- # API route for health check
136
- @app.route('/health', methods=['GET'])
137
- def health():
138
- """
139
- Health check API to ensure the application is running.
140
- Returns "OK" if the application is healthy.
141
- Demo Usage: "curl http://localhost:5000/health" or using alias "curl http://127.0.0.1:5000/health"
142
- """
143
- return 'OK'
144
-
145
-
146
- # API route for version
147
- @app.route('/version', methods=['GET'])
148
- def version():
149
- """
150
- Returns the version of the application.
151
- Demo Usage: "curl http://127.0.0.1:5000/version" or using alias "curl http://127.0.0.1:5000/version"
152
- """
153
- return '1.0'
154
-
155
-
156
- @app.route("/")
157
- def hello_world():
158
- return render_template("index.html")
159
- # return "<p>Hello, Team!</p>"
160
-
161
-
162
- # Start the Flask application
163
- if __name__ == '__main__':
164
- app.run(debug=True)
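To exercise the /predict endpoint above, a client can POST an image as multipart form data. A minimal sketch, assuming the server runs locally on port 5000 and that a 28x28 grayscale test image exists at the hypothetical path "digit.png":

```python
# Minimal client sketch for the Flask API above (not part of the original file).
# Assumptions: server at http://localhost:5000 and a 28x28 grayscale image
# saved at "digit.png" (hypothetical path).
import requests

with open("digit.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/predict",
        files={"image": ("digit.png", f, "image/png")},  # field name must be "image"
    )
resp.raise_for_status()
result = resp.json()
print(result["label"], result["pred_proba"], result["ml-latency-ms"])
```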
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/make_synthesis_engines.py DELETED
@@ -1,122 +0,0 @@
1
- import json
2
- import sys
3
- from pathlib import Path
4
- from typing import Dict, List, Optional
5
-
6
- from ..utility import engine_root, get_save_dir
7
- from .core_wrapper import CoreWrapper, load_runtime_lib
8
- from .synthesis_engine import SynthesisEngine, SynthesisEngineBase
9
-
10
-
11
- def make_synthesis_engines(
12
- use_gpu: bool,
13
- voicelib_dirs: Optional[List[Path]] = None,
14
- voicevox_dir: Optional[Path] = None,
15
- runtime_dirs: Optional[List[Path]] = None,
16
- cpu_num_threads: Optional[int] = None,
17
- enable_mock: bool = True,
18
- load_all_models: bool = False,
19
- ) -> Dict[str, SynthesisEngineBase]:
20
- """
21
- Load the voice libraries and create the synthesis engines.
22
- 
23
- Parameters
24
- ----------
25
- use_gpu: bool
26
- Whether the voice library should use the GPU
27
- voicelib_dirs: List[Path], optional, default=None
28
- List of directories that contain the voice libraries themselves
29
- voicevox_dir: Path, optional, default=None
30
- Directory that contains a compiled voicevox or voicevox_engine
31
- runtime_dirs: List[Path], optional, default=None
32
- List of directories that contain the libraries used by the core
33
- When None, voicevox_dir and the current directory are used
34
- cpu_num_threads: int, optional, default=None
35
- Number of CPU threads the voice library uses for inference
36
- When None, the library's own behavior chooses half the logical cores or the physical core count
37
- enable_mock: bool, optional, default=True
38
- Whether to fall back to a mock engine when core loading fails
39
- load_all_models: bool, optional, default=False
40
- Whether to load all models at startup
41
- """
42
- if cpu_num_threads == 0 or cpu_num_threads is None:
43
- print(
44
- "Warning: cpu_num_threads is set to 0. "
45
- + "( The library leaves the decision to the synthesis runtime )",
46
- file=sys.stderr,
47
- )
48
- cpu_num_threads = 0
49
-
50
- if voicevox_dir is not None:
51
- if voicelib_dirs is not None:
52
- voicelib_dirs.append(voicevox_dir)
53
- else:
54
- voicelib_dirs = [voicevox_dir]
55
- if runtime_dirs is not None:
56
- runtime_dirs.append(voicevox_dir)
57
- else:
58
- runtime_dirs = [voicevox_dir]
59
- else:
60
- root_dir = engine_root()
61
- if voicelib_dirs is None:
62
- voicelib_dirs = [root_dir]
63
- if runtime_dirs is None:
64
- runtime_dirs = [root_dir]
65
-
66
- voicelib_dirs = [p.expanduser() for p in voicelib_dirs]
67
- runtime_dirs = [p.expanduser() for p in runtime_dirs]
68
-
69
- load_runtime_lib(runtime_dirs)
70
-
71
- synthesis_engines = {}
72
-
73
- if not enable_mock:
74
-
75
- def load_core_library(core_dir: Path, suppress_error: bool = False):
76
- """
77
- Load the core found in the specified directory.
78
- A user directory may not exist, so it is best to suppress errors there.
79
- """
80
- try:
81
- core = CoreWrapper(use_gpu, core_dir, cpu_num_threads, load_all_models)
82
- metas = json.loads(core.metas())
83
- core_version = metas[0]["version"]
84
- if core_version in synthesis_engines:
85
- print(
86
- "Warning: Core loading is skipped because of version duplication.",
87
- file=sys.stderr,
88
- )
89
- else:
90
- synthesis_engines[core_version] = SynthesisEngine(core=core)
91
- except Exception:
92
- if not suppress_error:
93
- raise
94
-
95
- for core_dir in voicelib_dirs:
96
- load_core_library(core_dir)
97
-
98
- # Load cores located in the user directory
99
- user_voicelib_dirs = []
100
- core_libraries_dir = get_save_dir() / "core_libraries"
101
- core_libraries_dir.mkdir(exist_ok=True)
102
- user_voicelib_dirs.append(core_libraries_dir)
103
- for path in core_libraries_dir.glob("*"):
104
- if not path.is_dir():
105
- continue
106
- user_voicelib_dirs.append(path)
107
-
108
- for core_dir in user_voicelib_dirs:
109
- load_core_library(core_dir, suppress_error=True)
110
-
111
- else:
112
- # Add the mock engine
113
- from ..dev.core import metas as mock_metas
114
- from ..dev.core import supported_devices as mock_supported_devices
115
- from ..dev.synthesis_engine import MockSynthesisEngine
116
-
117
- if "0.0.0" not in synthesis_engines:
118
- synthesis_engines["0.0.0"] = MockSynthesisEngine(
119
- speakers=mock_metas(), supported_devices=mock_supported_devices()
120
- )
121
-
122
- return synthesis_engines
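For context, a sketch of how this factory might be invoked; the directory path and thread count below are illustrative assumptions, not values from the source:

```python
# Usage sketch for make_synthesis_engines (the voicevox_dir path is hypothetical).
from pathlib import Path

engines = make_synthesis_engines(
    use_gpu=False,
    voicevox_dir=Path("~/voicevox"),  # expanded internally via expanduser()
    cpu_num_threads=4,
    enable_mock=True,  # with this flag set, the mock engine is registered instead of a real core
)
for version, engine in engines.items():
    print(f"core {version}: {type(engine).__name__}")
```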
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/7hao/bingo/src/components/user-menu.tsx DELETED
@@ -1,113 +0,0 @@
1
- 'use client'
2
-
3
- import { useEffect, useState } from 'react'
4
- import Image from 'next/image'
5
- import { toast } from 'react-hot-toast'
6
- import { Button } from '@/components/ui/button'
7
- import pkg from '../../package.json'
8
- import {
9
- DropdownMenu,
10
- DropdownMenuContent,
11
- DropdownMenuItem,
12
- DropdownMenuSeparator,
13
- DropdownMenuTrigger
14
- } from '@/components/ui/dropdown-menu'
15
- import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons'
16
- import SettingIcon from '@/assets/images/settings.svg'
17
- import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
18
-
19
- export function UserMenu() {
20
- const [host, setHost] = useState('')
21
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
22
- useEffect(() => {
23
- setHost(location.host)
24
- }, [])
25
-
26
- useEffect(() => {
27
- if (isCopied) {
28
- toast.success('复制成功')
29
- }
30
- }, [isCopied])
31
- return (
32
- <div className="flex items-center justify-between">
33
- <DropdownMenu>
34
- <DropdownMenuTrigger asChild>
35
- <Button className="pl-0">
36
- <div className="flex items-center justify-center text-xs font-medium uppercase rounded-full select-none h-7 w-7 shrink-0 bg-muted/50 text-muted-foreground">
37
- <Image alt="settings" src={SettingIcon} width={20} />
38
- </div>
39
- <span className="ml-2">设置</span>
40
- </Button>
41
- </DropdownMenuTrigger>
42
- <DropdownMenuContent sideOffset={8} align="start" className="w-[180px] bg-background">
43
- <DropdownMenuItem
44
- onClick={() =>
45
- location.href='#dialog="settings"'
46
- }
47
- className="cursor-pointer"
48
- >
49
- 设置用户
50
- </DropdownMenuItem>
51
- <DropdownMenuSeparator />
52
- <DropdownMenuItem
53
- onClick={() =>
54
- location.href='#dialog="voice"'
55
- }
56
- className="cursor-pointer"
57
- >
58
- 语音设置
59
- </DropdownMenuItem>
60
- <DropdownMenuSeparator />
61
- <DropdownMenuItem asChild>
62
- <a
63
- href="https://github.com/weaigc/bingo/"
64
- target="_blank"
65
- rel="noopener noreferrer"
66
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
67
- >
68
- 开源地址
69
- <IconGitHub />
70
- <IconExternalLink className="w-3 h-3 ml-auto" />
71
- </a>
72
- </DropdownMenuItem>
73
- <DropdownMenuSeparator />
74
- <DropdownMenuItem asChild>
75
- <a
76
- href="https://huggingface.co/spaces/hf4all/bingo"
77
- target="_blank"
78
- rel="noopener noreferrer"
79
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
80
- >
81
- 托管地址
82
- 🤗
83
- <IconExternalLink className="w-3 h-3 ml-auto" />
84
- </a>
85
- </DropdownMenuItem>
86
- <DropdownMenuSeparator />
87
- <DropdownMenuItem asChild>
88
- <a
89
- href="https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic"
90
- target="_blank"
91
- rel="noopener noreferrer"
92
- className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
93
- >
94
- 复制站点
95
- <IconExternalLink className="w-3 h-3 ml-auto" />
96
- </a>
97
- </DropdownMenuItem>
98
- <DropdownMenuSeparator />
99
- <DropdownMenuItem className="flex-col items-start">
100
- <div className="font-medium">版本信息 {pkg.version}</div>
101
- </DropdownMenuItem>
102
- <DropdownMenuSeparator />
103
- <DropdownMenuItem className="flex-col items-start">
104
- <div className="font-medium">站点域名</div>
105
- <div onClick={() => copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer">
106
- {host} <IconCopy />
107
- </div>
108
- </DropdownMenuItem>
109
- </DropdownMenuContent>
110
- </DropdownMenu>
111
- </div>
112
- )
113
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/utils/clonerepo_experimental.py DELETED
@@ -1,253 +0,0 @@
1
- import os
2
- import subprocess
3
- import shutil
4
- from concurrent.futures import ThreadPoolExecutor, as_completed
5
- from tqdm.notebook import tqdm
6
- from pathlib import Path
7
- import requests
8
-
9
- def run_script():
10
- def run_cmd(cmd):
11
- process = subprocess.run(cmd, shell=True, check=True, text=True)
12
- return process.stdout
13
-
14
- # Change the current directory to /content/
15
- os.chdir('/content/')
16
- print("Changing dir to /content/")
17
-
18
- # Helper that rewrites selected UI defaults (hop length, index rate, epochs, ...) in a given file
19
- def edit_file(file_path):
20
- temp_file_path = "/tmp/temp_file.py"
21
- changes_made = False
22
- with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
23
- previous_line = ""
24
- second_previous_line = ""
25
- for line in file:
26
- new_line = line.replace("value=160", "value=128")
27
- if new_line != line:
28
- print("Replaced 'value=160' with 'value=128'")
29
- changes_made = True
30
- line = new_line
31
-
32
- new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
33
- if new_line != line:
34
- print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
35
- changes_made = True
36
- line = new_line
37
-
38
- new_line = line.replace("value=0.88", "value=0.75")
39
- if new_line != line:
40
- print("Replaced 'value=0.88' with 'value=0.75'")
41
- changes_made = True
42
- line = new_line
43
-
44
- if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
45
- new_line = line.replace("value=1,", "value=0.25,")
46
- if new_line != line:
47
- print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
48
- changes_made = True
49
- line = new_line
50
-
51
- if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
52
- new_line = line.replace("value=20,", "value=500,")
53
- if new_line != line:
54
- print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
55
- changes_made = True
56
- line = new_line
57
-
58
- if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
59
- if 'value="pm",' in line:
60
- new_line = line.replace('value="pm",', 'value="mangio-crepe",')
61
- if new_line != line:
62
- print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
63
- changes_made = True
64
- line = new_line
65
-
66
- new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
67
- if new_line != line:
68
- print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
69
- changes_made = True
70
- line = new_line
71
-
72
- if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
73
- if 'value=i18n("否"),' in line:
74
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
75
- if new_line != line:
76
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
77
- changes_made = True
78
- line = new_line
79
-
80
- if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
81
- if 'value=i18n("否"),' in line:
82
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
83
- if new_line != line:
84
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
85
- changes_made = True
86
- line = new_line
87
-
88
- temp_file.write(line)
89
- second_previous_line = previous_line
90
- previous_line = line
91
-
92
- # After finishing, replace the original file with the temp one (shutil is already imported at module level)
94
- shutil.move(temp_file_path, file_path)
95
-
96
- if changes_made:
97
- print("Changes made and file saved successfully.")
98
- else:
99
- print("No changes were needed.")
100
-
101
- # Define the repo path
102
- repo_path = '/content/Applio-RVC-Fork'
103
-
104
- def copy_all_files_in_directory(src_dir, dest_dir):
105
- # Iterate over all files in source directory
106
- for item in Path(src_dir).glob('*'):
107
- if item.is_file():
108
- # Copy each file to destination directory
109
- shutil.copy(item, dest_dir)
110
- else:
111
- # If it's a directory, make a new directory in the destination and copy the files recursively
112
- new_dest = Path(dest_dir) / item.name
113
- new_dest.mkdir(exist_ok=True)
114
- copy_all_files_in_directory(str(item), str(new_dest))
115
-
116
- def clone_and_copy_repo(repo_path):
117
- # New repository link
118
- new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
119
- # Temporary path to clone the repository
120
- temp_repo_path = "/content/temp_Applio-RVC-Fork"
121
- # New folder name
122
- new_folder_name = "Applio-RVC-Fork"
123
-
124
- # Clone the latest code from the new repository to a temporary location
125
- run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
126
- os.chdir(temp_repo_path)
127
-
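- # Sequential checkouts of pinned commits; the working tree ends up at the last SHA below.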
128
- run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
129
- run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
130
- run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
131
- run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
132
- run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
133
- run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
134
- run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
135
- run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
136
- run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
137
- run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
138
- run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
139
- run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
140
- run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
141
- run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
142
- run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
143
- run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
144
- run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
145
- run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
146
- run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
147
- run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
148
- run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
149
- run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
150
- run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
151
- run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
152
- run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
153
- run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
154
-
155
- # Edit the file here, before copying
156
- #edit_file(f"{temp_repo_path}/infer-web.py")
157
-
158
- # Copy all files from the cloned repository to the existing path
159
- copy_all_files_in_directory(temp_repo_path, repo_path)
160
- print(f"Copying all {new_folder_name} files from GitHub.")
161
-
162
- # Change working directory back to /content/
163
- os.chdir('/content/')
164
- print("Changed path back to /content/")
165
-
166
- # Remove the temporary cloned repository
167
- shutil.rmtree(temp_repo_path)
168
-
169
- # Call the function
170
- clone_and_copy_repo(repo_path)
171
-
172
- # Download the credentials file for RVC archive sheet
173
- os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
174
- run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
175
-
176
- # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
177
- shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
178
- shutil.rmtree('/content/torchcrepe', ignore_errors=True)
179
-
180
- # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
181
- run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
182
- shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
183
- shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
184
-
185
- # Change the current directory to /content/Applio-RVC-Fork
186
- os.chdir('/content/Applio-RVC-Fork')
187
- os.makedirs('pretrained', exist_ok=True)
188
- os.makedirs('uvr5_weights', exist_ok=True)
189
-
190
- def download_file(url, filepath):
191
- response = requests.get(url, stream=True)
192
- response.raise_for_status()
193
-
194
- with open(filepath, "wb") as file:
195
- for chunk in response.iter_content(chunk_size=8192):
196
- if chunk:
197
- file.write(chunk)
198
-
199
- def download_pretrained_models():
200
- pretrained_models = {
201
- "pretrained": [
202
- "D40k.pth",
203
- "G40k.pth",
204
- "f0D40k.pth",
205
- "f0G40k.pth"
206
- ],
207
- "pretrained_v2": [
208
- "D40k.pth",
209
- "G40k.pth",
210
- "f0D40k.pth",
211
- "f0G40k.pth",
212
- "f0G48k.pth",
213
- "f0D48k.pth"
214
- ],
215
- "uvr5_weights": [
216
- "HP2-人声vocals+非人声instrumentals.pth",
217
- "HP5-主旋律人声vocals+其他instrumentals.pth",
218
- "VR-DeEchoNormal.pth",
219
- "VR-DeEchoDeReverb.pth",
220
- "VR-DeEchoAggressive.pth",
221
- "HP5_only_main_vocal.pth",
222
- "HP3_all_vocals.pth",
223
- "HP2_all_vocals.pth"
224
- ]
225
- }
226
- part2 = "I"
227
- base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
228
- base_path = "/content/Applio-RVC-Fork/"
229
- base_pathm = base_path
230
-
231
- # Calculate total number of files to download
232
- total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
233
-
234
- with tqdm(total=total_files, desc="Downloading files") as pbar:
235
- for folder, models in pretrained_models.items():
236
- folder_path = os.path.join(base_path, folder)
237
- os.makedirs(folder_path, exist_ok=True)
238
- for model in models:
239
- url = base_url + folder + "/" + model
240
- filepath = os.path.join(folder_path, model)
241
- download_file(url, filepath)
242
- pbar.update()
243
-
244
- # Download hubert_base.pt to the base path
245
- hubert_url = base_url + "hubert_base.pt"
246
- hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
247
- download_file(hubert_url, hubert_filepath)
248
- pbar.update()
249
- def clone_repository(run_download):
250
- with ThreadPoolExecutor(max_workers=2) as executor:
251
- executor.submit(run_script)
252
- if run_download:
253
- executor.submit(download_pretrained_models)
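A sketch of how a notebook cell might drive this module once it is imported; the flag value is an example:

```python
# Hypothetical driver cell: clone the fork and, in parallel, download the
# pretrained weights listed above.
clone_repository(run_download=True)
```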
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Dashboards/Memory-Chat-Story-Generator-ChatGPT/app.py DELETED
@@ -1,217 +0,0 @@
- import gradio as gr
- import os
- import json
- import base64  # needed by download_csv below; missing from the original imports
- import requests
- import pandas as pd
-
- # Streaming endpoint
- API_URL = "https://api.openai.com/v1/chat/completions"  # os.getenv("API_URL") + "/generate_stream"
- OPENAI_API_KEY = os.environ["HF_TOKEN"]  # Add a token to this space, then copy it to the repository secret in this space's settings panel; os.environ reads it from there.
- # Keys for the OpenAI ChatGPT API are created at: https://platform.openai.com/account/api-keys
-
- def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):  # repetition_penalty, top_k
-
-     # 1. Set up a payload
-     payload = {
-         "model": "gpt-3.5-turbo",
-         "messages": [{"role": "user", "content": f"{inputs}"}],
-         "temperature": 1.0,
-         "top_p": 1.0,
-         "n": 1,
-         "stream": True,
-         "presence_penalty": 0,
-         "frequency_penalty": 0,
-     }
-
-     # 2. Define your headers and add a key from https://platform.openai.com/account/api-keys
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {OPENAI_API_KEY}"
-     }
-
-     # 3. If this is not the first turn, rebuild the full message history so the model sees prior context
-     print(f"chat_counter - {chat_counter}")
-     if chat_counter != 0:
-         messages = []
-         for data in chatbot:
-             temp1 = {}
-             temp1["role"] = "user"
-             temp1["content"] = data[0]
-             temp2 = {}
-             temp2["role"] = "assistant"
-             temp2["content"] = data[1]
-             messages.append(temp1)
-             messages.append(temp2)
-         temp3 = {}
-         temp3["role"] = "user"
-         temp3["content"] = inputs
-         messages.append(temp3)
-         payload = {
-             "model": "gpt-3.5-turbo",
-             "messages": messages,  # [{"role": "user", "content": f"{inputs}"}],
-             "temperature": temperature,  # 1.0,
-             "top_p": top_p,  # 1.0,
-             "n": 1,
-             "stream": True,
-             "presence_penalty": 0,
-             "frequency_penalty": 0,
-         }
-     chat_counter += 1
-
-     # 4. POST it to the OpenAI API
-     history.append(inputs)
-     print(f"payload is - {payload}")
-     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-     token_counter = 0
-     partial_words = ""
-
-     # 5. Iterate through response lines and assemble a readable response
-     counter = 0
-     for chunk in response.iter_lines():
-         if counter == 0:
-             counter += 1
-             continue
-         if chunk.decode():
-             chunk = chunk.decode()
-             if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
-                 partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
-                 if token_counter == 0:
-                     history.append(" " + partial_words)
-                 else:
-                     history[-1] = partial_words
-                 chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]  # convert history to a list of (user, bot) tuples
-                 token_counter += 1
-                 yield chat, history, chat_counter
-
-
- def reset_textbox():
-     return gr.update(value='')
-
-
- # Episodic and Semantic IO
- def list_files(file_path):
-     icon_csv = "📄 "
-     icon_txt = "📑 "
-     current_directory = os.getcwd()
-     file_list = []
-     for filename in os.listdir(current_directory):
-         if filename.endswith(".csv"):
-             file_list.append(icon_csv + filename)
-         elif filename.endswith(".txt"):
-             file_list.append(icon_txt + filename)
-     if file_list:
-         return "\n".join(file_list)
-     else:
-         return "No .csv or .txt files found in the current directory."
-
- # Function to read a file
- def read_file(file_path):
-     try:
-         with open(file_path, "r") as file:
-             contents = file.read()
-         return f"{contents}"
-         # return f"Contents of {file_path}:\n{contents}"
-     except FileNotFoundError:
-         return "File not found."
-
- # Function to delete a file
- def delete_file(file_path):
-     try:
-         os.remove(file_path)
-         return f"{file_path} has been deleted."
-     except FileNotFoundError:
-         return "File not found."
-
- # Function to write to a file
- def write_file(file_path, content):
-     try:
-         with open(file_path, "w") as file:
-             file.write(content)
-         return f"Successfully written to {file_path}."
-     except:
-         return "Error occurred while writing to file."
-
- # Function to append to a file
- def append_file(file_path, content):
-     try:
-         with open(file_path, "a") as file:
-             file.write(content)
-         return f"Successfully appended to {file_path}."
-     except:
-         return "Error occurred while appending to file."
-
- def download_csv(file_path):
-     df = pd.read_csv(file_path)
-     csv = df.to_csv(index=False)
-     b64 = base64.b64encode(csv.encode()).decode()
-     href = f'<a href="data:application/octet-stream;base64,{b64}" download="{file_path}">Download</a>'
-     return href
-
- title = """<h1 align="center">Memory Chat Story Generator ChatGPT</h1>"""
- description = """
- ## ChatGPT Datasets 📚
- - WebText
- - Common Crawl
- - BooksCorpus
- - English Wikipedia
- - Toronto Books Corpus
- - OpenWebText
- ## ChatGPT Datasets - Details 📚
- - **WebText:** A dataset of web pages crawled from domains on the Alexa top 5,000 list. This dataset was used to pretrain GPT-2.
-   - [WebText: A Large-Scale Unsupervised Text Corpus by Radford et al.](https://paperswithcode.com/dataset/webtext)
- - **Common Crawl:** A dataset of web pages from a variety of domains, which is updated regularly. This dataset was used to pretrain GPT-3.
-   - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/common-crawl) by Brown et al.
- - **BooksCorpus:** A dataset of over 11,000 books from a variety of genres.
-   - [Scalable Methods for 8 Billion Token Language Modeling](https://paperswithcode.com/dataset/bookcorpus) by Zhu et al.
- - **English Wikipedia:** A dump of the English-language Wikipedia as of 2018, with articles from 2001-2017.
-   - [Improving Language Understanding by Generative Pre-Training](https://huggingface.co/spaces/awacke1/WikipediaUltimateAISearch?logs=build) Space for Wikipedia Search
- - **Toronto Books Corpus:** A dataset of over 7,000 books from a variety of genres, collected by the University of Toronto.
-   - [Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond](https://paperswithcode.com/dataset/bookcorpus) by Schwenk and Douze.
- - **OpenWebText:** A dataset of web pages that were filtered to remove content that was likely to be low-quality or spammy. This dataset was used to pretrain GPT-3.
-   - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/openwebtext) by Brown et al.
- """
-
- # 6. Use Gradio to pull it all together
- with gr.Blocks(css="""#col_container {width: 1400px; margin-left: auto; margin-right: auto;} #chatbot {height: 600px; overflow: auto;}""") as demo:
-     gr.HTML(title)
-     with gr.Column(elem_id="col_container"):
-         inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")
-         chatbot = gr.Chatbot(elem_id='chatbot')
-         state = gr.State([])
-         b1 = gr.Button()
-         with gr.Accordion("Parameters", open=False):
-             top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)")
-             temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature")
-             chat_counter = gr.Number(value=0, visible=True, precision=0)
-
-         # Episodic/Semantic IO
-         fileName = gr.Textbox(label="Filename")
-         fileContent = gr.TextArea(label="File Content")
-         completedMessage = gr.Textbox(label="Completed")
-         label = gr.Label()
-         with gr.Row():
-             listFiles = gr.Button("📄 List File(s)")
-             readFile = gr.Button("📖 Read File")
-             saveFile = gr.Button("💾 Save File")
-             deleteFile = gr.Button("🗑️ Delete File")
-             appendFile = gr.Button("➕ Append File")
-             downloadCSV = gr.Button("📥 Download File")
-         listFiles.click(list_files, inputs=fileName, outputs=fileContent)
-         readFile.click(read_file, inputs=fileName, outputs=fileContent)
-         saveFile.click(write_file, inputs=[fileName, fileContent], outputs=completedMessage)
-         deleteFile.click(delete_file, inputs=fileName, outputs=completedMessage)
-         appendFile.click(append_file, inputs=[fileName, fileContent], outputs=completedMessage)
-         downloadCSV.click(download_csv, inputs=fileName, outputs=fileContent)
-
-     inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter])
-     b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter])
-     b1.click(reset_textbox, [], [inputs])
-     inputs.submit(reset_textbox, [], [inputs])
-     gr.Markdown(description)
-
- demo.queue().launch(debug=True)
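The streaming loop above strips the "data: " prefix (chunk[6:]) from each server-sent-event line and accumulates the delta content. A minimal offline sketch of that parsing step, run against canned lines rather than a live API response (the sample payloads are illustrative, not captured output):

import json

# Canned server-sent-event lines, shaped like OpenAI chat-completion stream chunks.
sample_lines = [
    'data: {"choices": [{"delta": {"role": "assistant"}}]}',
    'data: {"choices": [{"delta": {"content": "Once"}}]}',
    'data: {"choices": [{"delta": {"content": " upon"}}]}',
    'data: {"choices": [{"delta": {"content": " a time."}}]}',
    'data: [DONE]',
]

partial_words = ""
for line in sample_lines:
    body = line[6:]  # strip the "data: " prefix, as chunk[6:] does above
    if body == "[DONE]":
        break
    delta = json.loads(body)["choices"][0]["delta"]
    if "content" in delta:
        partial_words += delta["content"]

print(partial_words)  # -> "Once upon a time."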
spaces/AIFILMS/StyleGANEX/models/stylegan2/op/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .fused_act import FusedLeakyReLU, fused_leaky_relu
- from .upfirdn2d import upfirdn2d
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/__init__.py DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d101_8xb32_in1k.py DELETED
@@ -1,5 +0,0 @@
- _base_ = [
-     '../_base_/models/resnetv1d101.py',
-     '../_base_/datasets/imagenet_bs32_pil_resize.py',
-     '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
- ]
spaces/Abdllh/poetry202/app.py DELETED
@@ -1,53 +0,0 @@
- import gc
- import gradio as gr
- from transformers import pipeline, set_seed
-
- pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
- # gc.collect()
- samples = [['أنت', 1.0, 50, 1.0, 1.0, 114],
-            ['هل غادر', 1.0, 50, 1.0, 1.0, 114],
-            ['ألا ليت', 1.0, 50, 1.0, 1.0, 114],
-            ['يا قدس', 1.0, 50, 1.0, 1.0, 114],
-            ['عيد بأية حال', 1.0, 50, 1.0, 1.0, 114],
-            ['لكل شيء إذا ما', 1.0, 50, 1.0, 1.0, 114],
-            ['.', 1.0, 50, 1.0, 1.0, 114]]
-
- notes = """
- - Enter a short prompt or select (click) one of the examples and click SEND
- - Adjust parameters (temperature, top k, top p and penalty) through the sliders (keep close to default values).
- - For the same seed (randomness), the same output is regenerated if other parameters are fixed
- - Clear and enter a new prompt, or select another example and SEND to regenerate
- - The '.' means start a new line from no prompt (your prompt need not be long)
- - Be patient: this runs on CPU (free tier)
- - Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
- - Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
- """
- def sayPoetry(prompt, temp=1.0, topk=50, topp=1.0, penalty=1.0, seed=114):
-     if not int(seed) >= 0: seed = 114
-     set_seed(seed)
-     gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty,
-                min_length=64, no_repeat_ngram_size=3, return_full_text=True,
-                num_beams=5, num_return_sequences=1)[0]["generated_text"]
-     poetry = ""
-     for line in gen.split('.')[:-1]:
-         poetry += line  # + "\n"
-     return poetry
- poetry = gr.Interface(fn=sayPoetry,
-     inputs=[
-         gr.Textbox(label="Enter a short prompt or select from the examples:"),
-         gr.Slider(0.70, 1.2, step=0.01, value=1.0, label='control temperature'),
-         gr.Slider(25, 100, step=1, value=50, label='control top k'),
-         gr.Slider(0.80, 1.0, step=0.01, value=1.0, label='control top p'),
-         gr.Slider(0.90, 1.50, step=0.01, value=1.0, label='control penalty'),
-         gr.Number(value=139750, precision=0, label='Seed'),
-     ],
-     outputs=[gr.Textbox(label="Generated Poetry:")],
-     allow_flagging='never',
-     title='Arabic Poetry Generation Demo (updated Jan. 2023)',
-     description="A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)",
-     examples=samples,
-     cache_examples=False,
-     article=notes)
- poetry.launch()  # show_error=True, debug=True
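The reproducibility trick in this app is transformers' set_seed before each generation call. A minimal sketch of the same pattern, using the small public gpt2 checkpoint as a stand-in for the Arabic model (the model choice here is illustrative only):

from transformers import pipeline, set_seed

pipe = pipeline('text-generation', model='gpt2')

set_seed(42)
first = pipe("Once upon a time", max_length=20, do_sample=True)[0]["generated_text"]

set_seed(42)  # same seed + same parameters -> identical sampled output
second = pipe("Once upon a time", max_length=20, do_sample=True)[0]["generated_text"]

assert first == second
print(first)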
spaces/AchyuthGamer/OpenGPT/g4f/__init__.py DELETED
@@ -1,110 +0,0 @@
- from __future__ import annotations
- from requests import get
- # from g4f.models import Model, ModelUtils
- from .models import Model, ModelUtils
- from .Provider import BaseProvider
- from .typing import CreateResult, Union
- from .debug import logging
-
- version = '0.1.5.6'
- version_check = False
-
- def check_pypi_version() -> None:
-     try:
-         response = get("https://pypi.org/pypi/g4f/json").json()
-         latest_version = response["info"]["version"]
-
-         if version != latest_version:
-             print(f'New pypi version: {latest_version} (current: {version}) | pip install -U g4f')
-
-     except Exception as e:
-         print(f'Failed to check g4f pypi version: {e}')
-
- def get_model_and_provider(model    : Union[Model, str],
-                            provider : Union[type[BaseProvider], None],
-                            stream   : bool) -> tuple[Model, type[BaseProvider]]:
-
-     if isinstance(model, str):
-         if model in ModelUtils.convert:
-             model = ModelUtils.convert[model]
-         else:
-             raise Exception(f'The model: {model} does not exist')
-
-     if not provider:
-         provider = model.best_provider
-
-     if not provider:
-         raise Exception(f'No provider found for model: {model}')
-
-     if not provider.working:
-         raise Exception(f'{provider.__name__} is not working')
-
-     if not provider.supports_stream and stream:
-         raise Exception(f'ValueError: {provider.__name__} does not support "stream" argument')
-
-     if logging:
-         print(f'Using {provider.__name__} provider')
-
-     return model, provider
-
- class ChatCompletion:
-     @staticmethod
-     def create(model: Union[Model, str],
-                messages : list[dict[str, str]],
-                provider : Union[type[BaseProvider], None] = None,
-                stream   : bool = False,
-                auth     : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]:
-
-         model, provider = get_model_and_provider(model, provider, stream)
-
-         if provider.needs_auth and not auth:
-             raise Exception(
-                 f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
-
-         if provider.needs_auth:
-             kwargs['auth'] = auth
-
-         result = provider.create_completion(model.name, messages, stream, **kwargs)
-         return result if stream else ''.join(result)
-
-     @staticmethod
-     async def create_async(
-         model: Union[Model, str],
-         messages: list[dict[str, str]],
-         provider: Union[type[BaseProvider], None] = None,
-         **kwargs
-     ) -> str:
-         model, provider = get_model_and_provider(model, provider, False)
-
-         return await provider.create_async(model.name, messages, **kwargs)
-
- class Completion:
-     @staticmethod
-     def create(
-         model: str,
-         prompt: str,
-         provider: Union[type[BaseProvider], None] = None,
-         stream: bool = False,
-         **kwargs
-     ) -> Union[CreateResult, str]:
-
-         allowed_models = [
-             'code-davinci-002',
-             'text-ada-001',
-             'text-babbage-001',
-             'text-curie-001',
-             'text-davinci-002',
-             'text-davinci-003'
-         ]
-
-         if model not in allowed_models:
-             raise Exception(f'ValueError: Can\'t use {model} with Completion.create()')
-
-         model, provider = get_model_and_provider(model, provider, stream)
-
-         result = provider.create_completion(model.name, [{"role": "user", "content": prompt}], stream, **kwargs)
-
-         return result if stream else ''.join(result)
-
- if version_check:
-     check_pypi_version()
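The core of this module is the name-to-model lookup with a provider fallback. A stripped-down, dependency-free sketch of that dispatch pattern (all class and registry names here are illustrative, not part of g4f):

class EchoProvider:
    working = True

    @staticmethod
    def create_completion(model_name, messages):
        # Stand-in backend: echo the last user message.
        return f"[{model_name}] {messages[-1]['content']}"

REGISTRY = {"gpt-3.5-turbo": ("gpt-3.5-turbo", EchoProvider)}

def create(model, messages, provider=None):
    if model not in REGISTRY:
        raise ValueError(f"The model {model} does not exist")
    model_name, default_provider = REGISTRY[model]
    provider = provider or default_provider  # fall back to the model's best provider
    if not provider.working:
        raise RuntimeError(f"{provider.__name__} is not working")
    return provider.create_completion(model_name, messages)

print(create("gpt-3.5-turbo", [{"role": "user", "content": "hello"}]))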
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/simpledropdownlist/SimpleDropDownList.d.ts DELETED
@@ -1,20 +0,0 @@
- import DropDownList from '../dropdownlist/DropDownList';
- import BuildListConfig from '../utils/build/BuildListConfig';
-
- export default SimpleDropDownList;
-
- declare namespace SimpleDropDownList {
-     interface IConfig extends BuildListConfig.IConfig {
-     }
-
-     interface ICreatorsConfig extends BuildListConfig.ICreators {
-     }
- }
-
- declare class SimpleDropDownList extends DropDownList {
-     constructor(
-         scene: Phaser.Scene,
-         config?: SimpleDropDownList.IConfig,
-         creators?: SimpleDropDownList.ICreatorsConfig
-     );
- }
spaces/AlexWang/lama/saicinpainting/training/modules/__init__.py DELETED
@@ -1,31 +0,0 @@
- import logging
-
- from saicinpainting.training.modules.ffc import FFCResNetGenerator
- from saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \
-     NLayerDiscriminator, MultidilatedNLayerDiscriminator
-
- def make_generator(config, kind, **kwargs):
-     logging.info(f'Make generator {kind}')
-
-     if kind == 'pix2pixhd_multidilated':
-         return MultiDilatedGlobalGenerator(**kwargs)
-
-     if kind == 'pix2pixhd_global':
-         return GlobalGenerator(**kwargs)
-
-     if kind == 'ffc_resnet':
-         return FFCResNetGenerator(**kwargs)
-
-     raise ValueError(f'Unknown generator kind {kind}')
-
-
- def make_discriminator(kind, **kwargs):
-     logging.info(f'Make discriminator {kind}')
-
-     if kind == 'pix2pixhd_nlayer_multidilated':
-         return MultidilatedNLayerDiscriminator(**kwargs)
-
-     if kind == 'pix2pixhd_nlayer':
-         return NLayerDiscriminator(**kwargs)
-
-     raise ValueError(f'Unknown discriminator kind {kind}')
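The if-chains above implement a string-keyed factory. An equivalent table-driven sketch of the same pattern (the toy classes below are placeholders, not the actual lama modules):

import logging

class GlobalGenerator: ...
class FFCResNetGenerator: ...

GENERATORS = {
    'pix2pixhd_global': GlobalGenerator,
    'ffc_resnet': FFCResNetGenerator,
}

def make_generator(kind, **kwargs):
    logging.info(f'Make generator {kind}')
    try:
        return GENERATORS[kind](**kwargs)  # table lookup replaces the if-chain
    except KeyError:
        raise ValueError(f'Unknown generator kind {kind}') from None

gen = make_generator('ffc_resnet')
print(type(gen).__name__)  # -> FFCResNetGenerator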
spaces/Alpaca233/SadTalker/src/utils/paste_pic.py DELETED
@@ -1,69 +0,0 @@
- import cv2, os
- import numpy as np
- from tqdm import tqdm
- import uuid
-
- from src.utils.videoio import save_video_with_watermark
-
- def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop=False):
-
-     if not os.path.isfile(pic_path):
-         raise ValueError('pic_path must be a valid path to video/image file')
-     elif pic_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:
-         # loader for first frame
-         full_img = cv2.imread(pic_path)
-     else:
-         # loader for videos: read only the first frame, then stop
-         video_stream = cv2.VideoCapture(pic_path)
-         fps = video_stream.get(cv2.CAP_PROP_FPS)
-         full_frames = []
-         while 1:
-             still_reading, frame = video_stream.read()
-             if not still_reading:
-                 video_stream.release()
-                 break
-             break
-         full_img = frame
-     frame_h = full_img.shape[0]
-     frame_w = full_img.shape[1]
-
-     video_stream = cv2.VideoCapture(video_path)
-     fps = video_stream.get(cv2.CAP_PROP_FPS)
-     crop_frames = []
-     while 1:
-         still_reading, frame = video_stream.read()
-         if not still_reading:
-             video_stream.release()
-             break
-         crop_frames.append(frame)
-
-     if len(crop_info) != 3:
-         print("you didn't crop the image")
-         return
-     else:
-         r_w, r_h = crop_info[0]
-         clx, cly, crx, cry = crop_info[1]
-         lx, ly, rx, ry = crop_info[2]
-         lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
-         # oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx
-
-         if extended_crop:
-             oy1, oy2, ox1, ox2 = cly, cry, clx, crx
-         else:
-             oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx
-
-         tmp_path = str(uuid.uuid4()) + '.mp4'
-         out_tmp = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'MP4V'), fps, (frame_w, frame_h))
-         for crop_frame in tqdm(crop_frames, 'seamlessClone:'):
-             p = cv2.resize(crop_frame.astype(np.uint8), (ox2 - ox1, oy2 - oy1))
-
-             mask = 255 * np.ones(p.shape, p.dtype)
-             location = ((ox1 + ox2) // 2, (oy1 + oy2) // 2)
-             gen_img = cv2.seamlessClone(p, full_img, mask, location, cv2.NORMAL_CLONE)
-             out_tmp.write(gen_img)
-
-         out_tmp.release()
-
-         save_video_with_watermark(tmp_path, new_audio_path, full_video_path, watermark=False)
-         os.remove(tmp_path)
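The Poisson-blending step above hinges on cv2.seamlessClone, which takes a source patch, a destination image, a mask, and the patch center. A self-contained sketch on synthetic arrays (no video I/O), assuming opencv-python is installed:

import cv2
import numpy as np

# Destination: a flat gray canvas; source: a small bright patch.
dst = np.full((200, 200, 3), 120, dtype=np.uint8)
src = np.full((60, 60, 3), 240, dtype=np.uint8)

# The mask marks which source pixels participate in the blend (all of them here).
mask = 255 * np.ones(src.shape, src.dtype)

# Center of where the patch should land in the destination image.
center = (100, 100)

blended = cv2.seamlessClone(src, dst, mask, center, cv2.NORMAL_CLONE)
print(blended.shape)  # (200, 200, 3); the patch edges are smoothly blended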
spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_config.py DELETED
@@ -1,19 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
-
- attr_dict = dict(
-     interface_gan={  # strength
-         # strength: negative for shorter, positive for longer
-         'upper_length': [-1],
-         'bottom_length': [1]
-     },
-     stylespace={  # layer, strength, threshold
-         # strength: negative for shorter, positive for longer
-         'upper_length': [5, -5, 0.0028],
-         'bottom_length': [3, 5, 0.003]
-     },
-     sefa={  # layer, strength
-         # -5 # strength: negative for longer, positive for shorter
-         'upper_length': [[4, 5, 6, 7], 5],
-         'bottom_length': [[4, 5, 6, 7], 5]
-     }
- )
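Each editing method stores its parameters in a different shape (a bare strength, a layer/strength/threshold triple, or a layer-list/strength pair), so consumers have to unpack per method. A small illustrative access sketch, assuming the attr_dict defined above is in scope:

# Unpack the per-method parameter layouts from attr_dict (shapes as defined above).
strength, = attr_dict['interface_gan']['upper_length']                 # single strength
layer, strength, threshold = attr_dict['stylespace']['upper_length']   # layer, strength, threshold
layers, strength = attr_dict['sefa']['upper_length']                   # layer list + strength
print(layers, strength)  # -> [4, 5, 6, 7] 5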
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/in_translation.md DELETED
@@ -1,16 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # In translation
-
- We are working hard on the translation. Please wait a little longer.
- Thank you!
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/inpainting.py DELETED
@@ -1,9 +0,0 @@
- import warnings
-
- from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401
-
-
- warnings.warn(
-     "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
-     " StableDiffusionInpaintPipeline` instead."
- )
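This file is a classic deprecation shim: it re-exports the real symbol and warns on import. A generic, runnable sketch of the same pattern (the module and alias names here are placeholders chosen for illustration):

# legacy_utils.py -- keep an old import path alive while steering users to the new one.
import warnings

from json import loads as parse_json  # noqa: F401  (re-export under the legacy name)

warnings.warn(
    "legacy_utils is deprecated; use json.loads directly.",
    DeprecationWarning,
    stacklevel=2,  # point the warning at the importer, not at this shim
)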
spaces/Andy1621/uniformer_image_detection/tools/model_converters/publish_model.py DELETED
@@ -1,39 +0,0 @@
- import argparse
- import subprocess
-
- import torch
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(
-         description='Process a checkpoint to be published')
-     parser.add_argument('in_file', help='input checkpoint filename')
-     parser.add_argument('out_file', help='output checkpoint filename')
-     args = parser.parse_args()
-     return args
-
-
- def process_checkpoint(in_file, out_file):
-     checkpoint = torch.load(in_file, map_location='cpu')
-     # remove optimizer for smaller file size
-     if 'optimizer' in checkpoint:
-         del checkpoint['optimizer']
-     # if it is necessary to remove some sensitive data in checkpoint['meta'],
-     # add the code here.
-     torch.save(checkpoint, out_file)
-     sha = subprocess.check_output(['sha256sum', out_file]).decode()
-     if out_file.endswith('.pth'):
-         out_file_name = out_file[:-4]
-     else:
-         out_file_name = out_file
-     final_file = out_file_name + f'-{sha[:8]}.pth'
-     subprocess.Popen(['mv', out_file, final_file])
-
-
- def main():
-     args = parse_args()
-     process_checkpoint(args.in_file, args.out_file)
-
-
- if __name__ == '__main__':
-     main()
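The script shells out to sha256sum and mv, which ties it to Unix. A portable sketch of the same hash-and-rename step using only the standard library (file names below are placeholders):

import hashlib
import os

def hash_and_rename(out_file):
    # Hash the saved checkpoint in chunks so large files never load into memory at once.
    h = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for block in iter(lambda: f.read(1 << 20), b''):
            h.update(block)
    stem = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = f'{stem}-{h.hexdigest()[:8]}.pth'
    os.replace(out_file, final_file)  # atomic rename on the same filesystem
    return final_file

# Example with a tiny placeholder file:
with open('model_final.pth', 'wb') as f:
    f.write(b'checkpoint bytes')
print(hash_and_rename('model_final.pth'))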
spaces/AriaMei/TTSdemo/app.py DELETED
@@ -1,164 +0,0 @@
- import gradio as gr
- import torch
- import commons
- import utils
- from models import SynthesizerTrn
- from text.symbols import symbols
- from text import text_to_sequence
- import random
- import os
- import datetime
- import numpy as np
-
-
- def get_text(text, hps):
-     text_norm = text_to_sequence(text, hps.data.text_cleaners)
-     if hps.data.add_blank:
-         text_norm = commons.intersperse(text_norm, 0)
-     text_norm = torch.LongTensor(text_norm)
-     return text_norm
-
-
- def tts(txt, emotion, index, hps, net_g, random_emotion_root):
-     """`emotion` is the path to a reference emotion embedding, or "random_sample" to draw one at random."""
-     stn_tst = get_text(txt, hps)
-     rand_wav = ""
-     with torch.no_grad():
-         x_tst = stn_tst.unsqueeze(0)
-         x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
-         sid = torch.LongTensor([index])  # appoint character
-         if os.path.exists(f"{emotion}"):
-             emo = torch.FloatTensor(np.load(f"{emotion}")).unsqueeze(0)
-             rand_wav = emotion
-         elif emotion == "random_sample":
-             while True:
-                 rand_wav = random.sample(os.listdir(random_emotion_root), 1)[0]
-                 if os.path.exists(f"{random_emotion_root}/{rand_wav}"):
-                     break
-             emo = torch.FloatTensor(np.load(f"{random_emotion_root}/{rand_wav}")).unsqueeze(0)
-             print(f"{random_emotion_root}/{rand_wav}")
-         else:
-             print("invalid `emotion` argument")
-
-         audio = \
-             net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=0.667, noise_scale_w=0.8, length_scale=1, emo=emo)[
-                 0][
-                 0, 0].data.float().numpy()
-     path = random_emotion_root + "/" + rand_wav
-     return audio, path
-
-
- def random_generate(txt, index, hps, net_g, random_emotion_root):
-     audio, rand_wav = tts(txt, emotion='random_sample', index=index, hps=hps, net_g=net_g,
-                           random_emotion_root=random_emotion_root)
-     return audio, rand_wav
-
-
- def charaterRoot(name):
-     global random_emotion_root
-     if name == '九条都':
-         random_emotion_root = "9nineEmo/my"
-         index = 0
-     elif name == '新海天':
-         random_emotion_root = "9nineEmo/sr"
-         index = 1
-     elif name == '结城希亚':
-         random_emotion_root = "9nineEmo/na"
-         index = 2
-     elif name == '蕾娜':
-         random_emotion_root = "9nineEmo/gt"
-         index = 3
-     elif name == '索菲':
-         random_emotion_root = "9nineEmo/sf"
-         index = 4
-     return random_emotion_root, index
-
-
- def configSelect(config):
-     global checkPonit, config_file
-     if config == 'mul':
-         config_file = "./configs/9nine_multi.json"
-         checkPonit = "logs/9nineM/G_252000.pth"
-     elif config == "single":
-         config_file = "./configs/sora.json"
-         checkPonit = "logs/sora/G_341200.pth"
-     return config_file, checkPonit
-
-
- def runVits(name, config, txt, emotion):
-     config_file, checkPoint = configSelect(config)
-     random_emotion_root, index = charaterRoot(name=name)
-     checkPonit = checkPoint
-     hps = utils.get_hparams_from_file(config_file)
-     net_g = SynthesizerTrn(
-         len(symbols),
-         hps.data.filter_length // 2 + 1,
-         hps.train.segment_size // hps.data.hop_length,
-         n_speakers=hps.data.n_speakers,
-         **hps.model)
-     _ = net_g.eval()
-
-     _ = utils.load_checkpoint(checkPonit, net_g, None)
-     audio, rand_wav = tts(txt, emotion=emotion, index=index, hps=hps, net_g=net_g,
-                           random_emotion_root=random_emotion_root)
-     return (hps.data.sampling_rate, audio), rand_wav
-
-
- def nineMul(name, txt):
-     config = 'mul'
-     audio, rand_wav = runVits(name, config, txt, 'random_sample')
-     return "multiple model success", audio, rand_wav
-
-
- def nineSingle(name, txt):
-     config = 'single'
-     # name = "新海天"
-     audio, rand_wav = runVits(name, config, txt, 'random_sample')
-     return "single model success", audio, rand_wav
-
- def nineMul_select_emo(name, txt, emo):
-     config = 'mul'
-     # emo = "./9nine" + emotion
-     print(emo)
-     audio, _ = runVits(name, config, txt, emo)
-     message = "reference emotion: " + emo + " synthesis success!"
-     return message, audio
-
- app = gr.Blocks()
- with app:
-     with gr.Tabs():
-         with gr.TabItem("9nine multiple model"):
-             character = gr.Radio(['九条都', '新海天', '结城希亚', '蕾娜', '索菲'], label='character',
-                                  info="select the character you want")
-             text = gr.TextArea(label="input content, Japanese support only", value="祭りに行っただよね、知らない女の子と一緒にいて。")
-             submit = gr.Button("generate", variant='primary')
-             message = gr.Textbox(label="Message")
-             audio = gr.Audio(label="output")
-             emotion = gr.Textbox(label="reference emotion:")
-             submit.click(nineMul, [character, text], [message, audio, emotion])
-         with gr.TabItem("9nine single model"):
-             character = gr.Radio(['新海天'], label='character',
-                                  info="single model for 新海天 only")
-             text = gr.TextArea(label="input content, Japanese support only", value="祭りに行っただよね、知らない女の子と一緒にいて。")
-             submit = gr.Button("generate", variant='primary')
-             message = gr.Textbox(label="Message")
-             audio = gr.Audio(label="output")
-             emotion = gr.Textbox(label="reference emotion:")
-             submit.click(nineSingle, [character, text], [message, audio, emotion])
-         with gr.TabItem("Choose Emotion Embedding"):
-             character = gr.Radio(['九条都', '新海天', '结城希亚', '蕾娜', '索菲'], label='character',
-                                  info="select the character you want")
-             text = gr.TextArea(label="input content, Japanese support only", value="祭りに行っただよね、知らない女の子と一緒にいて。")
-             emotion = gr.Textbox(label="an emotion embedding taken from the multi-speaker model, e.g. \"./9nineEmo/sf/sf0207.wav.emo.npy\"; prefer the same character's embeddings, since another character's emotion can bleed into the voice")
-             submit = gr.Button("generate", variant='primary')
-             message = gr.Textbox(label="Message")
-             audio = gr.Audio(label="output")
-             submit.click(nineMul_select_emo, [character, text, emotion], [message, audio])
- app.launch()
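The emotion conditioning above boils down to loading a saved .npy embedding and adding a batch dimension before handing it to the synthesizer. A tiny offline sketch of that step (the file name and embedding size are made up for illustration):

import numpy as np
import torch

# Pretend a reference-emotion embedding was extracted and saved earlier.
np.save("ref.wav.emo.npy", np.random.randn(1024).astype(np.float32))

emo = torch.FloatTensor(np.load("ref.wav.emo.npy")).unsqueeze(0)  # add batch dim
print(emo.shape)  # -> torch.Size([1, 1024]), ready for net_g.infer(..., emo=emo)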
spaces/Artrajz/vits-simple-api/vits/hubert_model.py DELETED
@@ -1,221 +0,0 @@
- import copy
- from typing import Optional, Tuple
- import random
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
- class Hubert(nn.Module):
-     def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
-         super().__init__()
-         self._mask = mask
-         self.feature_extractor = FeatureExtractor()
-         self.feature_projection = FeatureProjection()
-         self.positional_embedding = PositionalConvEmbedding()
-         self.norm = nn.LayerNorm(768)
-         self.dropout = nn.Dropout(0.1)
-         self.encoder = TransformerEncoder(
-             nn.TransformerEncoderLayer(
-                 768, 12, 3072, activation="gelu", batch_first=True
-             ),
-             12,
-         )
-         self.proj = nn.Linear(768, 256)
-
-         self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
-         self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
-     def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
-         mask = None
-         if self.training and self._mask:
-             mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
-             x[mask] = self.masked_spec_embed.to(x.dtype)
-         return x, mask
-
-     def encode(
-         self, x: torch.Tensor, layer: Optional[int] = None
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         x = self.feature_extractor(x)
-         x = self.feature_projection(x.transpose(1, 2))
-         x, mask = self.mask(x)
-         x = x + self.positional_embedding(x)
-         x = self.dropout(self.norm(x))
-         x = self.encoder(x, output_layer=layer)
-         return x, mask
-
-     def logits(self, x: torch.Tensor) -> torch.Tensor:
-         logits = torch.cosine_similarity(
-             x.unsqueeze(2),
-             self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
-             dim=-1,
-         )
-         return logits / 0.1
-
-     def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
-         x, mask = self.encode(x)
-         x = self.proj(x)
-         logits = self.logits(x)
-         return logits, mask
-
-
- class HubertSoft(Hubert):
-     def __init__(self):
-         super().__init__()
-
-     @torch.inference_mode()
-     def units(self, wav: torch.Tensor) -> torch.Tensor:
-         wav = F.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
-         x, _ = self.encode(wav)
-         return self.proj(x)
-
-
- class FeatureExtractor(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
-         self.norm0 = nn.GroupNorm(512, 512)
-         self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
-         self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
-         self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
-         self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
-         self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
-         self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         x = F.gelu(self.norm0(self.conv0(x)))
-         x = F.gelu(self.conv1(x))
-         x = F.gelu(self.conv2(x))
-         x = F.gelu(self.conv3(x))
-         x = F.gelu(self.conv4(x))
-         x = F.gelu(self.conv5(x))
-         x = F.gelu(self.conv6(x))
-         return x
-
-
- class FeatureProjection(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.norm = nn.LayerNorm(512)
-         self.projection = nn.Linear(512, 768)
-         self.dropout = nn.Dropout(0.1)
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         x = self.norm(x)
-         x = self.projection(x)
-         x = self.dropout(x)
-         return x
-
-
- class PositionalConvEmbedding(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.conv = nn.Conv1d(
-             768,
-             768,
-             kernel_size=128,
-             padding=128 // 2,
-             groups=16,
-         )
-         self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         x = self.conv(x.transpose(1, 2))
-         x = F.gelu(x[:, :, :-1])
-         return x.transpose(1, 2)
-
-
- class TransformerEncoder(nn.Module):
-     def __init__(
-         self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
-     ) -> None:
-         super(TransformerEncoder, self).__init__()
-         self.layers = nn.ModuleList(
-             [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
-         )
-         self.num_layers = num_layers
-
-     def forward(
-         self,
-         src: torch.Tensor,
-         mask: torch.Tensor = None,
-         src_key_padding_mask: torch.Tensor = None,
-         output_layer: Optional[int] = None,
-     ) -> torch.Tensor:
-         output = src
-         for layer in self.layers[:output_layer]:
-             output = layer(
-                 output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
-             )
-         return output
-
-
- def _compute_mask(
-     shape: Tuple[int, int],
-     mask_prob: float,
-     mask_length: int,
-     device: torch.device,
-     min_masks: int = 0,
- ) -> torch.Tensor:
-     batch_size, sequence_length = shape
-
-     if mask_length < 1:
-         raise ValueError("`mask_length` has to be bigger than 0.")
-
-     if mask_length > sequence_length:
-         raise ValueError(
-             f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
-         )
-
-     # compute number of masked spans in batch
-     num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
-     num_masked_spans = max(num_masked_spans, min_masks)
-
-     # make sure num masked indices <= sequence_length
-     if num_masked_spans * mask_length > sequence_length:
-         num_masked_spans = sequence_length // mask_length
-
-     # SpecAugment mask to fill
-     mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
-     # uniform distribution to sample from, make sure that offset samples are < sequence_length
-     uniform_dist = torch.ones(
-         (batch_size, sequence_length - (mask_length - 1)), device=device
-     )
-
-     # get random indices to mask
-     mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
-     # expand masked indices to masked spans
-     mask_indices = (
-         mask_indices.unsqueeze(dim=-1)
-         .expand((batch_size, num_masked_spans, mask_length))
-         .reshape(batch_size, num_masked_spans * mask_length)
-     )
-     offsets = (
-         torch.arange(mask_length, device=device)[None, None, :]
-         .expand((batch_size, num_masked_spans, mask_length))
-         .reshape(batch_size, num_masked_spans * mask_length)
-     )
-     mask_idxs = mask_indices + offsets
-
-     # scatter indices to mask
-     mask = mask.scatter(1, mask_idxs, True)
-
-     return mask
-
-
- def hubert_soft(
-     path: str
- ) -> HubertSoft:
-     r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
-     Args:
-         path (str): path of a pretrained model
-     """
-     hubert = HubertSoft()
-     checkpoint = torch.load(path)
-     consume_prefix_in_state_dict_if_present(checkpoint, "module.")
-     hubert.load_state_dict(checkpoint)
-     hubert.eval()
-     return hubert
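_compute_mask above implements SpecAugment-style span masking: sample span start indices, expand each start to mask_length consecutive frames, then scatter the indices into a boolean mask. A small sketch that exercises the same recipe (parameters chosen for illustration):

import torch

batch_size, seq_len, mask_length, num_spans = 2, 20, 4, 2

# Sample span starts so every span fits inside the sequence.
uniform = torch.ones(batch_size, seq_len - (mask_length - 1))
starts = torch.multinomial(uniform, num_spans)                    # (2, 2)

# Expand each start into mask_length consecutive indices.
offsets = torch.arange(mask_length)[None, None, :]
idxs = (starts.unsqueeze(-1) + offsets).reshape(batch_size, -1)   # (2, 8)

mask = torch.zeros(batch_size, seq_len, dtype=torch.bool).scatter(1, idxs, True)
print(mask.sum(dim=1))  # <= num_spans * mask_length per row (spans may overlap)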
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/version.py DELETED
@@ -1 +0,0 @@
- __version__ = "1.0.0"
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/__init__.py DELETED
@@ -1,2 +0,0 @@
- """Contains purely network-related utilities.
- """
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/initialise_test.py DELETED
@@ -1,189 +0,0 @@
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
- import sys
- from unittest import TestCase, main, skipUnless
-
- try:
-     from unittest.mock import patch, Mock
- except ImportError:
-     from mock import patch, Mock
-
- from ..ansitowin32 import StreamWrapper
- from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests
- from .utils import osname, replace_by
-
- orig_stdout = sys.stdout
- orig_stderr = sys.stderr
-
-
- class InitTest(TestCase):
-
-     @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty")
-     def setUp(self):
-         # sanity check
-         self.assertNotWrapped()
-
-     def tearDown(self):
-         _wipe_internal_state_for_tests()
-         sys.stdout = orig_stdout
-         sys.stderr = orig_stderr
-
-     def assertWrapped(self):
-         self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped')
-         self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped')
-         self.assertTrue(isinstance(sys.stdout, StreamWrapper),
-                         'bad stdout wrapper')
-         self.assertTrue(isinstance(sys.stderr, StreamWrapper),
-                         'bad stderr wrapper')
-
-     def assertNotWrapped(self):
-         self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped')
-         self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped')
-
-     @patch('colorama.initialise.reset_all')
-     @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
-     @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False)
-     def testInitWrapsOnWindows(self, _):
-         with osname("nt"):
-             init()
-             self.assertWrapped()
-
-     @patch('colorama.initialise.reset_all')
-     @patch('colorama.ansitowin32.winapi_test', lambda *_: False)
-     def testInitDoesntWrapOnEmulatedWindows(self, _):
-         with osname("nt"):
-             init()
-             self.assertNotWrapped()
-
-     def testInitDoesntWrapOnNonWindows(self):
-         with osname("posix"):
-             init()
-             self.assertNotWrapped()
-
-     def testInitDoesntWrapIfNone(self):
-         with replace_by(None):
-             init()
-             # We can't use assertNotWrapped here because replace_by(None)
-             # changes stdout/stderr already.
-             self.assertIsNone(sys.stdout)
-             self.assertIsNone(sys.stderr)
-
-     def testInitAutoresetOnWrapsOnAllPlatforms(self):
-         with osname("posix"):
-             init(autoreset=True)
-             self.assertWrapped()
-
-     def testInitWrapOffDoesntWrapOnWindows(self):
-         with osname("nt"):
-             init(wrap=False)
-             self.assertNotWrapped()
-
-     def testInitWrapOffIncompatibleWithAutoresetOn(self):
-         self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False))
-
-     @patch('colorama.win32.SetConsoleTextAttribute')
-     @patch('colorama.initialise.AnsiToWin32')
-     def testAutoResetPassedOn(self, mockATW32, _):
-         with osname("nt"):
-             init(autoreset=True)
-             self.assertEqual(len(mockATW32.call_args_list), 2)
-             self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True)
-             self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True)
-
-     @patch('colorama.initialise.AnsiToWin32')
-     def testAutoResetChangeable(self, mockATW32):
-         with osname("nt"):
-             init()
-
-             init(autoreset=True)
-             self.assertEqual(len(mockATW32.call_args_list), 4)
-             self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True)
-             self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True)
-
-             init()
-             self.assertEqual(len(mockATW32.call_args_list), 6)
-             self.assertEqual(
-                 mockATW32.call_args_list[4][1]['autoreset'], False)
-             self.assertEqual(
-                 mockATW32.call_args_list[5][1]['autoreset'], False)
-
-
-     @patch('colorama.initialise.atexit.register')
-     def testAtexitRegisteredOnlyOnce(self, mockRegister):
-         init()
-         self.assertTrue(mockRegister.called)
-         mockRegister.reset_mock()
-         init()
-         self.assertFalse(mockRegister.called)
-
-
- class JustFixWindowsConsoleTest(TestCase):
-     def _reset(self):
-         _wipe_internal_state_for_tests()
-         sys.stdout = orig_stdout
-         sys.stderr = orig_stderr
-
-     def tearDown(self):
-         self._reset()
-
-     @patch("colorama.ansitowin32.winapi_test", lambda: True)
-     def testJustFixWindowsConsole(self):
-         if sys.platform != "win32":
-             # just_fix_windows_console should be a no-op
-             just_fix_windows_console()
-             self.assertIs(sys.stdout, orig_stdout)
-             self.assertIs(sys.stderr, orig_stderr)
-         else:
-             def fake_std():
-                 # Emulate stdout=not a tty, stderr=tty
-                 # to check that we handle both cases correctly
-                 stdout = Mock()
-                 stdout.closed = False
-                 stdout.isatty.return_value = False
-                 stdout.fileno.return_value = 1
-                 sys.stdout = stdout
-
-                 stderr = Mock()
-                 stderr.closed = False
-                 stderr.isatty.return_value = True
-                 stderr.fileno.return_value = 2
-                 sys.stderr = stderr
-
-             for native_ansi in [False, True]:
-                 with patch(
-                     'colorama.ansitowin32.enable_vt_processing',
-                     lambda *_: native_ansi
-                 ):
-                     self._reset()
-                     fake_std()
-
-                     # Regular single-call test
-                     prev_stdout = sys.stdout
-                     prev_stderr = sys.stderr
-                     just_fix_windows_console()
-                     self.assertIs(sys.stdout, prev_stdout)
-                     if native_ansi:
-                         self.assertIs(sys.stderr, prev_stderr)
-                     else:
-                         self.assertIsNot(sys.stderr, prev_stderr)
-
-                     # second call without resetting is always a no-op
-                     prev_stdout = sys.stdout
-                     prev_stderr = sys.stderr
-                     just_fix_windows_console()
-                     self.assertIs(sys.stdout, prev_stdout)
-                     self.assertIs(sys.stderr, prev_stderr)
-
-             self._reset()
-             fake_std()
-
-             # If init() runs first, just_fix_windows_console should be a no-op
-             init()
-             prev_stdout = sys.stdout
-             prev_stderr = sys.stderr
-             just_fix_windows_console()
-             self.assertIs(prev_stdout, sys.stdout)
-             self.assertIs(prev_stderr, sys.stderr)
-
-
- if __name__ == '__main__':
-     main()
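The tests above lean on unittest.mock.patch to swap platform probes in and out per test. A minimal self-contained sketch of the same decorator pattern (the module under test is a toy, not colorama):

from unittest import TestCase, main
from unittest.mock import patch
import platform

def needs_wrapping():
    # Toy stand-in for colorama's platform probe.
    return platform.system() == "Windows"

class NeedsWrappingTest(TestCase):
    @patch("platform.system", lambda: "Windows")
    def test_wraps_on_windows(self):
        self.assertTrue(needs_wrapping())

    @patch("platform.system", lambda: "Linux")
    def test_skips_elsewhere(self):
        self.assertFalse(needs_wrapping())

if __name__ == "__main__":
    main()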
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/catalog.py DELETED
@@ -1,115 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import logging
-
- from detectron2.utils.file_io import PathHandler, PathManager
-
-
- class ModelCatalog(object):
-     """
-     Store mappings from names to third-party models.
-     """
-
-     S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
-
-     # MSRA models have STRIDE_IN_1X1=True. False otherwise.
-     # NOTE: all BN models here have fused BN into an affine layer.
-     # As a result, you should only load them to a model with "FrozenBN".
-     # Loading them to a model with regular BN or SyncBN is wrong.
-     # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
-     # which should be negligible for training.
-     # NOTE: all models here uses PIXEL_STD=[1,1,1]
-     # NOTE: Most of the BN models here are no longer used. We use the
-     # re-converted pre-trained models under detectron2 model zoo instead.
-     C2_IMAGENET_MODELS = {
-         "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
-         "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
-         "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
-         "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
-         "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
-         "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
-         "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
-     }
-
-     C2_DETECTRON_PATH_FORMAT = (
-         "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl"  # noqa B950
-     )
-
-     C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
-     C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
-
-     # format: {model_name} -> part of the url
-     C2_DETECTRON_MODELS = {
-         "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW",  # noqa B950
-         "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I",  # noqa B950
-         "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7",  # noqa B950
-         "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ",  # noqa B950
-         "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB",  # noqa B950
-         "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC",  # noqa B950
-         "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT",  # noqa B950
-         "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI",  # noqa B950
-         "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q",  # noqa B950
-         "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao",  # noqa B950
-         "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L",  # noqa B950
-         "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179",  # noqa B950
-         "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2",  # noqa B950
-     }
-
-     @staticmethod
-     def get(name):
-         if name.startswith("Caffe2Detectron/COCO"):
-             return ModelCatalog._get_c2_detectron_baseline(name)
-         if name.startswith("ImageNetPretrained/"):
-             return ModelCatalog._get_c2_imagenet_pretrained(name)
-         raise RuntimeError("model not present in the catalog: {}".format(name))
-
-     @staticmethod
-     def _get_c2_imagenet_pretrained(name):
-         prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
-         name = name[len("ImageNetPretrained/") :]
-         name = ModelCatalog.C2_IMAGENET_MODELS[name]
-         url = "/".join([prefix, name])
-         return url
-
-     @staticmethod
-     def _get_c2_detectron_baseline(name):
-         name = name[len("Caffe2Detectron/COCO/") :]
-         url = ModelCatalog.C2_DETECTRON_MODELS[name]
-         if "keypoint_rcnn" in name:
-             dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
-         else:
-             dataset = ModelCatalog.C2_DATASET_COCO
-
-         if "35998355/rpn_R-50-C4_1x" in name:
-             # this one model is somehow different from others ..
-             type = "rpn"
-         else:
-             type = "generalized_rcnn"
-
-         # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
-         url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
-             prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
-         )
-         return url
-
-
- class ModelCatalogHandler(PathHandler):
-     """
-     Resolve URL like catalog://.
-     """
-
-     PREFIX = "catalog://"
-
-     def _get_supported_prefixes(self):
-         return [self.PREFIX]
-
-     def _get_local_path(self, path, **kwargs):
-         logger = logging.getLogger(__name__)
-         catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
-         logger.info("Catalog entry {} points to {}".format(path, catalog_path))
-         return PathManager.get_local_path(catalog_path, **kwargs)
-
-     def _open(self, path, mode="r", **kwargs):
-         return PathManager.open(self._get_local_path(path), mode, **kwargs)
-
-
- PathManager.register_handler(ModelCatalogHandler())
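ModelCatalogHandler shows the prefix-dispatch idea behind PathManager: a handler claims a URI scheme and rewrites paths under it. A dependency-free sketch of the same dispatch (the registry and handler below are illustrative, not the fvcore/detectron2 API):

class CatalogHandler:
    PREFIX = "catalog://"
    TABLE = {"MSRA/R-50": "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl"}

    def resolve(self, path):
        return self.TABLE[path[len(self.PREFIX):]]

HANDLERS = [CatalogHandler()]

def get_local_path(path):
    for handler in HANDLERS:
        if path.startswith(handler.PREFIX):
            return handler.resolve(path)  # delegate to the claiming handler
    return path  # plain filesystem path: pass through unchanged

print(get_local_path("catalog://MSRA/R-50"))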
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/masks.py DELETED
@@ -1,532 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import copy
3
- import itertools
4
- import numpy as np
5
- from typing import Any, Iterator, List, Union
6
- import pycocotools.mask as mask_util
7
- import torch
8
- from torch import device
9
-
10
- from detectron2.layers.roi_align import ROIAlign
11
- from detectron2.utils.memory import retry_if_cuda_oom
12
-
13
- from .boxes import Boxes
14
-
15
-
16
- def polygon_area(x, y):
17
- # Using the shoelace formula
18
- # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
19
- return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
20
-
21
-
22
- def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
23
- """
24
- Args:
25
- polygons (list[ndarray]): each array has shape (Nx2,)
26
- height, width (int)
27
-
28
- Returns:
29
- ndarray: a bool mask of shape (height, width)
30
- """
31
- if len(polygons) == 0:
32
- # COCOAPI does not support empty polygons
33
- return np.zeros((height, width)).astype(np.bool)
34
- rles = mask_util.frPyObjects(polygons, height, width)
35
- rle = mask_util.merge(rles)
36
- return mask_util.decode(rle).astype(np.bool)
37
-
38
-
39
- def rasterize_polygons_within_box(
40
- polygons: List[np.ndarray], box: np.ndarray, mask_size: int
41
- ) -> torch.Tensor:
42
- """
43
- Rasterize the polygons into a mask image and
44
- crop the mask content in the given box.
45
- The cropped mask is resized to (mask_size, mask_size).
46
-
47
- This function is used when generating training targets for mask head in Mask R-CNN.
48
- Given original ground-truth masks for an image, new ground-truth mask
49
- training targets in the size of `mask_size x mask_size`
50
- must be provided for each predicted box. This function will be called to
51
- produce such targets.
52
-
53
- Args:
54
- polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
55
- box: 4-element numpy array
56
- mask_size (int):
57
-
58
- Returns:
59
- Tensor: BoolTensor of shape (mask_size, mask_size)
60
- """
61
- # 1. Shift the polygons w.r.t the boxes
62
- w, h = box[2] - box[0], box[3] - box[1]
63
-
64
- polygons = copy.deepcopy(polygons)
65
- for p in polygons:
66
- p[0::2] = p[0::2] - box[0]
67
- p[1::2] = p[1::2] - box[1]
68
-
69
- # 2. Rescale the polygons to the new box size
70
- # max() to avoid division by small number
71
- ratio_h = mask_size / max(h, 0.1)
72
- ratio_w = mask_size / max(w, 0.1)
73
-
74
- if ratio_h == ratio_w:
75
- for p in polygons:
76
- p *= ratio_h
77
- else:
78
- for p in polygons:
79
- p[0::2] *= ratio_w
80
- p[1::2] *= ratio_h
81
-
82
- # 3. Rasterize the polygons with coco api
83
- mask = polygons_to_bitmask(polygons, mask_size, mask_size)
84
- mask = torch.from_numpy(mask)
85
- return mask
86
-
87
-
88
- class BitMasks:
89
- """
90
- This class stores the segmentation masks for all objects in one image, in
91
- the form of bitmaps.
92
-
93
- Attributes:
94
- tensor: bool Tensor of N,H,W, representing N instances in the image.
95
- """
96
-
97
- def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
98
- """
99
- Args:
100
- tensor: bool Tensor of N,H,W, representing N instances in the image.
101
- """
102
- device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
103
- tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
104
- assert tensor.dim() == 3, tensor.size()
105
- self.image_size = tensor.shape[1:]
106
- self.tensor = tensor
107
-
108
- @torch.jit.unused
109
- def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
110
- return BitMasks(self.tensor.to(*args, **kwargs))
111
-
112
- @property
113
- def device(self) -> torch.device:
114
- return self.tensor.device
115
-
116
- @torch.jit.unused
117
- def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
118
- """
119
- Returns:
120
- BitMasks: Create a new :class:`BitMasks` by indexing.
121
-
122
- The following usage are allowed:
123
-
124
- 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
125
- 2. `new_masks = masks[2:10]`: return a slice of masks.
126
- 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
127
- with `length = len(masks)`. Nonzero elements in the vector will be selected.
128
-
129
- Note that the returned object might share storage with this object,
130
- subject to Pytorch's indexing semantics.
131
- """
132
- if isinstance(item, int):
133
- return BitMasks(self.tensor[item].unsqueeze(0))
134
- m = self.tensor[item]
135
- assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
136
- item, m.shape
137
- )
138
- return BitMasks(m)
139
-
140
- @torch.jit.unused
141
- def __iter__(self) -> torch.Tensor:
142
- yield from self.tensor
143
-
144
- @torch.jit.unused
145
- def __repr__(self) -> str:
146
- s = self.__class__.__name__ + "("
147
- s += "num_instances={})".format(len(self.tensor))
148
- return s
149
-
150
- def __len__(self) -> int:
151
- return self.tensor.shape[0]
152
-
153
- def nonempty(self) -> torch.Tensor:
154
- """
155
- Find masks that are non-empty.
156
-
157
- Returns:
158
- Tensor: a BoolTensor which represents
159
- whether each mask is empty (False) or non-empty (True).
160
- """
161
- return self.tensor.flatten(1).any(dim=1)
162
-
163
- @staticmethod
164
- def from_polygon_masks(
165
- polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
166
- ) -> "BitMasks":
167
- """
168
- Args:
169
- polygon_masks (list[list[ndarray]] or PolygonMasks)
170
- height, width (int)
171
- """
172
- if isinstance(polygon_masks, PolygonMasks):
173
- polygon_masks = polygon_masks.polygons
174
- masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
175
- if len(masks):
176
- return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
177
- else:
178
- return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
179
-
180
- @staticmethod
181
- def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
182
- """
183
- Args:
184
- roi_masks:
185
- height, width (int):
186
- """
187
- return roi_masks.to_bitmasks(height, width)
188
-
189
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
190
- """
191
- Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
192
- This can be used to prepare training targets for Mask R-CNN.
193
- It has less reconstruction error compared to rasterization with polygons.
194
- However, we observe no difference in accuracy in practice,
195
- while BitMasks requires more memory to store all the masks.
196
-
197
- Args:
198
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
199
- mask_size (int): the size of the rasterized mask.
200
-
201
- Returns:
202
- Tensor:
203
- A bool tensor of shape (N, mask_size, mask_size), where
204
- N is the number of predicted boxes for this image.
205
- """
206
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
207
- device = self.tensor.device
208
-
209
- batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
210
- rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
211
-
212
- bit_masks = self.tensor.to(dtype=torch.float32)
213
- rois = rois.to(device=device)
214
- output = (
215
- ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
216
- .forward(bit_masks[:, None, :, :], rois)
217
- .squeeze(1)
218
- )
219
- output = output >= 0.5
220
- return output
221
-
222
- def get_bounding_boxes(self) -> Boxes:
223
- """
224
- Returns:
225
- Boxes: tight bounding boxes around bitmasks.
226
- If a mask is empty, its bounding box will be all zeros.
227
- """
228
- boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
229
- x_any = torch.any(self.tensor, dim=1)
230
- y_any = torch.any(self.tensor, dim=2)
231
- for idx in range(self.tensor.shape[0]):
232
- x = torch.where(x_any[idx, :])[0]
233
- y = torch.where(y_any[idx, :])[0]
234
- if len(x) > 0 and len(y) > 0:
235
- boxes[idx, :] = torch.as_tensor(
236
- [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
237
- )
238
- return Boxes(boxes)
239
-
240
- @staticmethod
241
- def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
242
- """
243
- Concatenates a list of BitMasks into a single BitMasks
244
-
245
- Arguments:
246
- bitmasks_list (list[BitMasks])
247
-
248
- Returns:
249
- BitMasks: the concatenated BitMasks
250
- """
251
- assert isinstance(bitmasks_list, (list, tuple))
252
- assert len(bitmasks_list) > 0
253
- assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
254
-
255
- cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
256
- return cat_bitmasks
257
-
258
-
259
- class PolygonMasks:
260
- """
261
- This class stores the segmentation masks for all objects in one image, in the form of polygons.
262
-
263
- Attributes:
264
- polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
265
- """
266
-
267
- def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
268
- """
269
- Arguments:
270
- polygons (list[list[np.ndarray]]): The first
271
- level of the list corresponds to individual instances,
272
- the second level to all the polygons that compose the
273
- instance, and the third level to the polygon coordinates.
274
- The third level array should have the format of
275
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
276
- """
277
- if not isinstance(polygons, list):
278
- raise ValueError(
279
- "Cannot create PolygonMasks: Expect a list of list of polygons per image. "
280
- "Got '{}' instead.".format(type(polygons))
281
- )
282
-
283
- def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
284
- # Use float64 for higher precision, because why not?
285
- # Always put polygons on CPU (self.to is a no-op) since they
286
- # are supposed to be small tensors.
287
- # May need to change this assumption if GPU placement becomes useful
288
- if isinstance(t, torch.Tensor):
289
- t = t.cpu().numpy()
290
- return np.asarray(t).astype("float64")
291
-
292
- def process_polygons(
293
- polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
294
- ) -> List[np.ndarray]:
295
- if not isinstance(polygons_per_instance, list):
296
- raise ValueError(
297
- "Cannot create polygons: Expect a list of polygons per instance. "
298
- "Got '{}' instead.".format(type(polygons_per_instance))
299
- )
300
- # transform each polygon to a numpy array
301
- polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
302
- for polygon in polygons_per_instance:
303
- if len(polygon) % 2 != 0 or len(polygon) < 6:
304
- raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
305
- return polygons_per_instance
306
-
307
- self.polygons: List[List[np.ndarray]] = [
308
- process_polygons(polygons_per_instance) for polygons_per_instance in polygons
309
- ]
310
-
311
- def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
312
- return self
313
-
314
- @property
315
- def device(self) -> torch.device:
316
- return torch.device("cpu")
317
-
318
- def get_bounding_boxes(self) -> Boxes:
319
- """
320
- Returns:
321
- Boxes: tight bounding boxes around polygon masks.
322
- """
323
- boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
324
- for idx, polygons_per_instance in enumerate(self.polygons):
325
- minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
326
- maxxy = torch.zeros(2, dtype=torch.float32)
327
- for polygon in polygons_per_instance:
328
- coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
329
- minxy = torch.min(minxy, torch.min(coords, dim=0).values)
330
- maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
331
- boxes[idx, :2] = minxy
332
- boxes[idx, 2:] = maxxy
333
- return Boxes(boxes)
334
-
335
- def nonempty(self) -> torch.Tensor:
336
- """
337
- Find masks that are non-empty.
338
-
339
- Returns:
340
- Tensor:
341
- a BoolTensor which represents whether each mask is empty (False) or not (True).
342
- """
343
- keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
344
- return torch.from_numpy(np.asarray(keep, dtype=bool))
345
-
346
- def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
347
- """
348
- Support indexing over the instances and return a `PolygonMasks` object.
349
- `item` can be:
350
-
351
- 1. An integer. It will return an object with only one instance.
352
- 2. A slice. It will return an object with the selected instances.
353
- 3. A list[int]. It will return an object with the selected instances,
354
- corresponding to the indices in the list.
355
- 4. A vector mask of type BoolTensor, whose length is num_instances.
356
- It will return an object with the instances whose mask is nonzero.
357
- """
358
- if isinstance(item, int):
359
- selected_polygons = [self.polygons[item]]
360
- elif isinstance(item, slice):
361
- selected_polygons = self.polygons[item]
362
- elif isinstance(item, list):
363
- selected_polygons = [self.polygons[i] for i in item]
364
- elif isinstance(item, torch.Tensor):
365
- # Polygons is a list, so we have to move the indices back to CPU.
366
- if item.dtype == torch.bool:
367
- assert item.dim() == 1, item.shape
368
- item = item.nonzero().squeeze(1).cpu().numpy().tolist()
369
- elif item.dtype in [torch.int32, torch.int64]:
370
- item = item.cpu().numpy().tolist()
371
- else:
372
- raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
373
- selected_polygons = [self.polygons[i] for i in item]
374
- return PolygonMasks(selected_polygons)
375
-
376
- def __iter__(self) -> Iterator[List[np.ndarray]]:
377
- """
378
- Yields:
379
- list[ndarray]: the polygons for one instance.
380
- Each ndarray is a float64 vector representing a polygon.
381
- """
382
- return iter(self.polygons)
383
-
384
- def __repr__(self) -> str:
385
- s = self.__class__.__name__ + "("
386
- s += "num_instances={})".format(len(self.polygons))
387
- return s
388
-
389
- def __len__(self) -> int:
390
- return len(self.polygons)
391
-
392
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
393
- """
394
- Crop each mask by the given box, and resize results to (mask_size, mask_size).
395
- This can be used to prepare training targets for Mask R-CNN.
396
-
397
- Args:
398
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
399
- mask_size (int): the size of the rasterized mask.
400
-
401
- Returns:
402
- Tensor: A bool tensor of shape (N, mask_size, mask_size), where
403
- N is the number of predicted boxes for this image.
404
- """
405
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
406
-
407
- device = boxes.device
408
- # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
409
- # (several small tensors for representing a single instance mask)
410
- boxes = boxes.to(torch.device("cpu"))
411
-
412
- results = [
413
- rasterize_polygons_within_box(poly, box.numpy(), mask_size)
414
- for poly, box in zip(self.polygons, boxes)
415
- ]
416
- """
417
- poly: list[list[float]], the polygons for one instance
418
- box: a tensor of shape (4,)
419
- """
420
- if len(results) == 0:
421
- return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
422
- return torch.stack(results, dim=0).to(device=device)
423
-
424
- def area(self):
425
- """
426
- Computes area of the mask.
427
- Only works with Polygons, using the shoelace formula:
428
- https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
429
-
430
- Returns:
431
- Tensor: a vector, area for each instance
432
- """
433
-
434
- area = []
435
- for polygons_per_instance in self.polygons:
436
- area_per_instance = 0
437
- for p in polygons_per_instance:
438
- area_per_instance += polygon_area(p[0::2], p[1::2])
439
- area.append(area_per_instance)
440
-
441
- return torch.tensor(area)
442
-
443
- @staticmethod
444
- def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
445
- """
446
- Concatenates a list of PolygonMasks into a single PolygonMasks
447
-
448
- Arguments:
449
- polymasks_list (list[PolygonMasks])
450
-
451
- Returns:
452
- PolygonMasks: the concatenated PolygonMasks
453
- """
454
- assert isinstance(polymasks_list, (list, tuple))
455
- assert len(polymasks_list) > 0
456
- assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
457
-
458
- cat_polymasks = type(polymasks_list[0])(
459
- list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
460
- )
461
- return cat_polymasks
462
-
463
-
464
- class ROIMasks:
465
- """
466
- Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
467
- full-image bitmask can be obtained by "pasting" the mask on the region defined
468
- by the corresponding ROI box.
469
- """
470
-
471
- def __init__(self, tensor: torch.Tensor):
472
- """
473
- Args:
474
- tensor: (N, M, M) mask tensor that defines the mask within each ROI.
475
- """
476
- if tensor.dim() != 3:
477
- raise ValueError("ROIMasks must take a mask tensor of 3 dimensions.")
478
- self.tensor = tensor
479
-
480
- def to(self, device: torch.device) -> "ROIMasks":
481
- return ROIMasks(self.tensor.to(device))
482
-
483
- @property
484
- def device(self) -> device:
485
- return self.tensor.device
486
-
487
- def __len__(self):
488
- return self.tensor.shape[0]
489
-
490
- def __getitem__(self, item) -> "ROIMasks":
491
- """
492
- Returns:
493
- ROIMasks: Create a new :class:`ROIMasks` by indexing.
494
-
495
- The following usages are allowed:
496
-
497
- 1. `new_masks = masks[2:10]`: return a slice of masks.
498
- 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
499
- with `length = len(masks)`. Nonzero elements in the vector will be selected.
500
-
501
- Note that the returned object might share storage with this object,
502
- subject to PyTorch's indexing semantics.
503
- """
504
- t = self.tensor[item]
505
- if t.dim() != 3:
506
- raise ValueError(
507
- f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
508
- )
509
- return ROIMasks(t)
510
-
511
- @torch.jit.unused
512
- def __repr__(self) -> str:
513
- s = self.__class__.__name__ + "("
514
- s += "num_instances={})".format(len(self.tensor))
515
- return s
516
-
517
- @torch.jit.unused
518
- def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
519
- """
520
- Args: see documentation of :func:`paste_masks_in_image`.
521
- """
522
- from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
523
-
524
- if torch.jit.is_tracing():
525
- if isinstance(height, torch.Tensor):
526
- paste_func = _paste_masks_tensor_shape
527
- else:
528
- paste_func = paste_masks_in_image
529
- else:
530
- paste_func = retry_if_cuda_oom(paste_masks_in_image)
531
- bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
532
- return BitMasks(bitmasks)
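
As a reference for how the deleted structures interact, below is a minimal usage sketch. It assumes detectron2 is installed and that BitMasks and PolygonMasks are importable from detectron2.structures; the polygon coordinates and sizes are made up for illustration.

import torch
from detectron2.structures import BitMasks, PolygonMasks

# One instance described by a single square polygon [x0, y0, x1, y1, ...].
poly_masks = PolygonMasks([[[10.0, 10.0, 50.0, 10.0, 50.0, 50.0, 10.0, 50.0]]])

# Rasterize to a full-image (64x64) bitmask representation.
bit_masks = BitMasks.from_polygon_masks(poly_masks, height=64, width=64)
assert len(bit_masks) == len(poly_masks) == 1

# Crop/resize both representations into 28x28 training targets.
boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0]])
poly_targets = poly_masks.crop_and_resize(boxes, mask_size=28)
bit_targets = bit_masks.crop_and_resize(boxes, mask_size=28)
print(poly_targets.shape, bit_targets.shape)  # (1, 28, 28) for both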
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_model_analysis.py DELETED
@@ -1,80 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
-
4
- import unittest
5
- import torch
6
- from torch import nn
7
-
8
- from detectron2.utils.analysis import find_unused_parameters, flop_count_operators, parameter_count
9
- from detectron2.utils.testing import get_model_no_weights
10
-
11
-
12
- class RetinaNetTest(unittest.TestCase):
13
- def setUp(self):
14
- self.model = get_model_no_weights("COCO-Detection/retinanet_R_50_FPN_1x.yaml")
15
-
16
- def test_flop(self):
17
- # RetinaNet supports flop-counting with random inputs
18
- inputs = [{"image": torch.rand(3, 800, 800), "test_unused": "abcd"}]
19
- res = flop_count_operators(self.model, inputs)
20
- self.assertEqual(int(res["conv"]), 146) # 146B flops
21
-
22
- def test_param_count(self):
23
- res = parameter_count(self.model)
24
- self.assertEqual(res[""], 37915572)
25
- self.assertEqual(res["backbone"], 31452352)
26
-
27
-
28
- class FasterRCNNTest(unittest.TestCase):
29
- def setUp(self):
30
- self.model = get_model_no_weights("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
31
-
32
- def test_flop(self):
33
- # Faster R-CNN supports flop-counting with random inputs
34
- inputs = [{"image": torch.rand(3, 800, 800)}]
35
- res = flop_count_operators(self.model, inputs)
36
-
37
- # This only checks flops for backbone & proposal generator
38
- # Flops for box head is not conv, and depends on #proposals, which is
39
- # almost 0 for random inputs.
40
- self.assertEqual(int(res["conv"]), 117)
41
-
42
- def test_flop_with_output_shape(self):
43
- inputs = [{"image": torch.rand(3, 800, 800), "height": 700, "width": 700}]
44
- res = flop_count_operators(self.model, inputs)
45
- self.assertEqual(int(res["conv"]), 117)
46
-
47
- def test_param_count(self):
48
- res = parameter_count(self.model)
49
- self.assertEqual(res[""], 41699936)
50
- self.assertEqual(res["backbone"], 26799296)
51
-
52
-
53
- class MaskRCNNTest(unittest.TestCase):
54
- def setUp(self):
55
- self.model = get_model_no_weights("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
56
-
57
- def test_flop(self):
58
- inputs1 = [{"image": torch.rand(3, 800, 800)}]
59
- inputs2 = [{"image": torch.rand(3, 800, 800), "height": 700, "width": 700}]
60
-
61
- for inputs in [inputs1, inputs2]:
62
- res = flop_count_operators(self.model, inputs)
63
- # The mask head could have extra conv flops, so total >= 117
64
- self.assertGreaterEqual(int(res["conv"]), 117)
65
-
66
-
67
- class UnusedParamTest(unittest.TestCase):
68
- def test_unused(self):
69
- class TestMod(nn.Module):
70
- def __init__(self):
71
- super().__init__()
72
- self.fc1 = nn.Linear(10, 10)
73
- self.t = nn.Linear(10, 10)
74
-
75
- def forward(self, x):
76
- return self.fc1(x).mean()
77
-
78
- m = TestMod()
79
- ret = find_unused_parameters(m, torch.randn(10, 10))
80
- self.assertEqual(set(ret), {"t.weight", "t.bias"})
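
For intuition about what find_unused_parameters checks, here is a standalone sketch of the underlying idea (not detectron2's actual implementation): run one forward/backward pass and report every parameter that never received a gradient.

import torch
from torch import nn

def find_unused_parameters_sketch(model: nn.Module, x: torch.Tensor):
    # Parameters whose .grad is still None after a backward pass were
    # never touched by the forward computation.
    model.zero_grad(set_to_none=True)
    model(x).sum().backward()
    return [name for name, p in model.named_parameters() if p.grad is None]

class TestMod(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(10, 10)
        self.t = nn.Linear(10, 10)  # defined but never used in forward

    def forward(self, x):
        return self.fc1(x).mean()

print(find_unused_parameters_sketch(TestMod(), torch.randn(10, 10)))
# ['t.weight', 't.bias']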
spaces/AzumaSeren100/XuanShen-Bert-VITS2/resample.py DELETED
@@ -1,42 +0,0 @@
1
- import os
2
- import argparse
3
- import librosa
4
- import numpy as np
5
- from multiprocessing import Pool, cpu_count
6
-
7
- import soundfile
8
- from scipy.io import wavfile
9
- from tqdm import tqdm
10
-
11
-
12
- def process(item):
13
- spkdir, wav_name, args = item
14
- speaker = spkdir.replace("\\", "/").split("/")[-1]
15
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
16
- if os.path.exists(wav_path) and '.wav' in wav_path:
17
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
18
- wav, sr = librosa.load(wav_path, sr=args.sr)
19
- soundfile.write(
20
- os.path.join(args.out_dir, speaker, wav_name),
21
- wav,
22
- sr
23
- )
24
-
25
-
26
-
27
- if __name__ == "__main__":
28
- parser = argparse.ArgumentParser()
29
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
30
- parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir")
31
- parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir")
32
- args = parser.parse_args()
33
- # processes = 8
34
- processes = cpu_count() - 2 if cpu_count() > 4 else 1
35
- pool = Pool(processes=processes)
36
-
37
- for speaker in os.listdir(args.in_dir):
38
- spk_dir = os.path.join(args.in_dir, speaker)
39
- if os.path.isdir(spk_dir):
40
- print(spk_dir)
41
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
42
- pass
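
Stripped of the multiprocessing scaffolding, the per-file work in process() reduces to a resample-on-load plus a write. A minimal single-file sketch, with hypothetical paths:

import os

import librosa
import soundfile

src, dst = "raw/speaker1/001.wav", "dataset/speaker1/001.wav"
os.makedirs(os.path.dirname(dst), exist_ok=True)
wav, sr = librosa.load(src, sr=44100)  # librosa resamples to 44100 Hz on load
soundfile.write(dst, wav, sr)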
spaces/Bart92/RVC_HF/infer/lib/infer_pack/attentions.py DELETED
@@ -1,417 +0,0 @@
1
- import copy
2
- import math
3
-
4
- import numpy as np
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from infer.lib.infer_pack import commons, modules
10
- from infer.lib.infer_pack.modules import LayerNorm
11
-
12
-
13
- class Encoder(nn.Module):
14
- def __init__(
15
- self,
16
- hidden_channels,
17
- filter_channels,
18
- n_heads,
19
- n_layers,
20
- kernel_size=1,
21
- p_dropout=0.0,
22
- window_size=10,
23
- **kwargs
24
- ):
25
- super().__init__()
26
- self.hidden_channels = hidden_channels
27
- self.filter_channels = filter_channels
28
- self.n_heads = n_heads
29
- self.n_layers = n_layers
30
- self.kernel_size = kernel_size
31
- self.p_dropout = p_dropout
32
- self.window_size = window_size
33
-
34
- self.drop = nn.Dropout(p_dropout)
35
- self.attn_layers = nn.ModuleList()
36
- self.norm_layers_1 = nn.ModuleList()
37
- self.ffn_layers = nn.ModuleList()
38
- self.norm_layers_2 = nn.ModuleList()
39
- for i in range(self.n_layers):
40
- self.attn_layers.append(
41
- MultiHeadAttention(
42
- hidden_channels,
43
- hidden_channels,
44
- n_heads,
45
- p_dropout=p_dropout,
46
- window_size=window_size,
47
- )
48
- )
49
- self.norm_layers_1.append(LayerNorm(hidden_channels))
50
- self.ffn_layers.append(
51
- FFN(
52
- hidden_channels,
53
- hidden_channels,
54
- filter_channels,
55
- kernel_size,
56
- p_dropout=p_dropout,
57
- )
58
- )
59
- self.norm_layers_2.append(LayerNorm(hidden_channels))
60
-
61
- def forward(self, x, x_mask):
62
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
63
- x = x * x_mask
64
- for i in range(self.n_layers):
65
- y = self.attn_layers[i](x, x, attn_mask)
66
- y = self.drop(y)
67
- x = self.norm_layers_1[i](x + y)
68
-
69
- y = self.ffn_layers[i](x, x_mask)
70
- y = self.drop(y)
71
- x = self.norm_layers_2[i](x + y)
72
- x = x * x_mask
73
- return x
74
-
75
-
76
- class Decoder(nn.Module):
77
- def __init__(
78
- self,
79
- hidden_channels,
80
- filter_channels,
81
- n_heads,
82
- n_layers,
83
- kernel_size=1,
84
- p_dropout=0.0,
85
- proximal_bias=False,
86
- proximal_init=True,
87
- **kwargs
88
- ):
89
- super().__init__()
90
- self.hidden_channels = hidden_channels
91
- self.filter_channels = filter_channels
92
- self.n_heads = n_heads
93
- self.n_layers = n_layers
94
- self.kernel_size = kernel_size
95
- self.p_dropout = p_dropout
96
- self.proximal_bias = proximal_bias
97
- self.proximal_init = proximal_init
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.self_attn_layers = nn.ModuleList()
101
- self.norm_layers_0 = nn.ModuleList()
102
- self.encdec_attn_layers = nn.ModuleList()
103
- self.norm_layers_1 = nn.ModuleList()
104
- self.ffn_layers = nn.ModuleList()
105
- self.norm_layers_2 = nn.ModuleList()
106
- for i in range(self.n_layers):
107
- self.self_attn_layers.append(
108
- MultiHeadAttention(
109
- hidden_channels,
110
- hidden_channels,
111
- n_heads,
112
- p_dropout=p_dropout,
113
- proximal_bias=proximal_bias,
114
- proximal_init=proximal_init,
115
- )
116
- )
117
- self.norm_layers_0.append(LayerNorm(hidden_channels))
118
- self.encdec_attn_layers.append(
119
- MultiHeadAttention(
120
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
121
- )
122
- )
123
- self.norm_layers_1.append(LayerNorm(hidden_channels))
124
- self.ffn_layers.append(
125
- FFN(
126
- hidden_channels,
127
- hidden_channels,
128
- filter_channels,
129
- kernel_size,
130
- p_dropout=p_dropout,
131
- causal=True,
132
- )
133
- )
134
- self.norm_layers_2.append(LayerNorm(hidden_channels))
135
-
136
- def forward(self, x, x_mask, h, h_mask):
137
- """
138
- x: decoder input
139
- h: encoder output
140
- """
141
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
142
- device=x.device, dtype=x.dtype
143
- )
144
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
145
- x = x * x_mask
146
- for i in range(self.n_layers):
147
- y = self.self_attn_layers[i](x, x, self_attn_mask)
148
- y = self.drop(y)
149
- x = self.norm_layers_0[i](x + y)
150
-
151
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
152
- y = self.drop(y)
153
- x = self.norm_layers_1[i](x + y)
154
-
155
- y = self.ffn_layers[i](x, x_mask)
156
- y = self.drop(y)
157
- x = self.norm_layers_2[i](x + y)
158
- x = x * x_mask
159
- return x
160
-
161
-
162
- class MultiHeadAttention(nn.Module):
163
- def __init__(
164
- self,
165
- channels,
166
- out_channels,
167
- n_heads,
168
- p_dropout=0.0,
169
- window_size=None,
170
- heads_share=True,
171
- block_length=None,
172
- proximal_bias=False,
173
- proximal_init=False,
174
- ):
175
- super().__init__()
176
- assert channels % n_heads == 0
177
-
178
- self.channels = channels
179
- self.out_channels = out_channels
180
- self.n_heads = n_heads
181
- self.p_dropout = p_dropout
182
- self.window_size = window_size
183
- self.heads_share = heads_share
184
- self.block_length = block_length
185
- self.proximal_bias = proximal_bias
186
- self.proximal_init = proximal_init
187
- self.attn = None
188
-
189
- self.k_channels = channels // n_heads
190
- self.conv_q = nn.Conv1d(channels, channels, 1)
191
- self.conv_k = nn.Conv1d(channels, channels, 1)
192
- self.conv_v = nn.Conv1d(channels, channels, 1)
193
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
194
- self.drop = nn.Dropout(p_dropout)
195
-
196
- if window_size is not None:
197
- n_heads_rel = 1 if heads_share else n_heads
198
- rel_stddev = self.k_channels**-0.5
199
- self.emb_rel_k = nn.Parameter(
200
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
201
- * rel_stddev
202
- )
203
- self.emb_rel_v = nn.Parameter(
204
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
205
- * rel_stddev
206
- )
207
-
208
- nn.init.xavier_uniform_(self.conv_q.weight)
209
- nn.init.xavier_uniform_(self.conv_k.weight)
210
- nn.init.xavier_uniform_(self.conv_v.weight)
211
- if proximal_init:
212
- with torch.no_grad():
213
- self.conv_k.weight.copy_(self.conv_q.weight)
214
- self.conv_k.bias.copy_(self.conv_q.bias)
215
-
216
- def forward(self, x, c, attn_mask=None):
217
- q = self.conv_q(x)
218
- k = self.conv_k(c)
219
- v = self.conv_v(c)
220
-
221
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
222
-
223
- x = self.conv_o(x)
224
- return x
225
-
226
- def attention(self, query, key, value, mask=None):
227
- # reshape [b, d, t] -> [b, n_h, t, d_k]
228
- b, d, t_s, t_t = (*key.size(), query.size(2))
229
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
230
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
231
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
232
-
233
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
234
- if self.window_size is not None:
235
- assert (
236
- t_s == t_t
237
- ), "Relative attention is only available for self-attention."
238
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
239
- rel_logits = self._matmul_with_relative_keys(
240
- query / math.sqrt(self.k_channels), key_relative_embeddings
241
- )
242
- scores_local = self._relative_position_to_absolute_position(rel_logits)
243
- scores = scores + scores_local
244
- if self.proximal_bias:
245
- assert t_s == t_t, "Proximal bias is only available for self-attention."
246
- scores = scores + self._attention_bias_proximal(t_s).to(
247
- device=scores.device, dtype=scores.dtype
248
- )
249
- if mask is not None:
250
- scores = scores.masked_fill(mask == 0, -1e4)
251
- if self.block_length is not None:
252
- assert (
253
- t_s == t_t
254
- ), "Local attention is only available for self-attention."
255
- block_mask = (
256
- torch.ones_like(scores)
257
- .triu(-self.block_length)
258
- .tril(self.block_length)
259
- )
260
- scores = scores.masked_fill(block_mask == 0, -1e4)
261
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
262
- p_attn = self.drop(p_attn)
263
- output = torch.matmul(p_attn, value)
264
- if self.window_size is not None:
265
- relative_weights = self._absolute_position_to_relative_position(p_attn)
266
- value_relative_embeddings = self._get_relative_embeddings(
267
- self.emb_rel_v, t_s
268
- )
269
- output = output + self._matmul_with_relative_values(
270
- relative_weights, value_relative_embeddings
271
- )
272
- output = (
273
- output.transpose(2, 3).contiguous().view(b, d, t_t)
274
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
275
- return output, p_attn
276
-
277
- def _matmul_with_relative_values(self, x, y):
278
- """
279
- x: [b, h, l, m]
280
- y: [h or 1, m, d]
281
- ret: [b, h, l, d]
282
- """
283
- ret = torch.matmul(x, y.unsqueeze(0))
284
- return ret
285
-
286
- def _matmul_with_relative_keys(self, x, y):
287
- """
288
- x: [b, h, l, d]
289
- y: [h or 1, m, d]
290
- ret: [b, h, l, m]
291
- """
292
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
293
- return ret
294
-
295
- def _get_relative_embeddings(self, relative_embeddings, length):
296
- max_relative_position = 2 * self.window_size + 1
297
- # Pad first before slice to avoid using cond ops.
298
- pad_length = max(length - (self.window_size + 1), 0)
299
- slice_start_position = max((self.window_size + 1) - length, 0)
300
- slice_end_position = slice_start_position + 2 * length - 1
301
- if pad_length > 0:
302
- padded_relative_embeddings = F.pad(
303
- relative_embeddings,
304
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
305
- )
306
- else:
307
- padded_relative_embeddings = relative_embeddings
308
- used_relative_embeddings = padded_relative_embeddings[
309
- :, slice_start_position:slice_end_position
310
- ]
311
- return used_relative_embeddings
312
-
313
- def _relative_position_to_absolute_position(self, x):
314
- """
315
- x: [b, h, l, 2*l-1]
316
- ret: [b, h, l, l]
317
- """
318
- batch, heads, length, _ = x.size()
319
- # Concat columns of pad to shift from relative to absolute indexing.
320
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
321
-
322
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
323
- x_flat = x.view([batch, heads, length * 2 * length])
324
- x_flat = F.pad(
325
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
326
- )
327
-
328
- # Reshape and slice out the padded elements.
329
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
330
- :, :, :length, length - 1 :
331
- ]
332
- return x_final
333
-
334
- def _absolute_position_to_relative_position(self, x):
335
- """
336
- x: [b, h, l, l]
337
- ret: [b, h, l, 2*l-1]
338
- """
339
- batch, heads, length, _ = x.size()
340
- # padd along column
341
- x = F.pad(
342
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
343
- )
344
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
345
- # add 0's in the beginning that will skew the elements after reshape
346
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
347
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
348
- return x_final
349
-
350
- def _attention_bias_proximal(self, length):
351
- """Bias for self-attention to encourage attention to close positions.
352
- Args:
353
- length: an integer scalar.
354
- Returns:
355
- a Tensor with shape [1, 1, length, length]
356
- """
357
- r = torch.arange(length, dtype=torch.float32)
358
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
359
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
360
-
361
-
362
- class FFN(nn.Module):
363
- def __init__(
364
- self,
365
- in_channels,
366
- out_channels,
367
- filter_channels,
368
- kernel_size,
369
- p_dropout=0.0,
370
- activation=None,
371
- causal=False,
372
- ):
373
- super().__init__()
374
- self.in_channels = in_channels
375
- self.out_channels = out_channels
376
- self.filter_channels = filter_channels
377
- self.kernel_size = kernel_size
378
- self.p_dropout = p_dropout
379
- self.activation = activation
380
- self.causal = causal
381
-
382
- if causal:
383
- self.padding = self._causal_padding
384
- else:
385
- self.padding = self._same_padding
386
-
387
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
388
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
389
- self.drop = nn.Dropout(p_dropout)
390
-
391
- def forward(self, x, x_mask):
392
- x = self.conv_1(self.padding(x * x_mask))
393
- if self.activation == "gelu":
394
- x = x * torch.sigmoid(1.702 * x)
395
- else:
396
- x = torch.relu(x)
397
- x = self.drop(x)
398
- x = self.conv_2(self.padding(x * x_mask))
399
- return x * x_mask
400
-
401
- def _causal_padding(self, x):
402
- if self.kernel_size == 1:
403
- return x
404
- pad_l = self.kernel_size - 1
405
- pad_r = 0
406
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
407
- x = F.pad(x, commons.convert_pad_shape(padding))
408
- return x
409
-
410
- def _same_padding(self, x):
411
- if self.kernel_size == 1:
412
- return x
413
- pad_l = (self.kernel_size - 1) // 2
414
- pad_r = self.kernel_size // 2
415
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
416
- x = F.pad(x, commons.convert_pad_shape(padding))
417
- return x
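
The trickiest part of the deleted module is the pad-reshape-slice trick in _relative_position_to_absolute_position. A self-contained sketch of the same shape manipulation, with commons.convert_pad_shape inlined as a plain helper:

import torch
import torch.nn.functional as F

def convert_pad_shape(pad_shape):
    # Same helper as commons.convert_pad_shape: reverse the per-dim pairs
    # and flatten, matching F.pad's last-dim-first convention.
    return [item for sublist in pad_shape[::-1] for item in sublist]

def relative_to_absolute(x):
    # x: [b, h, l, 2*l-1] -> [b, h, l, l], the same pad-reshape-slice trick
    # as _relative_position_to_absolute_position above.
    batch, heads, length, _ = x.size()
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
    x_flat = x.view([batch, heads, length * 2 * length])
    x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
    x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])
    return x_final[:, :, :length, length - 1 :]

x = torch.randn(1, 2, 4, 7)  # l=4 positions, 2*l-1=7 relative offsets
print(relative_to_absolute(x).shape)  # torch.Size([1, 2, 4, 4])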
spaces/Bart92/RVC_HF/tools/infer/train-index-v2.py DELETED
@@ -1,79 +0,0 @@
1
- """
2
- Format: cid is used directly as the built-in index position; aid does not fit, so it is looked up through a dictionary (there are only about 50k entries anyway).
3
- """
4
- import os
5
- import traceback
6
- import logging
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
- from multiprocessing import cpu_count
11
-
12
- import faiss
13
- import numpy as np
14
- from sklearn.cluster import MiniBatchKMeans
15
-
16
- # ########### If starting from raw features, save them first.
17
- n_cpu = 0
18
- if n_cpu == 0:
19
- n_cpu = cpu_count()
20
- inp_root = r"./logs/anz/3_feature768"
21
- npys = []
22
- listdir_res = list(os.listdir(inp_root))
23
- for name in sorted(listdir_res):
24
- phone = np.load("%s/%s" % (inp_root, name))
25
- npys.append(phone)
26
- big_npy = np.concatenate(npys, 0)
27
- big_npy_idx = np.arange(big_npy.shape[0])
28
- np.random.shuffle(big_npy_idx)
29
- big_npy = big_npy[big_npy_idx]
30
- logger.debug(big_npy.shape) # (6196072, 192), fp32, 4.43G
31
- if big_npy.shape[0] > 2e5:
32
- # if(1):
33
- info = "Trying kmeans to reduce %s vectors to 10k centers." % big_npy.shape[0]
34
- logger.info(info)
35
- try:
36
- big_npy = (
37
- MiniBatchKMeans(
38
- n_clusters=10000,
39
- verbose=True,
40
- batch_size=256 * n_cpu,
41
- compute_labels=False,
42
- init="random",
43
- )
44
- .fit(big_npy)
45
- .cluster_centers_
46
- )
47
- except Exception:
48
- info = traceback.format_exc()
49
- logger.warning(info)
50
-
51
- np.save("tools/infer/big_src_feature_mi.npy", big_npy)
52
-
53
- ##################train+add
54
- # big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
55
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
56
- index = faiss.index_factory(768, "IVF%s,Flat" % n_ivf) # mi
57
- logger.info("Training...")
58
- index_ivf = faiss.extract_index_ivf(index) #
59
- index_ivf.nprobe = 1
60
- index.train(big_npy)
61
- faiss.write_index(
62
- index, "tools/infer/trained_IVF%s_Flat_baseline_src_feat_v2.index" % (n_ivf)
63
- )
64
- logger.info("Adding...")
65
- batch_size_add = 8192
66
- for i in range(0, big_npy.shape[0], batch_size_add):
67
- index.add(big_npy[i : i + batch_size_add])
68
- faiss.write_index(
69
- index, "tools/infer/added_IVF%s_Flat_mi_baseline_src_feat.index" % (n_ivf)
70
- )
71
- """
72
- Sizes (all FP32)
73
- big_src_feature 2.95G
74
- (3098036, 256)
75
- big_emb 4.43G
76
- (6196072, 192)
77
- big_emb is twice as large because the features are repeated and then pitch is appended
78
-
79
- """
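
The recipe above (train an IVF index, then add vectors in batches) generalizes to any float32 feature matrix. A minimal end-to-end sketch with random stand-in features and the same n_ivf heuristic:

import faiss
import numpy as np

d = 768
feats = np.random.rand(10000, d).astype("float32")  # stand-in for real features
n_ivf = min(int(16 * np.sqrt(feats.shape[0])), feats.shape[0] // 39)
index = faiss.index_factory(d, "IVF%s,Flat" % n_ivf)
index.train(feats)
for i in range(0, feats.shape[0], 8192):  # batched add, as in the script
    index.add(feats[i : i + 8192])
faiss.extract_index_ivf(index).nprobe = 1
D, I = index.search(feats[:5], 4)  # 4 nearest neighbors for 5 query vectors
print(I.shape)  # (5, 4)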
spaces/BetterAPI/BetterChat/src/lib/utils/sha256.ts DELETED
@@ -1,7 +0,0 @@
1
- export async function sha256(input: string): Promise<string> {
2
- const utf8 = new TextEncoder().encode(input);
3
- const hashBuffer = await crypto.subtle.digest("SHA-256", utf8);
4
- const hashArray = Array.from(new Uint8Array(hashBuffer));
5
- const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join("");
6
- return hashHex;
7
- }
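
Since the rest of this commit is Python, here is a hedged Python equivalent of the deleted TypeScript helper; hashlib.sha256 produces the same digest as crypto.subtle.digest("SHA-256", ...) over the UTF-8 bytes:

import hashlib

def sha256(input_str: str) -> str:
    # Hex digest of the UTF-8 encoding, the same value the TypeScript
    # helper computes with crypto.subtle.digest("SHA-256", ...).
    return hashlib.sha256(input_str.encode("utf-8")).hexdigest()

assert sha256("abc") == (
    "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
)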
spaces/CVH-vn1210/make_hair/minigpt4/common/config.py DELETED
@@ -1,468 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import logging
9
- import json
10
- from typing import Dict
11
-
12
- from omegaconf import OmegaConf
13
- from minigpt4.common.registry import registry
14
-
15
-
16
- class Config:
17
- def __init__(self, args):
18
- self.config = {}
19
-
20
- self.args = args
21
-
22
- # Register the config and configuration for setup
23
- registry.register("configuration", self)
24
-
25
- user_config = self._build_opt_list(self.args.options)
26
-
27
- config = OmegaConf.load(self.args.cfg_path)
28
-
29
- runner_config = self.build_runner_config(config)
30
- model_config = self.build_model_config(config, **user_config)
31
- dataset_config = self.build_dataset_config(config)
32
-
33
- # Validate the user-provided runner configuration
34
- # model and dataset configuration are supposed to be validated by the respective classes
35
- # [TODO] validate the model/dataset configuration
36
- # self._validate_runner_config(runner_config)
37
-
38
- # Override the default configuration with user options.
39
- self.config = OmegaConf.merge(
40
- runner_config, model_config, dataset_config, user_config
41
- )
42
-
43
- def _validate_runner_config(self, runner_config):
44
- """
45
- This method validates the configuration, such that
46
- 1) all the user specified options are valid;
47
- 2) no type mismatches between the user specified options and the config.
48
- """
49
- runner_config_validator = create_runner_config_validator()
50
- runner_config_validator.validate(runner_config)
51
-
52
- def _build_opt_list(self, opts):
53
- opts_dot_list = self._convert_to_dot_list(opts)
54
- return OmegaConf.from_dotlist(opts_dot_list)
55
-
56
- @staticmethod
57
- def build_model_config(config, **kwargs):
58
- model = config.get("model", None)
59
- assert model is not None, "Missing model configuration file."
60
-
61
- model_cls = registry.get_model_class(model.arch)
62
- assert model_cls is not None, f"Model '{model.arch}' has not been registered."
63
-
64
- model_type = kwargs.get("model.model_type", None)
65
- if not model_type:
66
- model_type = model.get("model_type", None)
67
- # else use the model type selected by user.
68
-
69
- assert model_type is not None, "Missing model_type."
70
-
71
- model_config_path = model_cls.default_config_path(model_type=model_type)
72
-
73
- model_config = OmegaConf.create()
74
- # hiararchy override, customized config > default config
75
- model_config = OmegaConf.merge(
76
- model_config,
77
- OmegaConf.load(model_config_path),
78
- {"model": config["model"]},
79
- )
80
-
81
- return model_config
82
-
83
- @staticmethod
84
- def build_runner_config(config):
85
- return {"run": config.run}
86
-
87
- @staticmethod
88
- def build_dataset_config(config):
89
- datasets = config.get("datasets", None)
90
- if datasets is None:
91
- raise KeyError(
92
- "Expecting 'datasets' as the root key for dataset configuration."
93
- )
94
-
95
- dataset_config = OmegaConf.create()
96
-
97
- for dataset_name in datasets:
98
- builder_cls = registry.get_builder_class(dataset_name)
99
-
100
- dataset_config_type = datasets[dataset_name].get("type", "default")
101
- dataset_config_path = builder_cls.default_config_path(
102
- type=dataset_config_type
103
- )
104
-
105
- # hiararchy override, customized config > default config
106
- dataset_config = OmegaConf.merge(
107
- dataset_config,
108
- OmegaConf.load(dataset_config_path),
109
- {"datasets": {dataset_name: config["datasets"][dataset_name]}},
110
- )
111
-
112
- return dataset_config
113
-
114
- def _convert_to_dot_list(self, opts):
115
- if opts is None:
116
- opts = []
117
-
118
- if len(opts) == 0:
119
- return opts
120
-
121
- has_equal = opts[0].find("=") != -1
122
-
123
- if has_equal:
124
- return opts
125
-
126
- return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])]
127
-
128
- def get_config(self):
129
- return self.config
130
-
131
- @property
132
- def run_cfg(self):
133
- return self.config.run
134
-
135
- @property
136
- def datasets_cfg(self):
137
- return self.config.datasets
138
-
139
- @property
140
- def model_cfg(self):
141
- return self.config.model
142
-
143
- def pretty_print(self):
144
- logging.info("\n===== Running Parameters =====")
145
- logging.info(self._convert_node_to_json(self.config.run))
146
-
147
- logging.info("\n====== Dataset Attributes ======")
148
- datasets = self.config.datasets
149
-
150
- for dataset in datasets:
151
- if dataset in self.config.datasets:
152
- logging.info(f"\n======== {dataset} =======")
153
- dataset_config = self.config.datasets[dataset]
154
- logging.info(self._convert_node_to_json(dataset_config))
155
- else:
156
- logging.warning(f"No dataset named '{dataset}' in config. Skipping")
157
-
158
- logging.info(f"\n====== Model Attributes ======")
159
- logging.info(self._convert_node_to_json(self.config.model))
160
-
161
- def _convert_node_to_json(self, node):
162
- container = OmegaConf.to_container(node, resolve=True)
163
- return json.dumps(container, indent=4, sort_keys=True)
164
-
165
- def to_dict(self):
166
- return OmegaConf.to_container(self.config)
167
-
168
-
169
- def node_to_dict(node):
170
- return OmegaConf.to_container(node)
171
-
172
-
173
- class ConfigValidator:
174
- """
175
- This is a preliminary implementation to centralize and validate the configuration.
176
- May be altered in the future.
177
-
178
- A helper class to validate configurations from yaml file.
179
-
180
- This serves the following purposes:
181
- 1. Ensure all the options in the yaml are defined, raise error if not.
182
- 2. when type mismatches are found, the validator will raise an error.
183
- 3. a central place to store and display helpful messages for supported configurations.
184
-
185
- """
186
-
187
- class _Argument:
188
- def __init__(self, name, choices=None, type=None, help=None):
189
- self.name = name
190
- self.val = None
191
- self.choices = choices
192
- self.type = type
193
- self.help = help
194
-
195
- def __str__(self):
196
- s = f"{self.name}={self.val}"
197
- if self.type is not None:
198
- s += f", ({self.type})"
199
- if self.choices is not None:
200
- s += f", choices: {self.choices}"
201
- if self.help is not None:
202
- s += f", ({self.help})"
203
- return s
204
-
205
- def __init__(self, description):
206
- self.description = description
207
-
208
- self.arguments = dict()
209
-
210
- self.parsed_args = None
211
-
212
- def __getitem__(self, key):
213
- assert self.parsed_args is not None, "No arguments parsed yet."
214
-
215
- return self.parsed_args[key]
216
-
217
- def __str__(self) -> str:
218
- return self.format_help()
219
-
220
- def add_argument(self, *args, **kwargs):
221
- """
222
- Assume the first argument is the name of the argument.
223
- """
224
- self.arguments[args[0]] = self._Argument(*args, **kwargs)
225
-
226
- def validate(self, config=None):
227
- """
228
- Convert yaml config (dict-like) to list, required by argparse.
229
- """
230
- for k, v in config.items():
231
- assert (
232
- k in self.arguments
233
- ), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}."""
234
-
235
- if self.arguments[k].type is not None:
236
- try:
237
- self.arguments[k].val = self.arguments[k].type(v)
238
- except ValueError:
239
- raise ValueError(f"{k} is not a valid {self.arguments[k].type}.")
240
-
241
- if self.arguments[k].choices is not None:
242
- assert (
243
- v in self.arguments[k].choices
244
- ), f"""{k} must be one of {self.arguments[k].choices}."""
245
-
246
- return config
247
-
248
- def format_arguments(self):
249
- return str([f"{k}" for k in sorted(self.arguments.keys())])
250
-
251
- def format_help(self):
252
- # description + key-value pair string for each argument
253
- help_msg = str(self.description)
254
- return help_msg + ", available arguments: " + self.format_arguments()
255
-
256
- def print_help(self):
257
- # display help message
258
- print(self.format_help())
259
-
260
-
261
- def create_runner_config_validator():
262
- validator = ConfigValidator(description="Runner configurations")
263
-
264
- validator.add_argument(
265
- "runner",
266
- type=str,
267
- choices=["runner_base", "runner_iter"],
268
- help="""Runner to use. The "runner_base" uses epoch-based training while iter-based
269
- runner runs based on iters. Default: runner_base""",
270
- )
271
- # add argumetns for training dataset ratios
272
- validator.add_argument(
273
- "train_dataset_ratios",
274
- type=Dict[str, float],
275
- help="""Ratios of training dataset. This is used in iteration-based runner.
276
- Do not support for epoch-based runner because how to define an epoch becomes tricky.
277
- Default: None""",
278
- )
279
- validator.add_argument(
280
- "max_iters",
281
- type=float,
282
- help="Maximum number of iterations to run.",
283
- )
284
- validator.add_argument(
285
- "max_epoch",
286
- type=int,
287
- help="Maximum number of epochs to run.",
288
- )
289
- # add arguments for iters_per_inner_epoch
290
- validator.add_argument(
291
- "iters_per_inner_epoch",
292
- type=float,
293
- help="Number of iterations per inner epoch. This is required when runner is runner_iter.",
294
- )
295
- lr_scheds_choices = registry.list_lr_schedulers()
296
- validator.add_argument(
297
- "lr_sched",
298
- type=str,
299
- choices=lr_scheds_choices,
300
- help="Learning rate scheduler to use, from {}".format(lr_scheds_choices),
301
- )
302
- task_choices = registry.list_tasks()
303
- validator.add_argument(
304
- "task",
305
- type=str,
306
- choices=task_choices,
307
- help="Task to use, from {}".format(task_choices),
308
- )
309
- # add arguments for init_lr
310
- validator.add_argument(
311
- "init_lr",
312
- type=float,
313
- help="Initial learning rate. This will be the learning rate after warmup and before decay.",
314
- )
315
- # add arguments for min_lr
316
- validator.add_argument(
317
- "min_lr",
318
- type=float,
319
- help="Minimum learning rate (after decay).",
320
- )
321
- # add arguments for warmup_lr
322
- validator.add_argument(
323
- "warmup_lr",
324
- type=float,
325
- help="Starting learning rate for warmup.",
326
- )
327
- # add arguments for learning rate decay rate
328
- validator.add_argument(
329
- "lr_decay_rate",
330
- type=float,
331
- help="Learning rate decay rate. Required if using a decaying learning rate scheduler.",
332
- )
333
- # add arguments for weight decay
334
- validator.add_argument(
335
- "weight_decay",
336
- type=float,
337
- help="Weight decay rate.",
338
- )
339
- # add arguments for training batch size
340
- validator.add_argument(
341
- "batch_size_train",
342
- type=int,
343
- help="Training batch size.",
344
- )
345
- # add arguments for evaluation batch size
346
- validator.add_argument(
347
- "batch_size_eval",
348
- type=int,
349
- help="Evaluation batch size, including validation and testing.",
350
- )
351
- # add arguments for number of workers for data loading
352
- validator.add_argument(
353
- "num_workers",
354
- help="Number of workers for data loading.",
355
- )
356
- # add arguments for warm up steps
357
- validator.add_argument(
358
- "warmup_steps",
359
- type=int,
360
- help="Number of warmup steps. Required if a warmup schedule is used.",
361
- )
362
- # add arguments for random seed
363
- validator.add_argument(
364
- "seed",
365
- type=int,
366
- help="Random seed.",
367
- )
368
- # add arguments for output directory
369
- validator.add_argument(
370
- "output_dir",
371
- type=str,
372
- help="Output directory to save checkpoints and logs.",
373
- )
374
- # add arguments for whether only use evaluation
375
- validator.add_argument(
376
- "evaluate",
377
- help="Whether to only evaluate the model. If true, training will not be performed.",
378
- )
379
- # add arguments for splits used for training, e.g. ["train", "val"]
380
- validator.add_argument(
381
- "train_splits",
382
- type=list,
383
- help="Splits to use for training.",
384
- )
385
- # add arguments for splits used for validation, e.g. ["val"]
386
- validator.add_argument(
387
- "valid_splits",
388
- type=list,
389
- help="Splits to use for validation. If not provided, will skip the validation.",
390
- )
391
- # add arguments for splits used for testing, e.g. ["test"]
392
- validator.add_argument(
393
- "test_splits",
394
- type=list,
395
- help="Splits to use for testing. If not provided, will skip the testing.",
396
- )
397
- # add arguments for accumulating gradient for iterations
398
- validator.add_argument(
399
- "accum_grad_iters",
400
- type=int,
401
- help="Number of iterations to accumulate gradient for.",
402
- )
403
-
404
- # ====== distributed training ======
405
- validator.add_argument(
406
- "device",
407
- type=str,
408
- choices=["cpu", "cuda"],
409
- help="Device to use. Support 'cuda' or 'cpu' as for now.",
410
- )
411
- validator.add_argument(
412
- "world_size",
413
- type=int,
414
- help="Number of processes participating in the job.",
415
- )
416
- validator.add_argument("dist_url", type=str)
417
- validator.add_argument("distributed", type=bool)
418
- # add arguments to opt using distributed sampler during evaluation or not
419
- validator.add_argument(
420
- "use_dist_eval_sampler",
421
- type=bool,
422
- help="Whether to use distributed sampler during evaluation or not.",
423
- )
424
-
425
- # ====== task specific ======
426
- # generation task specific arguments
427
- # add arguments for maximal length of text output
428
- validator.add_argument(
429
- "max_len",
430
- type=int,
431
- help="Maximal length of text output.",
432
- )
433
- # add arguments for minimal length of text output
434
- validator.add_argument(
435
- "min_len",
436
- type=int,
437
- help="Minimal length of text output.",
438
- )
439
- # add arguments number of beams
440
- validator.add_argument(
441
- "num_beams",
442
- type=int,
443
- help="Number of beams used for beam search.",
444
- )
445
-
446
- # vqa task specific arguments
447
- # add arguments for number of answer candidates
448
- validator.add_argument(
449
- "num_ans_candidates",
450
- type=int,
451
- help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""",
452
- )
453
- # add arguments for inference method
454
- validator.add_argument(
455
- "inference_method",
456
- type=str,
457
- choices=["genearte", "rank"],
458
- help="""Inference method to use for question answering. If rank, requires a answer list.""",
459
- )
460
-
461
- # ====== model specific ======
462
- validator.add_argument(
463
- "k_test",
464
- type=int,
465
- help="Number of top k most similar samples from ITC/VTC selection to be tested.",
466
- )
467
-
468
- return validator
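
The Config class above leans entirely on OmegaConf's merge semantics: --options strings arrive as a dotlist, and configs later in the merge call override earlier ones. A small sketch with hypothetical keys:

from omegaconf import OmegaConf

defaults = OmegaConf.create({"run": {"max_epoch": 10, "seed": 42}})
user_opts = OmegaConf.from_dotlist(["run.max_epoch=20"])  # e.g. from --options
merged = OmegaConf.merge(defaults, user_opts)  # later configs win
print(merged.run.max_epoch, merged.run.seed)  # 20 42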
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/apply_net.py DELETED
@@ -1,318 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
- import argparse
5
- import glob
6
- import logging
7
- import os
8
- import pickle
9
- import sys
10
- from typing import Any, ClassVar, Dict, List
11
- import torch
12
-
13
- from detectron2.config import get_cfg
14
- from detectron2.data.detection_utils import read_image
15
- from detectron2.engine.defaults import DefaultPredictor
16
- from detectron2.structures.boxes import BoxMode
17
- from detectron2.structures.instances import Instances
18
- from detectron2.utils.logger import setup_logger
19
-
20
- from densepose import add_densepose_config
21
- from densepose.utils.logger import verbosity_to_level
22
- from densepose.vis.base import CompoundVisualizer
23
- from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
24
- from densepose.vis.densepose import (
25
- DensePoseResultsContourVisualizer,
26
- DensePoseResultsFineSegmentationVisualizer,
27
- DensePoseResultsUVisualizer,
28
- DensePoseResultsVVisualizer,
29
- )
30
- from densepose.vis.extractor import CompoundExtractor, create_extractor
31
-
32
- DOC = """Apply Net - a tool to print / visualize DensePose results
33
- """
34
-
35
- LOGGER_NAME = "apply_net"
36
- logger = logging.getLogger(LOGGER_NAME)
37
-
38
- _ACTION_REGISTRY: Dict[str, "Action"] = {}
39
-
40
-
41
- class Action(object):
42
- @classmethod
43
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
44
- parser.add_argument(
45
- "-v",
46
- "--verbosity",
47
- action="count",
48
- help="Verbose mode. Multiple -v options increase the verbosity.",
49
- )
50
-
51
-
52
- def register_action(cls: type):
53
- """
54
- Decorator for action classes to automate action registration
55
- """
56
- global _ACTION_REGISTRY
57
- _ACTION_REGISTRY[cls.COMMAND] = cls
58
- return cls
59
-
60
-
61
- class InferenceAction(Action):
62
- @classmethod
63
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
64
- super(InferenceAction, cls).add_arguments(parser)
65
- parser.add_argument("cfg", metavar="<config>", help="Config file")
66
- parser.add_argument("model", metavar="<model>", help="Model file")
67
- parser.add_argument("input", metavar="<input>", help="Input data")
68
- parser.add_argument(
69
- "--opts",
70
- help="Modify config options using the command-line 'KEY VALUE' pairs",
71
- default=[],
72
- nargs=argparse.REMAINDER,
73
- )
74
-
75
- @classmethod
76
- def execute(cls: type, args: argparse.Namespace):
77
- logger.info(f"Loading config from {args.cfg}")
78
- opts = []
79
- cfg = cls.setup_config(args.cfg, args.model, args, opts)
80
- logger.info(f"Loading model from {args.model}")
81
- predictor = DefaultPredictor(cfg)
82
- logger.info(f"Loading data from {args.input}")
83
- file_list = cls._get_input_file_list(args.input)
84
- if len(file_list) == 0:
85
- logger.warning(f"No input images for {args.input}")
86
- return
87
- context = cls.create_context(args)
88
- for file_name in file_list:
89
- img = read_image(file_name, format="BGR") # predictor expects BGR image.
90
- with torch.no_grad():
91
- outputs = predictor(img)["instances"]
92
- cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs)
93
- cls.postexecute(context)
94
-
95
- @classmethod
96
- def setup_config(
97
- cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
98
- ):
99
- cfg = get_cfg()
100
- add_densepose_config(cfg)
101
- cfg.merge_from_file(config_fpath)
102
- cfg.merge_from_list(args.opts)
103
- if opts:
104
- cfg.merge_from_list(opts)
105
- cfg.MODEL.WEIGHTS = model_fpath
106
- cfg.freeze()
107
- return cfg
108
-
109
- @classmethod
110
- def _get_input_file_list(cls: type, input_spec: str):
111
- if os.path.isdir(input_spec):
112
- file_list = [
113
- os.path.join(input_spec, fname)
114
- for fname in os.listdir(input_spec)
115
- if os.path.isfile(os.path.join(input_spec, fname))
116
- ]
117
- elif os.path.isfile(input_spec):
118
- file_list = [input_spec]
119
- else:
120
- file_list = glob.glob(input_spec)
121
- return file_list
122
-
123
-
124
- @register_action
125
- class DumpAction(InferenceAction):
126
- """
127
- Dump action that outputs results to a pickle file
128
- """
129
-
130
- COMMAND: ClassVar[str] = "dump"
131
-
132
- @classmethod
133
- def add_parser(cls: type, subparsers: argparse._SubParsersAction):
134
- parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
135
- cls.add_arguments(parser)
136
- parser.set_defaults(func=cls.execute)
137
-
138
- @classmethod
139
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
140
- super(DumpAction, cls).add_arguments(parser)
141
- parser.add_argument(
142
- "--output",
143
- metavar="<dump_file>",
144
- default="results.pkl",
145
- help="File name to save dump to",
146
- )
147
-
148
- @classmethod
149
- def execute_on_outputs(
150
- cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
151
- ):
152
- image_fpath = entry["file_name"]
153
- logger.info(f"Processing {image_fpath}")
154
- result = {"file_name": image_fpath}
155
- if outputs.has("scores"):
156
- result["scores"] = outputs.get("scores").cpu()
157
- if outputs.has("pred_boxes"):
158
- result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
159
- if outputs.has("pred_densepose"):
160
- boxes_XYWH = BoxMode.convert(
161
- result["pred_boxes_XYXY"], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
162
- )
163
- result["pred_densepose"] = outputs.get("pred_densepose").to_result(boxes_XYWH)
164
- context["results"].append(result)
165
-
166
- @classmethod
167
- def create_context(cls: type, args: argparse.Namespace):
168
- context = {"results": [], "out_fname": args.output}
169
- return context
170
-
171
- @classmethod
172
- def postexecute(cls: type, context: Dict[str, Any]):
173
- out_fname = context["out_fname"]
174
- out_dir = os.path.dirname(out_fname)
175
- if len(out_dir) > 0 and not os.path.exists(out_dir):
176
- os.makedirs(out_dir)
177
- with open(out_fname, "wb") as hFile:
178
- pickle.dump(context["results"], hFile)
179
- logger.info(f"Output saved to {out_fname}")
180
-
181
-
182
- @register_action
183
- class ShowAction(InferenceAction):
184
- """
185
- Show action that visualizes selected entries on an image
186
- """
187
-
188
- COMMAND: ClassVar[str] = "show"
189
- VISUALIZERS: ClassVar[Dict[str, object]] = {
190
- "dp_contour": DensePoseResultsContourVisualizer,
191
- "dp_segm": DensePoseResultsFineSegmentationVisualizer,
192
- "dp_u": DensePoseResultsUVisualizer,
193
- "dp_v": DensePoseResultsVVisualizer,
194
- "bbox": ScoredBoundingBoxVisualizer,
195
- }
196
-
197
- @classmethod
198
- def add_parser(cls: type, subparsers: argparse._SubParsersAction):
199
- parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
200
- cls.add_arguments(parser)
201
- parser.set_defaults(func=cls.execute)
202
-
203
- @classmethod
204
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
205
- super(ShowAction, cls).add_arguments(parser)
206
- parser.add_argument(
207
- "visualizations",
208
- metavar="<visualizations>",
209
- help="Comma separated list of visualizations, possible values: "
210
- "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
211
- )
212
- parser.add_argument(
213
- "--min_score",
214
- metavar="<score>",
215
- default=0.8,
216
- type=float,
217
- help="Minimum detection score to visualize",
218
- )
219
- parser.add_argument(
220
- "--nms_thresh", metavar="<threshold>", default=None, type=float, help="NMS threshold"
221
- )
222
- parser.add_argument(
223
- "--output",
224
- metavar="<image_file>",
225
- default="outputres.png",
226
- help="File name to save output to",
227
- )
228
-
229
- @classmethod
230
- def setup_config(
231
- cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
232
- ):
233
- opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
234
- opts.append(str(args.min_score))
235
- if args.nms_thresh is not None:
236
- opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST")
237
- opts.append(str(args.nms_thresh))
238
- cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts)
239
- return cfg
240
-
241
- @classmethod
242
- def execute_on_outputs(
243
- cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
244
- ):
245
- import cv2
246
- import numpy as np
247
-
248
- visualizer = context["visualizer"]
249
- extractor = context["extractor"]
250
- image_fpath = entry["file_name"]
251
- logger.info(f"Processing {image_fpath}")
252
- image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
253
- image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
254
- data = extractor(outputs)
255
- image_vis = visualizer.visualize(image, data)
256
- entry_idx = context["entry_idx"] + 1
257
- out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
258
- out_dir = os.path.dirname(out_fname)
259
- if len(out_dir) > 0 and not os.path.exists(out_dir):
260
- os.makedirs(out_dir)
261
- cv2.imwrite(out_fname, image_vis)
262
- logger.info(f"Output saved to {out_fname}")
263
- context["entry_idx"] += 1
264
-
265
- @classmethod
266
- def postexecute(cls: type, context: Dict[str, Any]):
267
- pass
268
-
269
- @classmethod
270
- def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
271
- base, ext = os.path.splitext(fname_base)
272
- return base + ".{0:04d}".format(entry_idx) + ext
273
-
274
- @classmethod
275
- def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
276
- vis_specs = args.visualizations.split(",")
277
- visualizers = []
278
- extractors = []
279
- for vis_spec in vis_specs:
280
- vis = cls.VISUALIZERS[vis_spec]()
281
- visualizers.append(vis)
282
- extractor = create_extractor(vis)
283
- extractors.append(extractor)
284
- visualizer = CompoundVisualizer(visualizers)
285
- extractor = CompoundExtractor(extractors)
286
- context = {
287
- "extractor": extractor,
288
- "visualizer": visualizer,
289
- "out_fname": args.output,
290
- "entry_idx": 0,
291
- }
292
- return context
293
-
294
-
295
- def create_argument_parser() -> argparse.ArgumentParser:
296
- parser = argparse.ArgumentParser(
297
- description=DOC,
298
- formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
299
- )
300
- parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
301
- subparsers = parser.add_subparsers(title="Actions")
302
- for _, action in _ACTION_REGISTRY.items():
303
- action.add_parser(subparsers)
304
- return parser
305
-
306
-
307
- def main():
308
- parser = create_argument_parser()
309
- args = parser.parse_args()
310
- verbosity = args.verbosity if hasattr(args, "verbosity") else None
311
- global logger
312
- logger = setup_logger(name=LOGGER_NAME)
313
- logger.setLevel(verbosity_to_level(verbosity))
314
- args.func(args)
315
-
316
-
317
- if __name__ == "__main__":
318
- main()
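The listing above is organized around a decorator-driven action registry feeding argparse subparsers. A minimal, self-contained sketch of that pattern follows; the `greet` command and its argument are hypothetical placeholders, not part of DensePose.

```python
import argparse
import sys
from typing import Dict

_REGISTRY: Dict[str, type] = {}


def register_action(cls: type) -> type:
    """Record each action class under its COMMAND name."""
    _REGISTRY[cls.COMMAND] = cls
    return cls


@register_action
class GreetAction:
    COMMAND = "greet"  # hypothetical command, for illustration only

    @classmethod
    def add_parser(cls, subparsers: argparse._SubParsersAction) -> None:
        parser = subparsers.add_parser(cls.COMMAND, help="Print a greeting.")
        parser.add_argument("name", metavar="<name>")
        parser.set_defaults(func=cls.execute)

    @classmethod
    def execute(cls, args: argparse.Namespace) -> None:
        print(f"Hello, {args.name}!")


def main() -> None:
    parser = argparse.ArgumentParser()
    # With no subcommand given, fall back to printing help, as apply_net.py does.
    parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
    subparsers = parser.add_subparsers(title="Actions")
    for action in _REGISTRY.values():
        action.add_parser(subparsers)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
```

Given the parsers defined in the deleted script, it would have been invoked along the lines of `python apply_net.py show config.yaml model.pkl image.jpg dp_contour,bbox --min_score 0.9` (file names hypothetical).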
 
spaces/CVPR/LIVE/pybind11/tests/test_eval.cpp DELETED
@@ -1,91 +0,0 @@
1
- /*
2
- tests/test_eval.cpp -- Usage of eval() and eval_file()
3
-
4
- Copyright (c) 2016 Klemens D. Morgenstern
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
-
11
- #include <pybind11/eval.h>
12
- #include "pybind11_tests.h"
13
-
14
- TEST_SUBMODULE(eval_, m) {
15
- // test_evals
16
-
17
- auto global = py::dict(py::module::import("__main__").attr("__dict__"));
18
-
19
- m.def("test_eval_statements", [global]() {
20
- auto local = py::dict();
21
- local["call_test"] = py::cpp_function([&]() -> int {
22
- return 42;
23
- });
24
-
25
- // Regular string literal
26
- py::exec(
27
- "message = 'Hello World!'\n"
28
- "x = call_test()",
29
- global, local
30
- );
31
-
32
- // Multi-line raw string literal
33
- py::exec(R"(
34
- if x == 42:
35
- print(message)
36
- else:
37
- raise RuntimeError
38
- )", global, local
39
- );
40
- auto x = local["x"].cast<int>();
41
-
42
- return x == 42;
43
- });
44
-
45
- m.def("test_eval", [global]() {
46
- auto local = py::dict();
47
- local["x"] = py::int_(42);
48
- auto x = py::eval("x", global, local);
49
- return x.cast<int>() == 42;
50
- });
51
-
52
- m.def("test_eval_single_statement", []() {
53
- auto local = py::dict();
54
- local["call_test"] = py::cpp_function([&]() -> int {
55
- return 42;
56
- });
57
-
58
- auto result = py::eval<py::eval_single_statement>("x = call_test()", py::dict(), local);
59
- auto x = local["x"].cast<int>();
60
- return result.is_none() && x == 42;
61
- });
62
-
63
- m.def("test_eval_file", [global](py::str filename) {
64
- auto local = py::dict();
65
- local["y"] = py::int_(43);
66
-
67
- int val_out;
68
- local["call_test2"] = py::cpp_function([&](int value) { val_out = value; });
69
-
70
- auto result = py::eval_file(filename, global, local);
71
- return val_out == 43 && result.is_none();
72
- });
73
-
74
- m.def("test_eval_failure", []() {
75
- try {
76
- py::eval("nonsense code ...");
77
- } catch (py::error_already_set &) {
78
- return true;
79
- }
80
- return false;
81
- });
82
-
83
- m.def("test_eval_file_failure", []() {
84
- try {
85
- py::eval_file("non-existing file");
86
- } catch (std::exception &) {
87
- return true;
88
- }
89
- return false;
90
- });
91
- }
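The C++ tests above exercise pybind11's `py::eval`/`py::exec`, whose semantics mirror Python's built-in `eval`/`exec` with explicit globals/locals dictionaries. A sketch of the same behavior on the Python side:

```python
# Python-side equivalents of the assertions in test_eval.cpp.
globals_dict: dict = {}
locals_dict = {"call_test": lambda: 42}

# Mirrors py::exec of a statement block against explicit scopes.
exec("message = 'Hello World!'\nx = call_test()", globals_dict, locals_dict)
assert locals_dict["x"] == 42

# Mirrors py::eval of a single expression.
assert eval("x", globals_dict, locals_dict) == 42

# Mirrors test_eval_failure: invalid source raises SyntaxError, which
# pybind11 surfaces as py::error_already_set on the C++ side.
try:
    eval("nonsense code ...")
except SyntaxError:
    print("eval failed as expected")
```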
 
spaces/CVPR/LIVE/thrust/cmake/ThrustCudaConfig.cmake DELETED
@@ -1,140 +0,0 @@
1
- enable_language(CUDA)
2
-
3
- set(THRUST_KNOWN_COMPUTE_ARCHS 35 37 50 52 53 60 61 62 70 72 75 80)
4
-
5
- # Split CUDA_FLAGS into 3 parts:
6
- #
7
- # THRUST_CUDA_FLAGS_BASE: Common CUDA flags for all targets.
8
- # THRUST_CUDA_FLAGS_RDC: Additional CUDA flags for targets compiled with RDC.
9
- # THRUST_CUDA_FLAGS_NO_RDC: Additional CUDA flags for targets compiled without RDC.
10
- #
11
- # This is necessary because CUDA SMs 5.3, 6.2, and 7.2 do not support RDC, but
12
- # we want to always build some targets (e.g. testing/cuda/*) with RDC.
13
- # We work around this by building the "always RDC" targets without support for
14
- # those SMs. This requires two sets of CUDA_FLAGS.
15
- #
16
- # Enabling any of those SMs along with the ENABLE_RDC options will result in a
17
- # configuration error.
18
- #
19
- # Because of how CMake handles the CMAKE_CUDA_FLAGS variables, every target
20
- # generated in a given directory will use the same value for CMAKE_CUDA_FLAGS,
21
- # which is determined at the end of the directory's scope. This means caution
22
- # should be used when trying to build different targets with different flags,
23
- # since they might not behave as expected. This will improve with CMake 3.18,
24
- # which adds the DEVICE_LINK genex, fixing the issue with using per-target
25
- # CUDA_FLAGS: https://gitlab.kitware.com/cmake/cmake/-/issues/18265
26
- set(THRUST_CUDA_FLAGS_BASE "${CMAKE_CUDA_FLAGS}")
27
- set(THRUST_CUDA_FLAGS_RDC)
28
- set(THRUST_CUDA_FLAGS_NO_RDC)
29
-
30
- # Archs that don't support RDC:
31
- set(no_rdc_archs 53 62 72)
32
-
33
- # Find the highest arch:
34
- list(SORT THRUST_KNOWN_COMPUTE_ARCHS)
35
- list(LENGTH THRUST_KNOWN_COMPUTE_ARCHS max_idx)
36
- math(EXPR max_idx "${max_idx} - 1")
37
- list(GET THRUST_KNOWN_COMPUTE_ARCHS ${max_idx} highest_arch)
38
-
39
- set(option_init OFF)
40
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
41
- set(option_init ON)
42
- endif()
43
- option(THRUST_DISABLE_ARCH_BY_DEFAULT
44
- "If ON, then all CUDA architectures are disabled on the initial CMake run."
45
- ${option_init}
46
- )
47
-
48
- set(option_init ON)
49
- if (THRUST_DISABLE_ARCH_BY_DEFAULT)
50
- set(option_init OFF)
51
- endif()
52
-
53
- set(num_archs_enabled 0)
54
- foreach (arch IN LISTS THRUST_KNOWN_COMPUTE_ARCHS)
55
- option(THRUST_ENABLE_COMPUTE_${arch}
56
- "Enable code generation for tests for sm_${arch}"
57
- ${option_init}
58
- )
59
-
60
- if (NOT THRUST_ENABLE_COMPUTE_${arch})
61
- continue()
62
- endif()
63
-
64
- math(EXPR num_archs_enabled "${num_archs_enabled} + 1")
65
-
66
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
67
- if (NOT ${num_archs_enabled} EQUAL 1)
68
- message(FATAL_ERROR
69
- "Feta does not support compilation for multiple device architectures "
70
- "at once."
71
- )
72
- endif()
73
- set(arch_flag "-gpu=cc${arch}")
74
- else()
75
- set(arch_flag "-gencode arch=compute_${arch},code=sm_${arch}")
76
- endif()
77
-
78
- string(APPEND COMPUTE_MESSAGE " sm_${arch}")
79
- string(APPEND THRUST_CUDA_FLAGS_NO_RDC " ${arch_flag}")
80
- if (NOT arch IN_LIST no_rdc_archs)
81
- string(APPEND THRUST_CUDA_FLAGS_RDC " ${arch_flag}")
82
- endif()
83
- endforeach()
84
-
85
- if (NOT "Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
86
- option(THRUST_ENABLE_COMPUTE_FUTURE
87
- "Enable code generation for tests for compute_${highest_arch}"
88
- ${option_init}
89
- )
90
- if (THRUST_ENABLE_COMPUTE_FUTURE)
91
- string(APPEND THRUST_CUDA_FLAGS_BASE
92
- " -gencode arch=compute_${highest_arch},code=compute_${highest_arch}"
93
- )
94
- string(APPEND COMPUTE_MESSAGE " compute_${highest_arch}")
95
- endif()
96
- endif()
97
-
98
- message(STATUS "Thrust: Enabled CUDA architectures:${COMPUTE_MESSAGE}")
99
-
100
- # RDC is off by default in NVCC and on by default in Feta. Turning off RDC
101
- # isn't currently supported by Feta. So, we default to RDC off for NVCC and
102
- # RDC on for Feta.
103
- set(option_init OFF)
104
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
105
- set(option_init ON)
106
- endif()
107
-
108
- option(THRUST_ENABLE_TESTS_WITH_RDC
109
- "Build all Thrust tests with RDC; tests that require RDC are not affected by this option."
110
- ${option_init}
111
- )
112
-
113
- option(THRUST_ENABLE_EXAMPLES_WITH_RDC
114
- "Build all Thrust examples with RDC; examples which require RDC are not affected by this option."
115
- ${option_init}
116
- )
117
-
118
- # Check for RDC/SM compatibility and error/warn if necessary
119
- foreach (sm IN LISTS no_rdc_archs)
120
- set(sm_opt THRUST_ENABLE_COMPUTE_${sm})
121
- if (${sm_opt})
122
- foreach (opt IN ITEMS TESTS EXAMPLES)
123
- set(rdc_opt THRUST_ENABLE_${opt}_WITH_RDC)
124
- if (${rdc_opt})
125
- message(FATAL_ERROR
126
- "${rdc_opt} is incompatible with ${sm_opt}, since sm_${sm} does not "
127
- "support RDC."
128
- )
129
- endif()
130
- endforeach()
131
-
132
- message(NOTICE
133
- "sm_${sm} does not support RDC. Targets that require RDC will be built "
134
- "without support for this architecture."
135
- )
136
- endif()
137
- endforeach()
138
-
139
- # By default RDC is not used:
140
- set(CMAKE_CUDA_FLAGS "${THRUST_CUDA_FLAGS_BASE} ${THRUST_CUDA_FLAGS_NO_RDC}")
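The heart of this CMake file is splitting per-architecture `-gencode` flags into an RDC set and a no-RDC set, with sm_53/62/72 excluded from the former. A Python sketch of that split logic, for illustration only (not part of Thrust):

```python
# Illustration of the RDC/no-RDC flag split ThrustCudaConfig.cmake performs:
# every enabled arch contributes to the no-RDC flag set, while archs that
# lack RDC support are excluded from the RDC flag set.
NO_RDC_ARCHS = {53, 62, 72}


def split_arch_flags(enabled_archs):
    flags_no_rdc, flags_rdc = [], []
    for arch in sorted(enabled_archs):
        flag = f"-gencode arch=compute_{arch},code=sm_{arch}"
        flags_no_rdc.append(flag)
        if arch not in NO_RDC_ARCHS:
            flags_rdc.append(flag)
    return flags_no_rdc, flags_rdc


no_rdc, rdc = split_arch_flags([53, 60, 70, 80])
print(no_rdc)  # four -gencode flags
print(rdc)     # three flags; sm_53 is excluded
```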
 
spaces/ChallengeHub/Chinese-LangChain/README.md DELETED
@@ -1,116 +0,0 @@
1
- ---
2
- license: openrail
3
- title: 'Chinese-LangChain '
4
- sdk: gradio
5
- emoji: 🚀
6
- colorFrom: yellow
7
- colorTo: yellow
8
- pinned: true
9
- app_file: app.py
10
- ---
11
-
12
- # Chinese-LangChain
13
-
14
- > Chinese-LangChain: a Chinese langchain project that implements local knowledge-base retrieval and intelligent answer generation on top of ChatGLM-6b + langchain
15
-
16
- https://github.com/yanqiangmiffy/Chinese-LangChain
17
-
18
- Also known as: Xiao Biying ("Little Bing"), Q.Talk, QiangLiao, QiangTalk
19
-
20
- ## 🔥 Demo
21
-
22
- ![](https://github.com/yanqiangmiffy/Chinese-LangChain/blob/master/images/web_demos/v1.png)
23
- ![](https://github.com/yanqiangmiffy/Chinese-LangChain/blob/master/images/web_demos/v3.png)
24
-
25
- ## 🚋 Usage
26
-
27
- - Select a knowledge base, then ask questions about that domain
28
-
29
- ## 🏗️ Deployment
30
-
31
- ### Runtime requirements
32
-
33
- - GPU memory: 12 GB (9 GB is enough in practice)
34
- - RAM: 32 GB
35
-
36
- ### Environment
37
-
38
- ```text
39
- langchain
40
- gradio
41
- transformers
42
- sentence_transformers
43
- faiss-cpu
44
- unstructured
45
- duckduckgo_search
46
- mdtex2html
47
- chardet
48
- cchardet
49
- ```
50
-
51
- ### Launch Gradio
52
-
53
- ```shell
54
- python main.py
55
- ```
56
-
57
- ## 🚀 Features
58
-
59
- - 🔭 2023/04/20 Support switching between plain-model QA and retrieval QA
60
- - 💻 2023/04/20 Thanks to HF for providing free compute; added a HuggingFace
61
- Spaces online demo [🤗 DEMO](https://huggingface.co/spaces/ChallengeHub/Chinese-LangChain)
62
- - 🧫 2023/04/19 Released a preprocessed text corpus of 450k Wikipedia articles plus FAISS index vectors
63
- - 🐯 2023/04/19 Adopted the ChuanhuChatGPT skin
64
- - 📱 2023/04/19 Added web search; make sure your network connection is up! (thanks to [@wanghao07456](https://github.com/wanghao07456) for the idea)
65
- - 📚 2023/04/18 Added knowledge-base selection to the web UI
66
- - 🚀 2023/04/18 Fixed the 5 s inference timeout error
67
- - 🎉 2023/04/17 Support uploading and parsing multiple document types: pdf, docx, ppt, etc.
68
- - 🎉 2023/04/17 Support incremental knowledge updates
69
-
70
- [//]: # (- Support comparing retrieval results with LLM-generated results)
71
-
72
- ## 🧰 Knowledge base
73
-
74
- ### Building the knowledge base
75
-
76
- - Wikipedia-zh
77
-
78
- > See corpus/zh_wikipedia/README.md for details
79
-
80
- ### Knowledge-base vector indexes
81
-
82
- | Knowledge-base data | FAISS vectors |
83
- |-------------------------------------------------------------------------------|----------------------------------------------------------------------|
84
- | Chinese Wikipedia as of this April, 450k articles | link: https://pan.baidu.com/s/1VQeA_dq92fxKOtLL3u3Zpg?pwd=l3pn access code: l3pn |
85
- | 1.3M Chinese Wikipedia articles processed as of last September, with matching faiss vector files @[yubuyuabc](https://github.com/yubuyuabc) | link: https://pan.baidu.com/s/1Yls_Qtg15W1gneNuFP9O_w?pwd=exij access code: exij |
86
- | 💹 [Large-scale financial research report knowledge graph](http://openkg.cn/dataset/fr2kg) | link: https://pan.baidu.com/s/1FcIH5Fi3EfpS346DnDu51Q?pwd=ujjv access code: ujjv |
87
-
88
- ## 🔨 TODO
89
-
90
- * [x] Support conversation context
91
- * [x] Support incremental knowledge updates
92
- * [x] Support loading different knowledge bases
93
- * [x] Support comparing retrieval results with LLM-generated results
94
- * [ ] Support comparing retrieval-augmented generation with raw LLM generation
95
- * [ ] Support both model QA and retrieval QA
96
- * [ ] Filter and rank retrieval results
97
- * [x] Integrate internet search results
98
- * [ ] Fix model initialization issues
99
- * [ ] Add non-LangChain strategies
100
- * [ ] Display the current dialogue strategy
101
- * [ ] Build a knowledge base for a vertical business scenario rather than a general-purpose one
102
-
103
- ## Contact
104
-
105
- Suggestions and bad cases are welcome; the project is still rough around the edges, so feel free to join the group chat for timely discussion, and PRs are welcome too</br>
106
-
107
- <figure class="third">
108
- <img src="https://raw.githubusercontent.com/yanqiangmiffy/Chinese-LangChain/master/images/ch.jpg" width="180px"><img src="https://raw.githubusercontent.com/yanqiangmiffy/Chinese-LangChain/master/images/chatgroup.jpg" width="180px" height="270px"><img src="https://raw.githubusercontent.com/yanqiangmiffy/Chinese-LangChain/master/images/personal.jpg" width="180px">
109
- </figure>
110
-
111
- ## ❤️ Credits
112
-
113
- - Web UI reference: https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui
114
- - Knowledge QA reference: https://github.com/imClumsyPanda/langchain-ChatGLM
115
- - LLM model: https://github.com/THUDM/ChatGLM-6B
116
- - CSS: https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT
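For context, the stack listed in this README (sentence_transformers + faiss-cpu) implies a standard embed-index-search retrieval loop. A minimal sketch follows; the encoder name, documents, and query are assumptions for illustration, not taken from this repository:

```python
# Minimal retrieval sketch matching the listed stack. The model name,
# documents, and query below are hypothetical placeholders.
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("shibing624/text2vec-base-chinese")  # assumed encoder
docs = ["示例文档一", "示例文档二"]

embeddings = model.encode(docs, normalize_embeddings=True)
index = faiss.IndexFlatIP(embeddings.shape[1])  # inner product on normalized vectors
index.add(np.asarray(embeddings, dtype="float32"))

query_vec = model.encode(["示例查询"], normalize_embeddings=True)
scores, ids = index.search(np.asarray(query_vec, dtype="float32"), 1)
print(docs[ids[0][0]], scores[0][0])
```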
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/json_utils/utilities.py DELETED
@@ -1,54 +0,0 @@
1
- """Utilities for the json_fixes package."""
2
- import json
3
- import re
4
-
5
- from jsonschema import Draft7Validator
6
-
7
- from autogpt.config import Config
8
- from autogpt.logs import logger
9
-
10
- CFG = Config()
11
-
12
-
13
- def extract_char_position(error_message: str) -> int:
14
- """Extract the character position from the JSONDecodeError message.
15
-
16
- Args:
17
- error_message (str): The error message from the JSONDecodeError
18
- exception.
19
-
20
- Returns:
21
- int: The character position.
22
- """
23
-
24
- char_pattern = re.compile(r"\(char (\d+)\)")
25
- if match := char_pattern.search(error_message):
26
- return int(match[1])
27
- else:
28
- raise ValueError("Character position not found in the error message.")
29
-
30
-
31
- def validate_json(json_object: object, schema_name: object) -> object:
32
- """
33
- :type schema_name: object
34
- :param schema_name:
35
- :type json_object: object
36
- """
37
- with open(f"autogpt/json_utils/{schema_name}.json", "r") as f:
38
- schema = json.load(f)
39
- validator = Draft7Validator(schema)
40
-
41
- if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
42
- logger.error("The JSON object is invalid.")
43
- if CFG.debug_mode:
44
- logger.error(
45
- json.dumps(json_object, indent=4)
46
- ) # Replace 'json_object' with the variable containing the JSON data
47
- logger.error("The following issues were found:")
48
-
49
- for error in errors:
50
- logger.error(f"Error: {error.message}")
51
- elif CFG.debug_mode:
52
- print("The JSON object is valid.")
53
-
54
- return json_object
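A short usage sketch for the helpers above. It relies on the fact that CPython's `JSONDecodeError` messages end with "(char N)", which is exactly what `extract_char_position` parses; the import assumes the module is on the path shown in the file header:

```python
import json

from autogpt.json_utils.utilities import extract_char_position

try:
    json.loads('{"a": 1,}')  # trailing comma: invalid JSON
except json.JSONDecodeError as e:
    # e.g. "Expecting property name enclosed in double quotes: line 1 column 9 (char 8)"
    print(extract_char_position(str(e)))  # -> 8
```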
 
spaces/CikeyQI/meme-api/meme_generator/memes/capoo_rub/__init__.py DELETED
@@ -1,30 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from PIL.Image import Image as IMG
5
- from pil_utils import BuildImage
6
-
7
- from meme_generator import add_meme
8
- from meme_generator.utils import save_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def capoo_rub(images: List[BuildImage], texts, args):
14
- img = images[0].convert("RGBA").square().resize((180, 180))
15
- frames: List[IMG] = []
16
- locs = [
17
- (178, 184, 78, 260),
18
- (178, 174, 84, 269),
19
- (178, 174, 84, 269),
20
- (178, 178, 84, 264),
21
- ]
22
- for i in range(4):
23
- frame = BuildImage.open(img_dir / f"{i}.png")
24
- w, h, x, y = locs[i]
25
- frame.paste(img.resize((w, h)), (x, y), below=True)
26
- frames.append(frame.image)
27
- return save_gif(frames, 0.1)
28
-
29
-
30
- add_meme("capoo_rub", capoo_rub, min_images=1, max_images=1, keywords=["咖波蹭", "咖波贴"])
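The meme above reduces to pasting a resized avatar beneath each animation frame at per-frame coordinates and assembling the frames into a GIF. A pure-PIL sketch of the same idea (file names and the frame count are hypothetical; the real code uses `pil_utils.BuildImage`):

```python
from PIL import Image

avatar = Image.open("avatar.png").convert("RGBA").resize((180, 180))
frames = []
for i, (w, h, x, y) in enumerate([(178, 184, 78, 260), (178, 174, 84, 269)]):
    frame = Image.open(f"{i}.png").convert("RGBA")
    canvas = Image.new("RGBA", frame.size)
    canvas.paste(avatar.resize((w, h)), (x, y))  # avatar sits below the frame art
    canvas.alpha_composite(frame)
    frames.append(canvas)
# 100 ms per frame matches the 0.1 s used by save_gif above.
frames[0].save("out.gif", save_all=True, append_images=frames[1:], duration=100, loop=0)
```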
 
spaces/CognitiveLabs/Research-Assistant/test/test.py DELETED
@@ -1,17 +0,0 @@
1
- import gradio as gr
2
- import test2 as test2
3
- import test3 as test3
4
-
5
- theme = gr.themes.Soft(
6
- font_mono=[gr.themes.GoogleFont('Fira Code'), 'ui-monospace', 'Consolas', 'monospace'],
7
- ).set(
8
- embed_radius='*radius_md'
9
- )
10
-
11
- with gr.Blocks(theme=theme, title="AI Research Assistant") as demo:
12
- output = gr.Textbox(label="Output")
13
- button = gr.Button("Start")
14
- button.click(fn=test2.generator_, outputs=output)
15
-
16
- demo.queue()
17
- demo.launch()
 
spaces/CorvaeOboro/gen_ability_icon/dnnlib/util.py DELETED
@@ -1,477 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Miscellaneous utility classes and functions."""
10
-
11
- import ctypes
12
- import fnmatch
13
- import importlib
14
- import inspect
15
- import numpy as np
16
- import os
17
- import shutil
18
- import sys
19
- import types
20
- import io
21
- import pickle
22
- import re
23
- import requests
24
- import html
25
- import hashlib
26
- import glob
27
- import tempfile
28
- import urllib
29
- import urllib.request
30
- import uuid
31
-
32
- from distutils.util import strtobool
33
- from typing import Any, List, Tuple, Union
34
-
35
-
36
- # Util classes
37
- # ------------------------------------------------------------------------------------------
38
-
39
-
40
- class EasyDict(dict):
41
- """Convenience class that behaves like a dict but allows access with the attribute syntax."""
42
-
43
- def __getattr__(self, name: str) -> Any:
44
- try:
45
- return self[name]
46
- except KeyError:
47
- raise AttributeError(name)
48
-
49
- def __setattr__(self, name: str, value: Any) -> None:
50
- self[name] = value
51
-
52
- def __delattr__(self, name: str) -> None:
53
- del self[name]
54
-
55
-
56
- class Logger(object):
57
- """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
58
-
59
- def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
60
- self.file = None
61
-
62
- if file_name is not None:
63
- self.file = open(file_name, file_mode)
64
-
65
- self.should_flush = should_flush
66
- self.stdout = sys.stdout
67
- self.stderr = sys.stderr
68
-
69
- sys.stdout = self
70
- sys.stderr = self
71
-
72
- def __enter__(self) -> "Logger":
73
- return self
74
-
75
- def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
76
- self.close()
77
-
78
- def write(self, text: Union[str, bytes]) -> None:
79
- """Write text to stdout (and a file) and optionally flush."""
80
- if isinstance(text, bytes):
81
- text = text.decode()
82
- if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
83
- return
84
-
85
- if self.file is not None:
86
- self.file.write(text)
87
-
88
- self.stdout.write(text)
89
-
90
- if self.should_flush:
91
- self.flush()
92
-
93
- def flush(self) -> None:
94
- """Flush written text to both stdout and a file, if open."""
95
- if self.file is not None:
96
- self.file.flush()
97
-
98
- self.stdout.flush()
99
-
100
- def close(self) -> None:
101
- """Flush, close possible files, and remove stdout/stderr mirroring."""
102
- self.flush()
103
-
104
- # if using multiple loggers, prevent closing in wrong order
105
- if sys.stdout is self:
106
- sys.stdout = self.stdout
107
- if sys.stderr is self:
108
- sys.stderr = self.stderr
109
-
110
- if self.file is not None:
111
- self.file.close()
112
- self.file = None
113
-
114
-
115
- # Cache directories
116
- # ------------------------------------------------------------------------------------------
117
-
118
- _dnnlib_cache_dir = None
119
-
120
- def set_cache_dir(path: str) -> None:
121
- global _dnnlib_cache_dir
122
- _dnnlib_cache_dir = path
123
-
124
- def make_cache_dir_path(*paths: str) -> str:
125
- if _dnnlib_cache_dir is not None:
126
- return os.path.join(_dnnlib_cache_dir, *paths)
127
- if 'DNNLIB_CACHE_DIR' in os.environ:
128
- return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
129
- if 'HOME' in os.environ:
130
- return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
131
- if 'USERPROFILE' in os.environ:
132
- return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
133
- return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
134
-
135
- # Small util functions
136
- # ------------------------------------------------------------------------------------------
137
-
138
-
139
- def format_time(seconds: Union[int, float]) -> str:
140
- """Convert the seconds to human readable string with days, hours, minutes and seconds."""
141
- s = int(np.rint(seconds))
142
-
143
- if s < 60:
144
- return "{0}s".format(s)
145
- elif s < 60 * 60:
146
- return "{0}m {1:02}s".format(s // 60, s % 60)
147
- elif s < 24 * 60 * 60:
148
- return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
149
- else:
150
- return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
151
-
152
-
153
- def ask_yes_no(question: str) -> bool:
154
- """Ask the user the question until the user inputs a valid answer."""
155
- while True:
156
- try:
157
- print("{0} [y/n]".format(question))
158
- return strtobool(input().lower())
159
- except ValueError:
160
- pass
161
-
162
-
163
- def tuple_product(t: Tuple) -> Any:
164
- """Calculate the product of the tuple elements."""
165
- result = 1
166
-
167
- for v in t:
168
- result *= v
169
-
170
- return result
171
-
172
-
173
- _str_to_ctype = {
174
- "uint8": ctypes.c_ubyte,
175
- "uint16": ctypes.c_uint16,
176
- "uint32": ctypes.c_uint32,
177
- "uint64": ctypes.c_uint64,
178
- "int8": ctypes.c_byte,
179
- "int16": ctypes.c_int16,
180
- "int32": ctypes.c_int32,
181
- "int64": ctypes.c_int64,
182
- "float32": ctypes.c_float,
183
- "float64": ctypes.c_double
184
- }
185
-
186
-
187
- def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
188
- """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
189
- type_str = None
190
-
191
- if isinstance(type_obj, str):
192
- type_str = type_obj
193
- elif hasattr(type_obj, "__name__"):
194
- type_str = type_obj.__name__
195
- elif hasattr(type_obj, "name"):
196
- type_str = type_obj.name
197
- else:
198
- raise RuntimeError("Cannot infer type name from input")
199
-
200
- assert type_str in _str_to_ctype.keys()
201
-
202
- my_dtype = np.dtype(type_str)
203
- my_ctype = _str_to_ctype[type_str]
204
-
205
- assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
206
-
207
- return my_dtype, my_ctype
208
-
209
-
210
- def is_pickleable(obj: Any) -> bool:
211
- try:
212
- with io.BytesIO() as stream:
213
- pickle.dump(obj, stream)
214
- return True
215
- except:
216
- return False
217
-
218
-
219
- # Functionality to import modules/objects by name, and call functions by name
220
- # ------------------------------------------------------------------------------------------
221
-
222
- def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
223
- """Searches for the underlying module behind the name to some python object.
224
- Returns the module and the object name (original name with module part removed)."""
225
-
226
- # allow convenience shorthands, substitute them by full names
227
- obj_name = re.sub("^np.", "numpy.", obj_name)
228
- obj_name = re.sub("^tf.", "tensorflow.", obj_name)
229
-
230
- # list alternatives for (module_name, local_obj_name)
231
- parts = obj_name.split(".")
232
- name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
233
-
234
- # try each alternative in turn
235
- for module_name, local_obj_name in name_pairs:
236
- try:
237
- module = importlib.import_module(module_name) # may raise ImportError
238
- get_obj_from_module(module, local_obj_name) # may raise AttributeError
239
- return module, local_obj_name
240
- except:
241
- pass
242
-
243
- # maybe some of the modules themselves contain errors?
244
- for module_name, _local_obj_name in name_pairs:
245
- try:
246
- importlib.import_module(module_name) # may raise ImportError
247
- except ImportError:
248
- if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
249
- raise
250
-
251
- # maybe the requested attribute is missing?
252
- for module_name, local_obj_name in name_pairs:
253
- try:
254
- module = importlib.import_module(module_name) # may raise ImportError
255
- get_obj_from_module(module, local_obj_name) # may raise AttributeError
256
- except ImportError:
257
- pass
258
-
259
- # we are out of luck, but we have no idea why
260
- raise ImportError(obj_name)
261
-
262
-
263
- def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
264
- """Traverses the object name and returns the last (rightmost) python object."""
265
- if obj_name == '':
266
- return module
267
- obj = module
268
- for part in obj_name.split("."):
269
- obj = getattr(obj, part)
270
- return obj
271
-
272
-
273
- def get_obj_by_name(name: str) -> Any:
274
- """Finds the python object with the given name."""
275
- module, obj_name = get_module_from_obj_name(name)
276
- return get_obj_from_module(module, obj_name)
277
-
278
-
279
- def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
280
- """Finds the python object with the given name and calls it as a function."""
281
- assert func_name is not None
282
- func_obj = get_obj_by_name(func_name)
283
- assert callable(func_obj)
284
- return func_obj(*args, **kwargs)
285
-
286
-
287
- def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
288
- """Finds the python class with the given name and constructs it with the given arguments."""
289
- return call_func_by_name(*args, func_name=class_name, **kwargs)
290
-
291
-
292
- def get_module_dir_by_obj_name(obj_name: str) -> str:
293
- """Get the directory path of the module containing the given object name."""
294
- module, _ = get_module_from_obj_name(obj_name)
295
- return os.path.dirname(inspect.getfile(module))
296
-
297
-
298
- def is_top_level_function(obj: Any) -> bool:
299
- """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
300
- return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
301
-
302
-
303
- def get_top_level_function_name(obj: Any) -> str:
304
- """Return the fully-qualified name of a top-level function."""
305
- assert is_top_level_function(obj)
306
- module = obj.__module__
307
- if module == '__main__':
308
- module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
309
- return module + "." + obj.__name__
310
-
311
-
312
- # File system helpers
313
- # ------------------------------------------------------------------------------------------
314
-
315
- def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
316
- """List all files recursively in a given directory while ignoring given file and directory names.
317
- Returns list of tuples containing both absolute and relative paths."""
318
- assert os.path.isdir(dir_path)
319
- base_name = os.path.basename(os.path.normpath(dir_path))
320
-
321
- if ignores is None:
322
- ignores = []
323
-
324
- result = []
325
-
326
- for root, dirs, files in os.walk(dir_path, topdown=True):
327
- for ignore_ in ignores:
328
- dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
329
-
330
- # dirs need to be edited in-place
331
- for d in dirs_to_remove:
332
- dirs.remove(d)
333
-
334
- files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
335
-
336
- absolute_paths = [os.path.join(root, f) for f in files]
337
- relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
338
-
339
- if add_base_to_relative:
340
- relative_paths = [os.path.join(base_name, p) for p in relative_paths]
341
-
342
- assert len(absolute_paths) == len(relative_paths)
343
- result += zip(absolute_paths, relative_paths)
344
-
345
- return result
346
-
347
-
348
- def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
349
- """Takes in a list of tuples of (src, dst) paths and copies files.
350
- Will create all necessary directories."""
351
- for file in files:
352
- target_dir_name = os.path.dirname(file[1])
353
-
354
- # will create all intermediate-level directories
355
- if not os.path.exists(target_dir_name):
356
- os.makedirs(target_dir_name)
357
-
358
- shutil.copyfile(file[0], file[1])
359
-
360
-
361
- # URL helpers
362
- # ------------------------------------------------------------------------------------------
363
-
364
- def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
365
- """Determine whether the given object is a valid URL string."""
366
- if not isinstance(obj, str) or not "://" in obj:
367
- return False
368
- if allow_file_urls and obj.startswith('file://'):
369
- return True
370
- try:
371
- res = requests.compat.urlparse(obj)
372
- if not res.scheme or not res.netloc or not "." in res.netloc:
373
- return False
374
- res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
375
- if not res.scheme or not res.netloc or not "." in res.netloc:
376
- return False
377
- except:
378
- return False
379
- return True
380
-
381
-
382
- def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
383
- """Download the given URL and return a binary-mode file object to access the data."""
384
- assert num_attempts >= 1
385
- assert not (return_filename and (not cache))
386
-
387
-     # Doesn't look like a URL scheme, so interpret it as a local filename.
388
- if not re.match('^[a-z]+://', url):
389
- return url if return_filename else open(url, "rb")
390
-
391
- # Handle file URLs. This code handles unusual file:// patterns that
392
- # arise on Windows:
393
- #
394
- # file:///c:/foo.txt
395
- #
396
- # which would translate to a local '/c:/foo.txt' filename that's
397
- # invalid. Drop the forward slash for such pathnames.
398
- #
399
- # If you touch this code path, you should test it on both Linux and
400
- # Windows.
401
- #
402
-     # Some internet resources suggest using urllib.request.url2pathname(),
403
- # but that converts forward slashes to backslashes and this causes
404
- # its own set of problems.
405
- if url.startswith('file://'):
406
- filename = urllib.parse.urlparse(url).path
407
- if re.match(r'^/[a-zA-Z]:', filename):
408
- filename = filename[1:]
409
- return filename if return_filename else open(filename, "rb")
410
-
411
- assert is_url(url)
412
-
413
- # Lookup from cache.
414
- if cache_dir is None:
415
- cache_dir = make_cache_dir_path('downloads')
416
-
417
- url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
418
- if cache:
419
- cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
420
- if len(cache_files) == 1:
421
- filename = cache_files[0]
422
- return filename if return_filename else open(filename, "rb")
423
-
424
- # Download.
425
- url_name = None
426
- url_data = None
427
- with requests.Session() as session:
428
- if verbose:
429
- print("Downloading %s ..." % url, end="", flush=True)
430
- for attempts_left in reversed(range(num_attempts)):
431
- try:
432
- with session.get(url) as res:
433
- res.raise_for_status()
434
- if len(res.content) == 0:
435
- raise IOError("No data received")
436
-
437
- if len(res.content) < 8192:
438
- content_str = res.content.decode("utf-8")
439
- if "download_warning" in res.headers.get("Set-Cookie", ""):
440
- links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
441
- if len(links) == 1:
442
- url = requests.compat.urljoin(url, links[0])
443
- raise IOError("Google Drive virus checker nag")
444
- if "Google Drive - Quota exceeded" in content_str:
445
- raise IOError("Google Drive download quota exceeded -- please try again later")
446
-
447
- match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
448
- url_name = match[1] if match else url
449
- url_data = res.content
450
- if verbose:
451
- print(" done")
452
- break
453
- except KeyboardInterrupt:
454
- raise
455
- except:
456
- if not attempts_left:
457
- if verbose:
458
- print(" failed")
459
- raise
460
- if verbose:
461
- print(".", end="", flush=True)
462
-
463
- # Save to cache.
464
- if cache:
465
- safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
466
- cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
467
- temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
468
- os.makedirs(cache_dir, exist_ok=True)
469
- with open(temp_file, "wb") as f:
470
- f.write(url_data)
471
- os.replace(temp_file, cache_file) # atomic
472
- if return_filename:
473
- return cache_file
474
-
475
- # Return data as file object.
476
- assert not return_filename
477
- return io.BytesIO(url_data)
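A quick usage sketch for the utilities defined above (`EasyDict`, `format_time`, `open_url`); the URL in the comment is a placeholder:

```python
from dnnlib.util import EasyDict, format_time, open_url

cfg = EasyDict(lr=0.002, betas=(0.0, 0.99))
cfg.batch_size = 32                  # attribute syntax writes through to the dict
assert cfg["batch_size"] == cfg.batch_size

print(format_time(93784))            # -> "1d 02h 03m"

# open_url handles local paths, file:// URLs, and HTTP(S) alike, caching
# downloads under make_cache_dir_path('downloads'):
# with open_url("https://example.com/model.pkl") as f:   # placeholder URL
#     data = f.read()
```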
 
spaces/Cpp4App/Cpp4App/CDM/detect_compo/ip_region_proposal.py DELETED
@@ -1,200 +0,0 @@
1
- import cv2
2
- from os.path import join as pjoin
3
- import time
4
-
5
- import CDM.detect_compo.lib_ip.ip_preprocessing as pre
6
- import CDM.detect_compo.lib_ip.ip_draw as draw
7
- import CDM.detect_compo.lib_ip.ip_detection as det
8
- import CDM.detect_compo.lib_ip.file_utils as file
9
- import CDM.detect_compo.lib_ip.Component as Compo
10
- from CDM.config.CONFIG_UIED import Config
11
- C = Config()
12
-
13
-
14
- def nesting_inspection(org, grey, compos, ffl_block):
15
- '''
16
- Inspect all big compos through block division by flood-fill
17
- :param ffl_block: gradient threshold for flood-fill
18
- :return: nesting compos
19
- '''
20
- nesting_compos = []
21
- for i, compo in enumerate(compos):
22
- if compo.height > 50:
23
- replace = False
24
- clip_grey = compo.compo_clipping(grey)
25
- n_compos = det.nested_components_detection(clip_grey, org, grad_thresh=ffl_block, show=False)
26
- Compo.cvt_compos_relative_pos(n_compos, compo.bbox.col_min, compo.bbox.row_min)
27
-
28
- for n_compo in n_compos:
29
- if n_compo.redundant:
30
- compos[i] = n_compo
31
- replace = True
32
- break
33
- if not replace:
34
- nesting_compos += n_compos
35
- return nesting_compos
36
-
37
-
38
- def compo_detection(input_img_path, output_root, uied_params,
39
- resize_by_height=800, classifier=None, show=False, wai_key=0):
40
-
41
- start = time.process_time()
42
- name = input_img_path.split('/')[-1][:-4] if '/' in input_img_path else input_img_path.split('\\')[-1][:-4]
43
- ip_root = file.build_directory(pjoin(output_root, "ip"))
44
-
45
- # *** Step 1 *** pre-processing: read img -> get binary map
46
- org, grey = pre.read_img(input_img_path, resize_by_height)
47
- binary = pre.binarization(org, grad_min=int(uied_params['min-grad']))
48
-
49
- full_size_org, full_size_grey = pre.read_img(input_img_path)
50
- ratio = full_size_org.shape[0] / org.shape[0]
51
-
52
- # *** Step 2 *** element detection
53
- det.rm_line(binary, show=show, wait_key=wai_key)
54
- uicompos = det.component_detection(binary, min_obj_area=int(uied_params['min-ele-area']))
55
-
56
- # *** Step 3 *** results refinement
57
- uicompos = det.compo_filter(uicompos, min_area=int(uied_params['min-ele-area']), img_shape=binary.shape)
58
- uicompos = det.merge_intersected_compos(uicompos)
59
- det.compo_block_recognition(binary, uicompos)
60
- if uied_params['merge-contained-ele']:
61
- uicompos = det.rm_contained_compos_not_in_block(uicompos)
62
- Compo.compos_update(uicompos, org.shape)
63
- Compo.compos_containment(uicompos)
64
-
65
- # *** Step 4 ** nesting inspection: check if big compos have nesting element
66
- uicompos += nesting_inspection(org, grey, uicompos, ffl_block=uied_params['ffl-block'])
67
- Compo.compos_update(uicompos, org.shape)
68
- draw.draw_bounding_box(full_size_org, ratio, uicompos, show=show, name='merged compo', write_path=pjoin(ip_root, name + '.jpg'), wait_key=wai_key)
69
-
70
- # # classify icons
71
- # model = models.resnet18().to('cpu')
72
- # in_feature_num = model.fc.in_features
73
- # model.fc = nn.Linear(in_feature_num, 99)
74
- # # model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3,3), padding=(3,3), stride=(2,2), bias=False)
75
- # model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
76
- # bias=False)
77
- # # PATH = "C:/ANU/2022 s2/honours project/code/UIED-master/model/model-99-resnet18.pkl"
78
- # PATH = "./model/model-99-resnet18.pkl"
79
- # # trained_model = model()
80
- # model.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))
81
- #
82
- # model.eval()
83
- #
84
- # # ----------------- try on semantics dataset---------------------
85
- #
86
- # # sample_data = np.load('C:/ANU/2022 s2/honours project/code/semantic-icon-classifier-master/data/training_x.npy')
87
- # #
88
- # # array = np.reshape(sample_data[0, :, :, :], [32, 32])
89
- # #
90
- # # print("array: ", array)
91
- # #
92
- # # cv2.imshow("array", array)
93
- # # cv2.waitKey(0)
94
- # #
95
- # # array = array.astype('float32')
96
- # # array = array / 255
97
- # # array = (array - array.mean()) / array.std()
98
- # #
99
- # # print("array mean: ", array.mean())
100
- # # print("array std: ", array.std())
101
- # #
102
- # # array = array.reshape(1, 1, 32, 32)
103
- # #
104
- # # array = torch.tensor(array)
105
- # # print("array_tensor: ", array)
106
- # # array_pred_label = model(array)
107
- # # print("output: ", array_pred_label)
108
- #
109
- # # ----------------- end trying ---------------------
110
- #
111
- # grey = grey.astype('float32')
112
- # grey = grey / 255
113
- # # grey = grey / np.linalg.norm(grey)
114
- #
115
- # grey = (grey-grey.mean())/grey.std()
116
- # print("grey mean: ", grey.mean())
117
- # print("grey std: ", grey.std())
118
- #
119
- # # grey = grey.to(torch.float32)
120
- #
121
- # # plt.imshow(Image.fromarray(binary))
122
- # # plt.show()
123
- # # cv2.imshow("grey", grey)
124
- #
125
- # privacy_compos = []
126
- # for comp in uicompos:
127
- #
128
- # # cv2.imshow("comp", grey[comp.bbox.row_min:comp.bbox.row_max, comp.bbox.col_min:comp.bbox.col_max])
129
- # # cv2.waitKey(0)
130
- #
131
- # # col_mid = int((comp.bbox.col_min+comp.bbox.col_max)/2)
132
- # # row_mid = int((comp.bbox.row_min+comp.bbox.row_max)/2)
133
- # # comp_crop = grey[max(0, row_mid-16):min(grey.shape[1], row_mid+16), max(0, col_mid-16):min(grey.shape[0], col_mid+16)]
134
- # #
135
- # # if comp_crop.shape[0] != 32 or comp_crop.shape[1] != 32:
136
- # # print("A component is not classified, size: ", comp_crop.shape)
137
- # # print("col_mid: ", col_mid)
138
- # # print("row_mid: ", row_mid)
139
- # # print("shape[0]: ", comp_crop.shape[0])
140
- # # print("shape[1]: ", comp_crop.shape[1])
141
- # # print("max(0, row_mid-16) and min(binary.shape[1], row_mid+16): ", max(0, row_mid-16), min(grey.shape[1], row_mid+16))
142
- #
143
- # comp_grey = grey[comp.bbox.row_min:comp.bbox.row_max, comp.bbox.col_min:comp.bbox.col_max]
144
- #
145
- # # cv2.imshow("comp_grey", comp_grey)
146
- # # cv2.waitKey(0)
147
- #
148
- # # print("comp_crop: ", comp_crop)
149
- # # comp_crop = comp_grey.reshape(1, 1, 32, 32)
150
- # comp_crop = cv2.resize(comp_grey, (32, 32))
151
- # print("comp_crop: ", comp_crop)
152
- #
153
- # # cv2.imshow("comp_crop", comp_crop)
154
- # # cv2.waitKey(0)
155
- #
156
- # comp_crop = comp_crop.reshape(1, 1, 32, 32)
157
- #
158
- # comp_tensor = torch.tensor(comp_crop)
159
- # comp_tensor = comp_tensor.permute(0, 1, 3, 2)
160
- # print("comp_tensor: ", comp_tensor)
161
- # # comp_float = comp_tensor.to(torch.float32)
162
- # # print("comp_float: ", comp_float)
163
- # # pred_label = model(comp_float)
164
- # pred_label = model(comp_tensor)
165
- # print("output: ", pred_label)
166
- # print("label: ", np.argmax(pred_label.cpu().data.numpy(), axis=1))
167
- # if np.argmax(pred_label.cpu().data.numpy(), axis=1) in [72.0, 42.0, 77.0, 91.0, 6.0, 89.0, 40.0, 43.0, 82.0, 3.0, 68.0,
168
- # 49.0, 56.0, 89.0]:
169
- # privacy_compos.append(comp)
170
- #
171
- # draw.draw_bounding_box(org, privacy_compos, show=show, name='merged compo', write_path=pjoin(ip_root, name + '.jpg'), wait_key=wai_key)
172
-
173
- # *** Step 5 *** image inspection: recognize image -> remove noise in image -> binarize with larger threshold and reverse -> rectangular compo detection
174
- # if classifier is not None:
175
- # classifier['Image'].predict(seg.clipping(org, uicompos), uicompos)
176
- # draw.draw_bounding_box_class(org, uicompos, show=show)
177
- # uicompos = det.rm_noise_in_large_img(uicompos, org)
178
- # draw.draw_bounding_box_class(org, uicompos, show=show)
179
- # det.detect_compos_in_img(uicompos, binary_org, org)
180
- # draw.draw_bounding_box(org, uicompos, show=show)
181
- # if classifier is not None:
182
- # classifier['Noise'].predict(seg.clipping(org, uicompos), uicompos)
183
- # draw.draw_bounding_box_class(org, uicompos, show=show)
184
- # uicompos = det.rm_noise_compos(uicompos)
185
-
186
- # *** Step 6 *** element classification: all category classification
187
- # if classifier is not None:
188
- # classifier['Elements'].predict([compo.compo_clipping(org) for compo in uicompos], uicompos)
189
- # draw.draw_bounding_box_class(org, uicompos, show=show, name='cls', write_path=pjoin(ip_root, 'result.jpg'))
190
- # draw.draw_bounding_box_class(org, uicompos, write_path=pjoin(output_root, 'result.jpg'))
191
-
192
- # *** Step 7 *** save detection result
193
-
194
- Compo.compos_update(uicompos, org.shape)
195
- file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)
196
- # file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos, full_size_org, ratio)
197
-
198
- cd_time = time.process_time() - start
199
- print("[Compo Detection Completed in %.3f s] Input: %s Output: %s" % (cd_time, input_img_path, pjoin(ip_root, name + '.json')))
200
- return cd_time
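A hedged call sketch for `compo_detection`. The parameter keys are the ones the function body actually reads ('min-grad', 'ffl-block', 'min-ele-area', 'merge-contained-ele'); the paths and values are illustrative placeholders:

```python
from CDM.detect_compo.ip_region_proposal import compo_detection

# Keys below are the ones compo_detection reads; values are illustrative.
uied_params = {
    "min-grad": 10,               # binarization gradient threshold
    "ffl-block": 5,               # flood-fill gradient threshold for nesting inspection
    "min-ele-area": 50,           # minimum component area to keep
    "merge-contained-ele": True,  # merge components contained in other components
}
compo_detection("data/input/screenshot.jpg",  # placeholder paths
                "data/output",
                uied_params,
                resize_by_height=800,
                show=False)
```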
 
spaces/DCandE/rvc-models/infer_pack/modules.py DELETED
@@ -1,522 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- from infer_pack import commons
13
- from infer_pack.commons import init_weights, get_padding
14
- from infer_pack.transforms import piecewise_rational_quadratic_transform
15
-
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
-
20
- class LayerNorm(nn.Module):
21
- def __init__(self, channels, eps=1e-5):
22
- super().__init__()
23
- self.channels = channels
24
- self.eps = eps
25
-
26
- self.gamma = nn.Parameter(torch.ones(channels))
27
- self.beta = nn.Parameter(torch.zeros(channels))
28
-
29
- def forward(self, x):
30
- x = x.transpose(1, -1)
31
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
- return x.transpose(1, -1)
33
-
34
-
35
- class ConvReluNorm(nn.Module):
36
- def __init__(
37
- self,
38
- in_channels,
39
- hidden_channels,
40
- out_channels,
41
- kernel_size,
42
- n_layers,
43
- p_dropout,
44
- ):
45
- super().__init__()
46
- self.in_channels = in_channels
47
- self.hidden_channels = hidden_channels
48
- self.out_channels = out_channels
49
- self.kernel_size = kernel_size
50
- self.n_layers = n_layers
51
- self.p_dropout = p_dropout
52
-         assert n_layers > 1, "Number of layers should be larger than 1."
53
-
54
- self.conv_layers = nn.ModuleList()
55
- self.norm_layers = nn.ModuleList()
56
- self.conv_layers.append(
57
- nn.Conv1d(
58
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
- )
60
- )
61
- self.norm_layers.append(LayerNorm(hidden_channels))
62
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
- for _ in range(n_layers - 1):
64
- self.conv_layers.append(
65
- nn.Conv1d(
66
- hidden_channels,
67
- hidden_channels,
68
- kernel_size,
69
- padding=kernel_size // 2,
70
- )
71
- )
72
- self.norm_layers.append(LayerNorm(hidden_channels))
73
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
- self.proj.weight.data.zero_()
75
- self.proj.bias.data.zero_()
76
-
77
- def forward(self, x, x_mask):
78
- x_org = x
79
- for i in range(self.n_layers):
80
- x = self.conv_layers[i](x * x_mask)
81
- x = self.norm_layers[i](x)
82
- x = self.relu_drop(x)
83
- x = x_org + self.proj(x)
84
- return x * x_mask
85
-
86
-
87
- class DDSConv(nn.Module):
88
- """
89
-     Dilated and Depth-Separable Convolution
90
- """
91
-
92
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
- super().__init__()
94
- self.channels = channels
95
- self.kernel_size = kernel_size
96
- self.n_layers = n_layers
97
- self.p_dropout = p_dropout
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.convs_sep = nn.ModuleList()
101
- self.convs_1x1 = nn.ModuleList()
102
- self.norms_1 = nn.ModuleList()
103
- self.norms_2 = nn.ModuleList()
104
- for i in range(n_layers):
105
- dilation = kernel_size**i
106
- padding = (kernel_size * dilation - dilation) // 2
107
- self.convs_sep.append(
108
- nn.Conv1d(
109
- channels,
110
- channels,
111
- kernel_size,
112
- groups=channels,
113
- dilation=dilation,
114
- padding=padding,
115
- )
116
- )
117
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
- self.norms_1.append(LayerNorm(channels))
119
- self.norms_2.append(LayerNorm(channels))
120
-
121
- def forward(self, x, x_mask, g=None):
122
- if g is not None:
123
- x = x + g
124
- for i in range(self.n_layers):
125
- y = self.convs_sep[i](x * x_mask)
126
- y = self.norms_1[i](y)
127
- y = F.gelu(y)
128
- y = self.convs_1x1[i](y)
129
- y = self.norms_2[i](y)
130
- y = F.gelu(y)
131
- y = self.drop(y)
132
- x = x + y
133
- return x * x_mask
134
-
135
-
136
- class WN(torch.nn.Module):
137
- def __init__(
138
- self,
139
- hidden_channels,
140
- kernel_size,
141
- dilation_rate,
142
- n_layers,
143
- gin_channels=0,
144
- p_dropout=0,
145
- ):
146
- super(WN, self).__init__()
147
- assert kernel_size % 2 == 1
148
- self.hidden_channels = hidden_channels
149
- self.kernel_size = (kernel_size,)
150
- self.dilation_rate = dilation_rate
151
- self.n_layers = n_layers
152
- self.gin_channels = gin_channels
153
- self.p_dropout = p_dropout
154
-
155
- self.in_layers = torch.nn.ModuleList()
156
- self.res_skip_layers = torch.nn.ModuleList()
157
- self.drop = nn.Dropout(p_dropout)
158
-
159
- if gin_channels != 0:
160
- cond_layer = torch.nn.Conv1d(
161
- gin_channels, 2 * hidden_channels * n_layers, 1
162
- )
163
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
-
165
- for i in range(n_layers):
166
- dilation = dilation_rate**i
167
- padding = int((kernel_size * dilation - dilation) / 2)
168
- in_layer = torch.nn.Conv1d(
169
- hidden_channels,
170
- 2 * hidden_channels,
171
- kernel_size,
172
- dilation=dilation,
173
- padding=padding,
174
- )
175
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
- self.in_layers.append(in_layer)
177
-
178
- # last one is not necessary
179
- if i < n_layers - 1:
180
- res_skip_channels = 2 * hidden_channels
181
- else:
182
- res_skip_channels = hidden_channels
183
-
184
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
- self.res_skip_layers.append(res_skip_layer)
187
-
188
- def forward(self, x, x_mask, g=None, **kwargs):
189
- output = torch.zeros_like(x)
190
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
-
192
- if g is not None:
193
- g = self.cond_layer(g)
194
-
195
- for i in range(self.n_layers):
196
- x_in = self.in_layers[i](x)
197
- if g is not None:
198
- cond_offset = i * 2 * self.hidden_channels
199
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
- else:
201
- g_l = torch.zeros_like(x_in)
202
-
203
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
- acts = self.drop(acts)
205
-
206
- res_skip_acts = self.res_skip_layers[i](acts)
207
- if i < self.n_layers - 1:
208
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
- x = (x + res_acts) * x_mask
210
- output = output + res_skip_acts[:, self.hidden_channels :, :]
211
- else:
212
- output = output + res_skip_acts
213
- return output * x_mask
214
-
215
- def remove_weight_norm(self):
216
- if self.gin_channels != 0:
217
- torch.nn.utils.remove_weight_norm(self.cond_layer)
218
- for l in self.in_layers:
219
- torch.nn.utils.remove_weight_norm(l)
220
- for l in self.res_skip_layers:
221
- torch.nn.utils.remove_weight_norm(l)
222
-
223
-
224
- class ResBlock1(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-         super(ResBlock1, self).__init__()
-         self.convs1 = nn.ModuleList(
-             [
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=dilation[0],
-                         padding=get_padding(kernel_size, dilation[0]),
-                     )
-                 ),
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=dilation[1],
-                         padding=get_padding(kernel_size, dilation[1]),
-                     )
-                 ),
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=dilation[2],
-                         padding=get_padding(kernel_size, dilation[2]),
-                     )
-                 ),
-             ]
-         )
-         self.convs1.apply(init_weights)
-
-         self.convs2 = nn.ModuleList(
-             [
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=1,
-                         padding=get_padding(kernel_size, 1),
-                     )
-                 ),
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=1,
-                         padding=get_padding(kernel_size, 1),
-                     )
-                 ),
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=1,
-                         padding=get_padding(kernel_size, 1),
-                     )
-                 ),
-             ]
-         )
-         self.convs2.apply(init_weights)
-
-     def forward(self, x, x_mask=None):
-         for c1, c2 in zip(self.convs1, self.convs2):
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c1(xt)
-             xt = F.leaky_relu(xt, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c2(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
-
-     def remove_weight_norm(self):
-         for l in self.convs1:
-             remove_weight_norm(l)
-         for l in self.convs2:
-             remove_weight_norm(l)
-
-
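`get_padding` is imported from elsewhere in the package. Assuming the usual HiFi-GAN-style definition, it picks the "same" padding for a stride-1 dilated conv, so every branch of the residual block preserves the sequence length:

```python
def get_padding(kernel_size, dilation=1):
    # Effective kernel = (kernel_size - 1) * dilation + 1; pad half of the overhang
    # so a stride-1 conv with an odd kernel keeps the input length unchanged.
    return int((kernel_size * dilation - dilation) / 2)

assert get_padding(3, 1) == 1
assert get_padding(3, 5) == 5  # larger dilation widens the receptive field, padding grows to match
```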
- class ResBlock2(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-         super(ResBlock2, self).__init__()
-         self.convs = nn.ModuleList(
-             [
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=dilation[0],
-                         padding=get_padding(kernel_size, dilation[0]),
-                     )
-                 ),
-                 weight_norm(
-                     Conv1d(
-                         channels,
-                         channels,
-                         kernel_size,
-                         1,
-                         dilation=dilation[1],
-                         padding=get_padding(kernel_size, dilation[1]),
-                     )
-                 ),
-             ]
-         )
-         self.convs.apply(init_weights)
-
-     def forward(self, x, x_mask=None):
-         for c in self.convs:
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
-
-     def remove_weight_norm(self):
-         for l in self.convs:
-             remove_weight_norm(l)
-
-
- class Log(nn.Module):
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-             logdet = torch.sum(-y, [1, 2])
-             return y, logdet
-         else:
-             x = torch.exp(x) * x_mask
-             return x
-
-
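A quick numeric check of the Log flow's two directions (shapes here are arbitrary): since d(log x)/dx = 1/x, the log-determinant of the forward map is sum(-log x) = sum(-y), and the reverse pass inverts it exactly.

```python
import torch

x = torch.rand(2, 3, 5) + 0.1            # positive inputs, as expected for durations
x_mask = torch.ones(2, 1, 5)

y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])            # log|det J| = sum(-log x) over masked positions

x_rec = torch.exp(y) * x_mask             # reverse direction recovers the input
assert torch.allclose(x, x_rec, atol=1e-6)
```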
- class Flip(nn.Module):
-     def forward(self, x, *args, reverse=False, **kwargs):
-         x = torch.flip(x, [1])
-         if not reverse:
-             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-             return x, logdet
-         else:
-             return x
-
-
- class ElementwiseAffine(nn.Module):
-     def __init__(self, channels):
-         super().__init__()
-         self.channels = channels
-         self.m = nn.Parameter(torch.zeros(channels, 1))
-         self.logs = nn.Parameter(torch.zeros(channels, 1))
-
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = self.m + torch.exp(self.logs) * x
-             y = y * x_mask
-             logdet = torch.sum(self.logs * x_mask, [1, 2])
-             return y, logdet
-         else:
-             x = (x - self.m) * torch.exp(-self.logs) * x_mask
-             return x
-
-
- class ResidualCouplingLayer(nn.Module):
-     def __init__(
-         self,
-         channels,
-         hidden_channels,
-         kernel_size,
-         dilation_rate,
-         n_layers,
-         p_dropout=0,
-         gin_channels=0,
-         mean_only=False,
-     ):
-         assert channels % 2 == 0, "channels should be divisible by 2"
-         super().__init__()
-         self.channels = channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.half_channels = channels // 2
-         self.mean_only = mean_only
-
-         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-         self.enc = WN(
-             hidden_channels,
-             kernel_size,
-             dilation_rate,
-             n_layers,
-             p_dropout=p_dropout,
-             gin_channels=gin_channels,
-         )
-         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-         self.post.weight.data.zero_()
-         self.post.bias.data.zero_()
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0) * x_mask
-         h = self.enc(h, x_mask, g=g)
-         stats = self.post(h) * x_mask
-         if not self.mean_only:
-             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
-         else:
-             m = stats
-             logs = torch.zeros_like(m)
-
-         if not reverse:
-             x1 = m + x1 * torch.exp(logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             logdet = torch.sum(logs, [1, 2])
-             return x, logdet
-         else:
-             x1 = (x1 - m) * torch.exp(-logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             return x
-
-     def remove_weight_norm(self):
-         self.enc.remove_weight_norm()
-
-
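The coupling transform itself is easy to verify in isolation. In this sketch `m` and `logs` are stand-in tensors rather than WN outputs; the point is that only `x1` is transformed, so the reverse direction inverts the forward one exactly and the Jacobian is triangular with logdet = sum(logs), matching the value returned above.

```python
import torch

b, half, t = 2, 4, 8
x1 = torch.randn(b, half, t)
m, logs = torch.randn(b, half, t), 0.1 * torch.randn(b, half, t)
x_mask = torch.ones(b, 1, t)

y1 = m + x1 * torch.exp(logs) * x_mask          # forward direction
x1_rec = (y1 - m) * torch.exp(-logs) * x_mask   # reverse direction
assert torch.allclose(x1, x1_rec, atol=1e-5)
```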
- class ConvFlow(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         filter_channels,
-         kernel_size,
-         n_layers,
-         num_bins=10,
-         tail_bound=5.0,
-     ):
-         super().__init__()
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.num_bins = num_bins
-         self.tail_bound = tail_bound
-         self.half_channels = in_channels // 2
-
-         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
-         self.proj = nn.Conv1d(
-             filter_channels, self.half_channels * (num_bins * 3 - 1), 1
-         )
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0)
-         h = self.convs(h, x_mask, g=g)
-         h = self.proj(h) * x_mask
-
-         b, c, t = x0.shape
-         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*(3*num_bins-1), t] -> [b, c, t, 3*num_bins-1]
-
-         unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
-         unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
-             self.filter_channels
-         )
-         unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
-         x1, logabsdet = piecewise_rational_quadratic_transform(
-             x1,
-             unnormalized_widths,
-             unnormalized_heights,
-             unnormalized_derivatives,
-             inverse=reverse,
-             tails="linear",
-             tail_bound=self.tail_bound,
-         )
-
-         x = torch.cat([x0, x1], 1) * x_mask
-         logdet = torch.sum(logabsdet * x_mask, [1, 2])
-         if not reverse:
-             return x, logdet
-         else:
-             return x
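For reference, the `num_bins * 3 - 1` output channels of `self.proj` decompose into the spline's bin widths, bin heights, and knot derivatives. With `tails="linear"` the derivatives at the two boundaries are fixed, so only the `num_bins - 1` interior knot derivatives are learned:

```python
# Parameter layout per half-channel, assuming the default num_bins = 10.
num_bins = 10
widths = num_bins           # unnormalized bin widths
heights = num_bins          # unnormalized bin heights
derivatives = num_bins - 1  # derivatives at interior bin edges only
assert widths + heights + derivatives == num_bins * 3 - 1  # = 29 values
```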