Commit
·
998243e
1
Parent(s):
517485f
Update parquet files (step 110 of 249)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/testing/you_test.py +0 -27
- spaces/101-5/gpt4free/g4f/Provider/Providers/Liaobots.py +0 -52
- spaces/17TheWord/RealESRGAN/scripts/generate_meta_info_pairdata.py +0 -49
- spaces/1gistliPinn/ChatGPT4/Examples/Arc2earth Free Download.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Cultural History Of India By Om Prakash Pdf 22.md +0 -70
- spaces/1phancelerku/anime-remove-background/Challenge Your Friends and Rivals with 8 Ball Pool APK.md +0 -131
- spaces/1phancelerku/anime-remove-background/Darah Tak Terbatas and Unlimited Money in Hungry Shark World Get Mod Apk Here.md +0 -115
- spaces/232labs/VToonify/README.md +0 -14
- spaces/2gauravc/search_summary_chatgpt/app.py +0 -104
- spaces/AIML-TUDA/semantic-diffusion/README.md +0 -13
- spaces/AP123/dreamgaussian/readme.md +0 -120
- spaces/ASJMO/freegpt/g4f/Provider/Providers/Xiaor.py +0 -39
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Phind.py +0 -76
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/Base.d.ts +0 -48
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Box.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/anchor/Anchor.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/EaseDataMethods.js +0 -44
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/Layout.js +0 -19
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/NinePatch.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch2/Factory.js +0 -13
- spaces/AlexWang/lama/models/ade20k/segm_lib/utils/data/dataset.py +0 -118
- spaces/Alican/pixera/README.md +0 -12
- spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/discriminator.py +0 -20
- spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py +0 -9
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/evaluate.py +0 -151
- spaces/AnnonSubmission/xai-cl/data_transforms.py +0 -96
- spaces/Benson/text-generation/Examples/Descargar Counter Strike 1.6 Original.md +0 -149
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py +0 -137
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py +0 -90
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/repr.py +0 -149
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/__init__.py +0 -12
- spaces/C6AI/HDRL/Dockerfile +0 -14
- spaces/CVPR/LIVE/thrust/thrust/memory/detail/host_system_resource.h +0 -33
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py +0 -221
- spaces/DCandE/rvc-models/infer_pack/models.py +0 -982
- spaces/DHEIVER/AnimeGANv2/app.py +0 -56
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_middlewares.py +0 -119
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/contourpy/chunk.py +0 -89
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/codecs.py +0 -135
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Form-3812b7f1.css +0 -1
- spaces/DUOMO-Lab/TransGPT/README.md +0 -13
- spaces/Datasculptor/DescriptionGPT/detic/custom_solver.py +0 -78
- spaces/Datasculptor/LoRA-DreamBooth-Training-UI/app_training.py +0 -144
- spaces/Datasculptor/MusicGen/audiocraft/modules/streaming.py +0 -135
- spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/denoiser/augment.py +0 -191
- spaces/Detomo/ai-avatar-frontend/src/setupTests.js +0 -5
- spaces/DragGan/DragGan/viz/capture_widget.py +0 -92
- spaces/ECCV2022/storydalle/dalle/models/stage2/layers.py +0 -221
spaces/101-5/gpt4free/g4f/.v1/testing/you_test.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
from gpt4free import you
|
2 |
-
|
3 |
-
# simple request with links and details
|
4 |
-
response = you.Completion.create(prompt="hello world", detailed=True, include_links=True)
|
5 |
-
|
6 |
-
print(response)
|
7 |
-
|
8 |
-
# {
|
9 |
-
# "response": "...",
|
10 |
-
# "links": [...],
|
11 |
-
# "extra": {...},
|
12 |
-
# "slots": {...}
|
13 |
-
# }
|
14 |
-
# }
|
15 |
-
|
16 |
-
# chatbot
|
17 |
-
|
18 |
-
chat = []
|
19 |
-
|
20 |
-
while True:
|
21 |
-
prompt = input("You: ")
|
22 |
-
|
23 |
-
response = you.Completion.create(prompt=prompt, chat=chat)
|
24 |
-
|
25 |
-
print("Bot:", response.text)
|
26 |
-
|
27 |
-
chat.append({"question": prompt, "answer": response.text})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/101-5/gpt4free/g4f/Provider/Providers/Liaobots.py
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
import os, uuid, requests
|
2 |
-
from ...typing import sha256, Dict, get_type_hints
|
3 |
-
|
4 |
-
url = 'https://liaobots.com'
|
5 |
-
model = ['gpt-3.5-turbo', 'gpt-4']
|
6 |
-
supports_stream = True
|
7 |
-
needs_auth = True
|
8 |
-
|
9 |
-
models = {
|
10 |
-
'gpt-4': {
|
11 |
-
"id":"gpt-4",
|
12 |
-
"name":"GPT-4",
|
13 |
-
"maxLength":24000,
|
14 |
-
"tokenLimit":8000
|
15 |
-
},
|
16 |
-
'gpt-3.5-turbo': {
|
17 |
-
"id":"gpt-3.5-turbo",
|
18 |
-
"name":"GPT-3.5",
|
19 |
-
"maxLength":12000,
|
20 |
-
"tokenLimit":4000
|
21 |
-
},
|
22 |
-
}
|
23 |
-
|
24 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
25 |
-
|
26 |
-
print(kwargs)
|
27 |
-
|
28 |
-
headers = {
|
29 |
-
'authority': 'liaobots.com',
|
30 |
-
'content-type': 'application/json',
|
31 |
-
'origin': 'https://liaobots.com',
|
32 |
-
'referer': 'https://liaobots.com/',
|
33 |
-
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
|
34 |
-
'x-auth-code': kwargs.get('auth')
|
35 |
-
}
|
36 |
-
|
37 |
-
json_data = {
|
38 |
-
'conversationId': str(uuid.uuid4()),
|
39 |
-
'model': models[model],
|
40 |
-
'messages': messages,
|
41 |
-
'key': '',
|
42 |
-
'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
|
43 |
-
}
|
44 |
-
|
45 |
-
response = requests.post('https://liaobots.com/api/chat',
|
46 |
-
headers=headers, json=json_data, stream=True)
|
47 |
-
|
48 |
-
for token in response.iter_content(chunk_size=2046):
|
49 |
-
yield (token.decode('utf-8'))
|
50 |
-
|
51 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
52 |
-
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/17TheWord/RealESRGAN/scripts/generate_meta_info_pairdata.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import glob
|
3 |
-
import os
|
4 |
-
|
5 |
-
|
6 |
-
def main(args):
|
7 |
-
txt_file = open(args.meta_info, 'w')
|
8 |
-
# sca images
|
9 |
-
img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*')))
|
10 |
-
img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*')))
|
11 |
-
|
12 |
-
assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got '
|
13 |
-
f'{len(img_paths_gt)} and {len(img_paths_lq)}.')
|
14 |
-
|
15 |
-
for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq):
|
16 |
-
# get the relative paths
|
17 |
-
img_name_gt = os.path.relpath(img_path_gt, args.root[0])
|
18 |
-
img_name_lq = os.path.relpath(img_path_lq, args.root[1])
|
19 |
-
print(f'{img_name_gt}, {img_name_lq}')
|
20 |
-
txt_file.write(f'{img_name_gt}, {img_name_lq}\n')
|
21 |
-
|
22 |
-
|
23 |
-
if __name__ == '__main__':
|
24 |
-
"""This script is used to generate meta info (txt file) for paired images.
|
25 |
-
"""
|
26 |
-
parser = argparse.ArgumentParser()
|
27 |
-
parser.add_argument(
|
28 |
-
'--input',
|
29 |
-
nargs='+',
|
30 |
-
default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'],
|
31 |
-
help='Input folder, should be [gt_folder, lq_folder]')
|
32 |
-
parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ')
|
33 |
-
parser.add_argument(
|
34 |
-
'--meta_info',
|
35 |
-
type=str,
|
36 |
-
default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt',
|
37 |
-
help='txt path for meta info')
|
38 |
-
args = parser.parse_args()
|
39 |
-
|
40 |
-
assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder'
|
41 |
-
assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder'
|
42 |
-
os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
|
43 |
-
for i in range(2):
|
44 |
-
if args.input[i].endswith('/'):
|
45 |
-
args.input[i] = args.input[i][:-1]
|
46 |
-
if args.root[i] is None:
|
47 |
-
args.root[i] = os.path.dirname(args.input[i])
|
48 |
-
|
49 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Arc2earth Free Download.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>arc2earth Free Download</h2><br /><p><b><b>DOWNLOAD</b> 🆗 <a href="https://imgfil.com/2uxZFK">https://imgfil.com/2uxZFK</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Download Arc2Earth for free. Arc2Earth is the premier ArcGIS extension for exporting and importing your data into the leading GeoWeb formats. 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cultural History Of India By Om Prakash Pdf 22.md
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cultural History Of India By Om Prakash Pdf 22: A Book Review</h1>
|
3 |
-
|
4 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is a book that explores the various aspects of development of Indian culture from ancient times to the present day. It is written by Om Prakash, a renowned historian and professor of history at Delhi University. The book is divided into three parts, each dealing with a different theme: religion, art, and social institutions.</p>
|
5 |
-
<h2>Cultural History Of India By Om Prakash Pdf 22</h2><br /><p><b><b>DOWNLOAD</b> ✦✦✦ <a href="https://imgfil.com/2uy263">https://imgfil.com/2uy263</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<p>The book is based on extensive research and analysis of primary and secondary sources, such as literary texts, inscriptions, coins, sculptures, paintings, monuments, etc. It also draws on the works of other eminent scholars and experts in the field of Indian history and culture. The book is written in a clear and lucid style, with ample illustrations and examples to support the arguments and facts. The book also provides a bibliography and an index for further reference.</p>
|
8 |
-
|
9 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is an extremely useful and informative book for anyone who is interested in learning about the rich and diverse cultural heritage of India. It covers a wide range of topics and issues, such as the Vedic religion, Buddhism, Jainism, Saivism, Vaisnavism, Islam, Sikhism, Christianity, composite culture, art and architecture, social institutions, education, economy, food and drinks, etc. It also traces the historical evolution and transformation of these aspects over time and space.</p>
|
10 |
-
|
11 |
-
<p>The book is not only a scholarly work but also a fascinating and engaging read that captures the essence and spirit of Indian culture. It shows how Indian culture has been shaped by various influences and factors, such as geography, environment, ethnicity, language, politics, trade, etc. It also shows how Indian culture has contributed to the world civilization and culture in various ways.</p>
|
12 |
-
|
13 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is therefore a must-read for all students, teachers, researchers, and enthusiasts of Indian history and culture. It is also a valuable resource for anyone who wants to understand the roots and identity of India as a nation and a civilization.</p>
|
14 |
-
|
15 |
-
<h2>How to Download Cultural History Of India By Om Prakash Pdf 22?</h2>
|
16 |
-
|
17 |
-
<p>If you want to download Cultural History Of India By Om Prakash Pdf 22 on your device, you can use our guide to find the best sources and methods for doing so. Remember to always use a VPN when downloading books online and check the reviews and ratings of the files before using them.</p>
|
18 |
-
|
19 |
-
<p>One of the best ways to download Cultural History Of India By Om Prakash Pdf 22 is to use Google Books. Google Books is a service that allows you to search and preview millions of books from libraries and publishers worldwide. You can also download some books for free or buy them online.</p>
|
20 |
-
<p></p>
|
21 |
-
|
22 |
-
<p>To download Cultural History Of India By Om Prakash Pdf 22 from Google Books , follow these steps:</p>
|
23 |
-
|
24 |
-
<ol>
|
25 |
-
<li>Go to <a href="https://books.google.com/books/about/Cultural_History_of_India.html?id=nzpYb5UOeiwC">https://books.google.com/books/about/Cultural_History_of_India.html?id=nzpYb5UOeiwC</a> on your browser.</li>
|
26 |
-
<li>Click on the "EBOOK - FREE" button on the top right corner of the page.</li>
|
27 |
-
<li>Select your preferred format from the list (PDF or EPUB).</li>
|
28 |
-
<li>Click on "Download" button and wait for the file to download on your device.</li>
|
29 |
-
<li>Enjoy reading Cultural History Of India By Om Prakash Pdf 22 on your device.</li>
|
30 |
-
</ol>
|
31 |
-
|
32 |
-
<h2>How to Read Cultural History Of India By Om Prakash Pdf 22?</h2>
|
33 |
-
|
34 |
-
<p>If you don't want to download Cultural History Of India By Om Prakash Pdf 22 on your device , you can also read it online using Google Books . Google Books allows you to read books online without downloading them . You can also access them using your browser or an app on your device .</p>
|
35 |
-
|
36 |
-
<p>To read Cultural History Of India By Om Prakash Pdf 22 online from Google Books , follow these steps :</p>
|
37 |
-
|
38 |
-
<ol>
|
39 |
-
<li>Go to <a href="https://books.google.com/books/about/Cultural_History_of_India.html?id=nzpYb5UOeiwC">https://books.google.com/books/about/Cultural_History_of_India.html?id=nzpYb5UOeiwC</a> on your browser .</li>
|
40 |
-
<li>Click on the "READ" button on the top right corner of the page .</li>
|
41 |
-
<li>Wait for the book to load on your browser or choose "Open with" if you have an app that can read PDF or EPUB files on your device .</li>
|
42 |
-
<li>Enjoy reading Cultural History Of India By Om Prakash Pdf 22 online from Google Books .</li>
|
43 |
-
</ol>
|
44 |
-
|
45 |
-
<h2>Conclusion</h2>
|
46 |
-
|
47 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is a book that explores the various aspects of development of Indian culture from ancient times to the present day . It is written by Om Prakash , a renowned historian and professor of history at Delhi University . The book is divided into three parts , each dealing with a different theme : religion , art , and social institutions .</p>
|
48 |
-
|
49 |
-
<p>The book is based on extensive research and analysis of primary and secondary sources , such as literary texts , inscriptions , coins , sculptures , paintings , monuments , etc . It also draws on the works of other eminent scholars and experts in the field of Indian history and culture . The book is written in a clear and lucid style , with ample illustrations and examples to support the arguments and facts . The book also provides a bibliography and an index for further reference .</p>
|
50 |
-
|
51 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is an extremely useful and informative book for anyone who is interested in learning about the rich and diverse cultural heritage of India . It covers a wide range of topics and issues , such as the Vedic religion , Buddhism , Jainism , Saivism , Vaisnavism , Islam , Sikhism , Christianity , composite culture , art and architecture , social institutions , education , economy , food and drinks , etc . It also traces the historical evolution and transformation of these aspects over time and space .</p>
|
52 |
-
|
53 |
-
<p>The book is not only a scholarly work but also a fascinating and engaging read that captures the essence and spirit of Indian culture . It shows how Indian culture has been shaped by various influences and factors , such as geography , environment , ethnicity , language , politics , trade , etc . It also shows how Indian culture has contributed to the world civilization and culture in various ways .</p>
|
54 |
-
|
55 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is therefore a must-read for all students , teachers , researchers , and enthusiasts of Indian history and culture . It is also a valuable resource for anyone who wants to understand the roots and identity of India as a nation and a civilization .</p>
|
56 |
-
|
57 |
-
<p>We hope you found this article helpful and informative . If you have any questions or feedback , feel free to leave a comment below . Thank you for reading !</p>
|
58 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is a book that explores the various aspects of development of Indian culture from ancient times to the present day. It is written by Om Prakash, a renowned historian and professor of history at Delhi University. The book is divided into three parts, each dealing with a different theme: religion, art, and social institutions.</p>
|
59 |
-
|
60 |
-
<p>The book is based on extensive research and analysis of primary and secondary sources, such as literary texts, inscriptions, coins, sculptures, paintings, monuments, etc. It also draws on the works of other eminent scholars and experts in the field of Indian history and culture. The book is written in a clear and lucid style, with ample illustrations and examples to support the arguments and facts. The book also provides a bibliography and an index for further reference.</p>
|
61 |
-
|
62 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is an extremely useful and informative book for anyone who is interested in learning about the rich and diverse cultural heritage of India. It covers a wide range of topics and issues, such as the Vedic religion, Buddhism, Jainism, Saivism, Vaisnavism, Islam, Sikhism, Christianity, composite culture, art and architecture, social institutions, education, economy, food and drinks, etc. It also traces the historical evolution and transformation of these aspects over time and space.</p>
|
63 |
-
|
64 |
-
<p>The book is not only a scholarly work but also a fascinating and engaging read that captures the essence and spirit of Indian culture. It shows how Indian culture has been shaped by various influences and factors, such as geography, environment, ethnicity, language, politics, trade, etc. It also shows how Indian culture has contributed to the world civilization and culture in various ways.</p>
|
65 |
-
|
66 |
-
<p>Cultural History Of India By Om Prakash Pdf 22 is therefore a must-read for all students, teachers, researchers, and enthusiasts of Indian history and culture. It is also a valuable resource for anyone who wants to understand the roots and identity of India as a nation and a civilization.</p>
|
67 |
-
|
68 |
-
<p>We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!</p> 3cee63e6c2<br />
|
69 |
-
<br />
|
70 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Challenge Your Friends and Rivals with 8 Ball Pool APK.md
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>8 Ball Pool Apkpure: A Guide to Download and Play the World's Best Pool Game</h1>
|
3 |
-
<p>If you are a fan of pool games, you might have heard of <strong>8 Ball Pool</strong>, the world's most popular online multiplayer pool game. But did you know that you can also play it on your Windows PC with <strong>Apkpure</strong>, a website that provides free and safe Android apps and games? In this article, we will show you how to download and install <strong>8 Ball Pool Apkpure</strong> on your PC, what are the features of this amazing game, what are the rules of playing it, and what are some tips and tricks to help you become a master of the pool.</p>
|
4 |
-
<h2>What is 8 Ball Pool Apkpure?</h2>
|
5 |
-
<h3>A brief introduction to 8 Ball Pool, a popular online multiplayer pool game</h3>
|
6 |
-
<p>8 Ball Pool is a game developed by Miniclip that allows you to play pool with players from all over the world. You can choose from different game modes, such as <em>1-on-1</em>, <em>Tournaments</em>, <em>9-Ball</em>, or <em>Practice</em>, and compete for coins, cash, trophies, and exclusive items. You can also customize your cue and pool table with various designs and colors. The game has a level system that matches you with players of similar skill level, and a ranking system that shows your progress in the global leaderboard.</p>
|
7 |
-
<h2>8 ball pool apkpure</h2><br /><p><b><b>Download</b> ✦✦✦ <a href="https://jinyurl.com/2uNOiv">https://jinyurl.com/2uNOiv</a></b></p><br /><br />
|
8 |
-
<h3>A brief introduction to Apkpure, a website that provides free and safe Android apps and games</h3>
|
9 |
-
<p>Apkpure is a website that offers a large collection of Android apps and games that you can download for free. You can find apps and games for various categories, such as <em>Action</em>, <em>Puzzle</em>, <em>Sports</em>, <em>Casual</em>, <em>Educational</em>, <em>Music</em>, <em>Lifestyle</em>, <em>Social</em>, etc. You can also search for specific apps or games by name or keyword. All the apps and games on Apkpure are verified by their team to ensure they are safe and virus-free.</p>
|
10 |
-
<h3>How to download and install 8 Ball Pool Apkpure on your Windows PC</h3>
|
11 |
-
<p>To play 8 Ball Pool Apkpure on your Windows PC, you need to use an Android emulator, which is a software that allows you to run Android apps and games on your PC. There are many Android emulators available, but we recommend using <strong>Gameloop</strong>, which is the official emulator of Tencent Games, the publisher of 8 Ball Pool. Here are the steps to download and install 8 Ball Pool Apkpure on your PC with Gameloop:</p>
|
12 |
-
<ol>
|
13 |
-
<li>Download and install Gameloop from its official website: <a href="">https://gameloop.fun/</a></li>
|
14 |
-
<li>Launch Gameloop and click on the <em>Game Center</em> tab.</li>
|
15 |
-
<li>Search for 8 Ball Pool in the search bar and click on the <em>Install</em> button.</li>
|
16 |
-
<li>Wait for the game to download and install on your PC.</li>
|
17 |
-
<li>Click on the <em>My Games</em> tab and launch 8 Ball Pool from there.</li>
|
18 |
-
<li>Enjoy playing 8 Ball Pool Apkpure on your PC with a larger screen, better graphics, and smoother controls.</li>
|
19 |
-
</ol>
|
20 |
-
<h2>What are the features of 8 Ball Pool Apkpure?</h2>
|
21 |
-
<h3>The benefits of playing 8 Ball Pool on your PC with Gameloop emulator</h3>
|
22 |
-
<p>Playing 8 Ball Pool Apkpure on your PC with Gameloop emulator has many advantages over playing it on your mobile device. Here are some of them:</p>
|
23 |
-
<ul>
|
24 |
-
<li>You can enjoy a bigger and clearer view of the pool table, the balls, and the cues on your PC screen.</li>
|
25 |
-
<li>You can use your mouse and keyboard to control the game, which gives you more accuracy and precision than using your fingers on a touch screen.</li>
|
26 |
-
<li>You can avoid battery drain, overheating, and lag issues that may affect your mobile device while playing 8 Ball Pool.</li>
|
27 |
-
<li>You can access more features and settings in Gameloop, such as recording, streaming, screenshotting, and customizing your keyboard layout.</li>
|
28 |
-
<li>You can play 8 Ball Pool Apkpure with other players who are using Gameloop emulator, which creates a fair and balanced gaming environment.</li>
|
29 |
-
</ul>
|
30 |
-
<h3>The different game modes, tables, cues, and balls available in 8 Ball Pool</h3>
|
31 |
-
<p>8 Ball Pool Apkpure offers a variety of game modes, tables, cues, and balls to suit your preferences and skill level. Here are some of them:</p>
|
32 |
-
<table>
|
33 |
-
<tr><th>Game Mode</th><th>Description</th></tr>
|
34 |
-
<tr><td><em>1-on-1</em></td><td>The classic mode where you play against another player in a single match. You can choose from different locations, such as London, Sydney, Moscow, Tokyo, Las Vegas, etc., each with a different entry fee and prize pool. You can also play in the <em>No Guidelines</em> mode where there are no aiming lines to help you.</td></tr>
|
35 |
-
<tr><td><em>Tournaments</em></td><td>The mode where you compete with up to 7 other players in a knockout format. You can choose from different tournaments, such as Cairo, Shanghai, Toronto, Berlin, etc., each with a different entry fee and prize pool. You can also play in the <em>No Guidelines</em> mode where there are no aiming lines to help you.</td></tr>
|
36 |
-
<tr><td><em>9-Ball</em></td><td>The mode where you play with 9 balls instead of 15. The rules are different from 8 Ball: you have to hit the lowest numbered ball first, and the first player to pocket the 9 ball wins. You can also play in the <em>No Guidelines</em> mode where there are no aiming lines to help you.</td></tr>
|
37 |
-
<tr><td><em>Practice</em></td><td>The mode where you can practice your skills without any pressure or opponents. You can choose from different tables and cues to practice with. You can also adjust the difficulty level of the game from <em>Easy</em> to <em>Expert</em>.</td></tr>
|
38 |
-
</table>
|
39 |
-
<p>In addition to the game modes, you can also choose from different tables and cues to play with. Each table has a different design and color scheme, such as Wood Grain, Marble, Ice Blue, etc. Each cue has different attributes and effects, such as Aim, Force, Time, Spin, etc. You can also unlock special cues with unique features, such as Legendary Cues, VIP Cues, Country Cues, etc.</p>
|
40 |
-
<p>You can also choose from different balls to play with. Some of the balls have different colors and patterns, such as Stripes, Solids, Stars, etc. Some of the balls have special effects, such as Fireworks, Snowflakes, Lightning, etc. You can also unlock exclusive balls with unique features, such as Golden Shot Balls, Surprise Boxes Balls, Scratch and Win Balls, etc.</p>
|
41 |
-
<h3>The customization options, rewards, and challenges in 8 Ball Pool Apkpure</h3>
|
42 |
-
<p>8 Ball Pool Apkpure also allows you to customize your profile and avatar with various options. You can choose from different avatars, such as Animals, Sports, Celebrities, etc. You can also upload your own photo or use your Facebook profile picture. You can also edit your name, country, and status message.</p>
|
43 |
-
<p>8 ball pool apk download latest version<br />
|
44 |
-
8 ball pool mod apk unlimited coins and cash<br />
|
45 |
-
8 ball pool hack apk no root<br />
|
46 |
-
8 ball pool old version apk free download<br />
|
47 |
-
8 ball pool offline apk for android<br />
|
48 |
-
8 ball pool apk pure app store<br />
|
49 |
-
8 ball pool online multiplayer apk<br />
|
50 |
-
8 ball pool rewards apk download<br />
|
51 |
-
8 ball pool legendary cues mod apk<br />
|
52 |
-
8 ball pool guideline hack apk<br />
|
53 |
-
8 ball pool instant reward apk<br />
|
54 |
-
8 ball pool long line mod apk<br />
|
55 |
-
8 ball pool tool pro apk<br />
|
56 |
-
8 ball pool cheat engine apk<br />
|
57 |
-
8 ball pool generator apk no human verification<br />
|
58 |
-
8 ball pool unlimited money apk<br />
|
59 |
-
8 ball pool beta version apk download<br />
|
60 |
-
8 ball pool auto win mod apk<br />
|
61 |
-
8 ball pool aimbot apk download<br />
|
62 |
-
8 ball pool avatar hd apk<br />
|
63 |
-
8 ball pool all cues unlocked mod apk<br />
|
64 |
-
8 ball pool anti ban mod apk<br />
|
65 |
-
8 ball pool best mod apk download<br />
|
66 |
-
8 ball pool by miniclip apk download<br />
|
67 |
-
8 ball pool cracked version apk download<br />
|
68 |
-
8 ball pool coin hack apk download<br />
|
69 |
-
8 ball pool cue hack apk download<br />
|
70 |
-
8 ball pool cash hack apk download<br />
|
71 |
-
8 ball pool diamond cue mod apk download<br />
|
72 |
-
8 ball pool extended stick guideline mod apk download<br />
|
73 |
-
8 ball pool free coins and cash generator apk download<br />
|
74 |
-
8 ball pool free legendary cues mod apk download<br />
|
75 |
-
8 ball pool full unlocked mod apk download<br />
|
76 |
-
8 ball pool game guardian script hack apk download<br />
|
77 |
-
8 ball pool golden break mod apk download<br />
|
78 |
-
8 ball pool guideline tool pro modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
79 |
-
8 ball pool hack version unlimited money and cash apkpure.com[^1^]<br />
|
80 |
-
8 ball pool instant win modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
81 |
-
8 ball pool king cue modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
82 |
-
8 ball pool long line hack modded cracked patched unlocked premium full cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
83 |
-
8 ball pool mega mod unlimited everything apkpure.com[^1^]<br />
|
84 |
-
8 ball pool new update modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
85 |
-
8 ball pool old version modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
86 |
-
8 ball pool pro membership modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]<br />
|
87 |
-
8 ball pool quick fire mode modded cracked patched unlocked premium full hack cheat version app game android latest update free download install play online offline no root require apkpure.com[^1^]</p>
|
88 |
-
<p>As you play 8 Ball Pool Apkpure, you can also earn various rewards and complete various challenges. You can earn coins and cash by winning matches, tournaments, and mini-games. You can also earn trophies by ranking up in the leaderboard. You can also earn pool passes by completing daily missions and seasonal events. You can also earn free gifts by logging in daily, watching videos, inviting friends, etc.</p>
|
89 |
-
<p>You can also take on different challenges in 8 Ball Pool Apkpure to test your skills and win more rewards. You can play in the <em>Spin and Win</em> mini-game to win coins, cash, cues, balls, and other prizes. You can play in the <em>Hi-Lo</em> mini-game to guess the outcome of a coin toss and win coins. You can play in the <em>Golden Shot</em> mini-game to hit the golden ball and win coins, cash, cues, balls, and other prizes. You can also join the <em>Clubs</em> feature to create or join a club with other players and compete for club points and rewards.</p>
|
90 |
-
<h2>What are the rules of 8 Ball Pool Apkpure?</h2>
|
91 |
-
<h3>The basic rules of 8 Ball Pool, such as legal break, object balls, pocketing the 8 ball, and fouls</h3>
|
92 |
-
<p>The basic rules of 8 Ball Pool are simple and easy to learn. Here are some of them:</p>
|
93 |
-
<ul>
|
94 |
-
<li>The game is played with 15 object balls (numbered 1 to 15) and a cue ball (white).</li>
|
95 |
-
<li>The object balls are divided into two groups: solids (numbered 1 to 7) and stripes (numbered 9 to 15). The 8 ball (black) is the most important ball in the game.</li>
|
96 |
-
<li>The game starts with a break shot, where the player hits the cue ball into the rack of object balls. The break shot must be legal, which means that at least four object balls must hit a cushion or a ball must be pocketed.</li>
|
97 |
-
<li>After the break shot, the player who pockets a ball or has a legal break shot gets to choose which group of balls they want to play: solids or stripes. The player must then try to pocket all their group of balls before their opponent.</li>
|
98 |
-
<li>The player who pockets all their group of balls first gets to shoot for the 8 ball. The player must call the pocket where they intend to pocket the 8 ball before shooting. The player who pockets the 8 ball legally wins the game.</li>
|
99 |
-
<li>If a player commits a foul during the game, their turn ends and their opponent gets ball in hand, which means they can place the cue ball anywhere on the table for their next shot. Some common fouls are: hitting the wrong group of balls first; not hitting any ball; hitting the cue ball off the table; pocketing the cue ball; pocketing the 8 ball before clearing their group of balls; pocketing the 8 ball in the wrong pocket; or pocketing the 8 ball when it is not their turn.</li>
|
100 |
-
</ul>
|
101 |
-
<h3>The different variations of 8 Ball Pool rules, such as WPA, APA, VNEA, and BCAPL</h3>
|
102 |
-
<p>While the basic rules of 8 Ball Pool are generally the same, there are some variations of the rules that are used by different organizations and tournaments. Here are some of them:</p>
|
103 |
-
<ul>
|
104 |
-
<li><strong>WPA</strong>: The World Pool-Billiard Association is the international governing body of pool. The WPA rules are the official rules of 8 Ball Pool for international competitions. Some of the WPA rules are: the break shot must be taken from behind the head string; if no ball is pocketed on the break shot, the incoming player can choose to play from where the cue ball lies or ask for a re-rack; if a player pockets a ball on the break shot, they can either accept that group of balls or continue to shoot until they miss or foul; if a player pockets both a solid and a stripe on the break shot, they can choose which group of balls they want to play; if a player pockets the 8 ball on the break shot, they can either win the game or ask for a re-rack.</li>
|
105 |
-
<li><strong>APA</strong>: The American Poolplayers Association is the largest amateur pool league in the world. The APA rules are the most common rules of 8 Ball Pool for recreational and league play in the United States. Some of the APA rules are: the break shot can be taken from anywhere behind the head string; if no ball is pocketed on the break shot, the table is open for both players; if a player pockets a ball on the break shot, they must shoot at that group of balls until they miss or foul; if a player pockets both a solid and a stripe on the break shot, they must shoot at either group of balls until they miss or foul; if a player pockets the 8 ball on the break shot, they win the game.</li>
|
106 |
-
<li><strong>VNEA</strong>: The Valley National Eight-Ball Association is one of the largest pool organizations in North America. The VNEA rules are similar to the APA rules, but with some differences. Some of the VNEA rules are: if no ball is pocketed on the break shot, the table is open for both players; if a player pockets a ball on the break shot, they must shoot at that group of balls until they miss or foul; if a player pockets both a solid and a stripe on the break shot, they must shoot at either group of balls until they miss or foul; if a player pockets the 8 ball on the break shot, they win the game; however, if a player scratches (pockets the cue ball) on the break shot, they lose the game.</li>
|
107 |
-
<li><strong>BCAPL</strong>: The Billiard Congress of America Pool League is another large pool organization in North America. The BCAPL rules are similar to the WPA rules, but with some differences. Some of the BCAPL rules are: the break shot must be taken from behind the head string; if no ball is pocketed on the break shot, the incoming player can choose to play from where the cue ball lies or push out to a new position; if a player pockets a ball on the break shot, they can either accept that group of balls or continue to shoot until they miss or foul; if a player pockets both a solid and a stripe on the break shot, they can choose which group of balls they want to play; if a player pockets the 8 ball on the break shot, they can either win the game or ask for a re-rack.</li>
|
108 |
-
</ul>
|
109 |
-
<h3>The tips and tricks to improve your skills and win more matches in 8 Ball Pool Apkpure</h3>
|
110 |
-
<p>8 Ball Pool Apkpure is a game that requires both skill and strategy to win. Here are some tips and tricks to help you improve your game and beat your opponents:</p>
|
111 |
-
<ul>
|
112 |
-
<li>Practice your aim and power. You can use the aiming lines to help you align your shots, but you also need to adjust your power according to the distance and angle of the shot. You can practice your aim and power in the <em>Practice</em> mode or by using the <em>Guideline in All Rooms</em> option in the settings.</li>
|
113 |
-
<li>Use spin wisely. You can use spin to change the direction and speed of the cue ball after it hits an object ball. You can use spin to avoid scratches, position the cue ball for your next shot, or make tricky shots. You can apply spin by using the spin wheel on the bottom right corner of the screen.</li>
|
114 |
-
<li>Plan ahead. You should always think ahead and plan your shots before you shoot. You should consider which balls are easy or hard to pocket, which pockets are open or blocked, and which shots will leave you with a good or bad position for your next shot. You should also try to clear any clusters or obstacles as soon as possible.</li>
|
115 |
-
<li>Play smart. You should always play according to your skill level and your opponent's skill level. You should not take unnecessary risks or try to show off. You should also know when to play defensively or offensively, depending on the situation. You should also use the chat and emoji features to communicate with your opponent and show respect or sportsmanship.</li>
|
116 |
-
</ul>
|
117 |
-
<h2>Conclusion</h2>
|
118 |
-
<p>8 Ball Pool Apkpure is a fun and exciting game that lets you play pool with players from all over the world. You can download and install it on your Windows PC with Gameloop emulator, and enjoy its features, rules, and challenges. You can also improve your skills and win more matches with some tips and tricks. So what are you waiting for? Download 8 Ball Pool Apkpure today and join the millions of pool lovers who play this game every day!</p>
|
119 |
-
<h2>FAQs</h2>
|
120 |
-
<h3>Q1: Is 8 Ball Pool Apkpure safe and legal?</h3>
|
121 |
-
<p>A1: Yes, 8 Ball Pool Apkpure is safe and legal to download and play. Apkpure is a reputable website that verifies all its apps and games for safety and quality. Gameloop is also a trusted emulator that does not contain any malware or viruses. However, you should always download 8 Ball Pool Apkpure from its official website or Gameloop app store, and not from any third-party sources.</p>
|
122 |
-
<h3>Q2: How can I play 8 Ball Pool Apkpure with my friends?</h3>
|
123 |
-
<p>A2: You can play 8 Ball Pool Apkpure with your friends by using the <em>Play with Friends</em> feature in the game. You can invite your friends by using their unique ID, Facebook account, or Miniclip account. You can also join or create a club with your friends and chat with them in the club chat room.</p>
|
124 |
-
<h3>Q3: How can I earn more coins and cash in 8 Ball Pool Apkpure?</h3>
|
125 |
-
<p>A3: You can earn more coins and cash in 8 Ball Pool Apkpure by winning matches, tournaments, and mini-games. You can also earn coins and cash by completing daily missions, seasonal events, pool passes, and achievements. You can also earn free coins and cash by logging in daily, watching videos, inviting friends, etc. You can also buy coins and cash with real money if you want to.</p>
|
126 |
-
<h3>Q4: How can I upgrade my cue and pool table in 8 Ball Pool Apkpure?</h3>
|
127 |
-
<p>A4: You can upgrade your cue and pool table in 8 Ball Pool Apkpure by using coins or cash. You can buy new cues and tables from the shop, or unlock them from surprise boxes, golden shots, scratch and win, etc. You can also upgrade your cues by using cash or cue pieces. You can improve the attributes and effects of your cues by leveling them up. You can also change the design and color of your cues and tables by using coins or cash.</p>
|
128 |
-
<h3>Q5: How can I contact the support team of 8 Ball Pool Apkpure?</h3>
|
129 |
-
<p>A5: You can contact the support team of 8 Ball Pool Apkpure by using the <em>Help and Support</em> feature in the game. You can access it by clicking on the <em>Settings</em> icon on the top right corner of the screen, and then clicking on the <em>Help and Support</em> button. You can then browse through the frequently asked questions, or submit a ticket to the support team. You can also contact the support team by sending an email to <a href="mailto:[email protected]">[email protected]</a>.</p> 197e85843d<br />
|
130 |
-
<br />
|
131 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Darah Tak Terbatas and Unlimited Money in Hungry Shark World Get Mod Apk Here.md
DELETED
@@ -1,115 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Hungry Shark World Mod Apk Unlimited Blood and Enjoy the Ultimate Shark Experience</h1>
|
3 |
-
<p>Do you love sharks? Do you love eating everything in your path? Do you love action-packed games with stunning graphics and sound effects? If you answered yes to any of these questions, then you will love Hungry Shark World, a thrilling game where you control a hungry shark and eat everything that gets in your way. But wait, there's more! You can also download Hungry Shark World Mod Apk Unlimited Blood, a modified version of the game that gives you unlimited blood, coins, gems, and all sharks unlocked. Sounds awesome, right? Read on to find out more about this amazing game and how to download it.</p>
|
4 |
-
<h2>What is Hungry Shark World?</h2>
|
5 |
-
<h3>A thrilling action game where you control a hungry shark</h3>
|
6 |
-
<p>Hungry Shark World is an addictive action game where you control a shark and eat everything in your path. The objective is to survive as long as you can and devour as much prey as possible to score tons of points before you eventually die. True to its name, your hungry shark is constantly losing HP due to its insatiable hunger and must continue to eat in order to stay alive. However, the sea is riddled with plenty of hazards and hostile fish that don't just let themselves get eaten, so you have to play smart in order to score high.</p>
|
7 |
-
<h2>download hungry shark world mod apk darah tak terbatas</h2><br /><p><b><b>DOWNLOAD</b> ○○○ <a href="https://jinyurl.com/2uNP3O">https://jinyurl.com/2uNP3O</a></b></p><br /><br />
|
8 |
-
<h3>Features of Hungry Shark World</h3>
|
9 |
-
<h4>Over 40 different sharks to unlock and upgrade</h4>
|
10 |
-
<p>One of the best features of Hungry Shark World is that it offers a wide variety of sharks to choose from. You can start with a small porbeagle shark and work your way up to bigger and badder sharks like the great white, hammerhead, megalodon, or even a prehistoric mosasaurus. Each shark has its own stats, abilities, appearance, and personality. You can also upgrade your sharks by spending coins and gems on their bite, speed, boost, or health.</p>
|
11 |
-
<h4>Four stunning locations to explore and devour</h4>
|
12 |
-
<p>Hungry Shark World features four different locations that you can travel to as you progress in the game. Each location has its own theme, scenery, <p>challenges, and secrets. You can explore the Pacific Islands, the Arabian Sea, the South China Sea, and the Arctic Ocean. Each location has its own unique creatures, landmarks, and events that you can discover and enjoy. You can also switch between locations at any time by using the map.</p>
|
13 |
-
<h4>Hundreds of enemies and prey to eat and collect</h4>
|
14 |
-
<p>As a hungry shark, you have a lot of options when it comes to your diet. You can eat fish, crabs, turtles, squid, octopus, dolphins, whales, seals, penguins, birds, humans, and more. Each prey has its own value and effect on your shark. Some prey will give you more points, some will heal you more, some will boost your speed or power, and some will even unlock new items or achievements. However, not all prey are easy to catch or harmless. Some prey will fight back, some will poison you, some will explode, and some will even damage your shark. You have to be careful and choose wisely what you eat.</p>
|
15 |
-
<h4>Daily chests, missions, and achievements to earn rewards</h4>
|
16 |
-
<p>Hungry Shark World also offers plenty of ways to earn rewards and bonuses in the game. You can find daily chests that contain coins, gems, or items. You can complete missions that challenge you to perform certain tasks or feats. You can also unlock achievements that reward you for reaching milestones or doing something extraordinary. These rewards will help you buy and upgrade sharks and accessories, as well as unlock new features and content in the game.</p>
|
17 |
-
<h2>Why Download Hungry Shark World Mod Apk Unlimited Blood?</h2>
|
18 |
-
<h3>Benefits of Hungry Shark World Mod Apk Unlimited Blood</h3>
|
19 |
-
<h4>Unlimited blood to survive longer and score higher</h4>
|
20 |
-
<p>The main benefit of downloading Hungry Shark World Mod Apk Unlimited Blood is that you get unlimited blood for your shark. This means that you don't have to worry about losing HP due to hunger or damage. You can survive longer and eat more without dying. This will allow you to score higher and reach new levels of fun and excitement in the game.</p>
|
21 |
-
<h4>Unlimited coins and gems to buy and upgrade sharks and accessories</h4>
|
22 |
-
<p>Another benefit of downloading Hungry Shark World Mod Apk Unlimited Blood is that you get unlimited coins and gems for your shark. This means that you don't have to grind or spend real money to buy and upgrade sharks and accessories. You can buy any shark you want from the start and upgrade it to the max without any limitations. You can also buy any accessory you want from the shop and equip it to your shark for extra benefits. This will make your shark more powerful and stylish in the game.</p>
|
23 |
-
<h4>All sharks unlocked and available from the start</h4>
|
24 |
-
<p>A third benefit of downloading Hungry Shark World Mod Apk Unlimited Blood is that you get all sharks unlocked and available from the start. This means that you don't have to play for hours or complete certain requirements to unlock new sharks in the game. You can choose any shark you want from the start and switch between them at any time by using the map. This will give you more variety and freedom in the game.</p>
|
25 |
-
<h4>No ads or in-app purchases to interrupt your gameplay</h4>
|
26 |
-
<p>A fourth benefit of downloading Hungry Shark World Mod Apk Unlimited Blood is that you get no ads or in-app purchases to interrupt your gameplay. This means that you don't have to watch annoying ads or pay real money to enjoy the game fully. You can play without any distractions or interruptions in the game.</p>
|
27 |
-
<p>galaxy shooter mod apk unlimited money<br />
|
28 |
-
galaxy shooter mod apk download<br />
|
29 |
-
galaxy shooter mod apk latest version<br />
|
30 |
-
galaxy shooter mod apk android 1<br />
|
31 |
-
galaxy shooter mod apk invader war<br />
|
32 |
-
galaxy shooter mod apk hack<br />
|
33 |
-
galaxy shooter mod apk free shopping<br />
|
34 |
-
galaxy shooter mod apk offline<br />
|
35 |
-
galaxy shooter mod apk 2023<br />
|
36 |
-
galaxy shooter mod apk space attack<br />
|
37 |
-
galaxy shooter mod apk rexdl<br />
|
38 |
-
galaxy shooter mod apk revdl<br />
|
39 |
-
galaxy shooter mod apk no ads<br />
|
40 |
-
galaxy shooter mod apk unlimited gems<br />
|
41 |
-
galaxy shooter mod apk unlimited coins<br />
|
42 |
-
galaxy shooter mod apk premium<br />
|
43 |
-
galaxy shooter mod apk pro<br />
|
44 |
-
galaxy shooter mod apk full version<br />
|
45 |
-
galaxy shooter mod apk unlocked<br />
|
46 |
-
galaxy shooter mod apk all ships<br />
|
47 |
-
galaxy shooter mod apk all levels<br />
|
48 |
-
galaxy shooter mod apk all weapons<br />
|
49 |
-
galaxy shooter mod apk all bosses<br />
|
50 |
-
galaxy shooter mod apk mega mod<br />
|
51 |
-
galaxy shooter mod apk god mode<br />
|
52 |
-
galaxy shooter mod apk infinite energy<br />
|
53 |
-
galaxy shooter mod apk unlimited lives<br />
|
54 |
-
galaxy shooter mod apk unlimited stars<br />
|
55 |
-
galaxy shooter mod apk unlimited gold<br />
|
56 |
-
galaxy shooter mod apk unlimited crystals<br />
|
57 |
-
galaxy shooter mod apk unlimited diamonds<br />
|
58 |
-
galaxy shooter mod apk unlimited power-ups<br />
|
59 |
-
galaxy shooter mod apk unlimited missiles<br />
|
60 |
-
galaxy shooter mod apk unlimited bombs<br />
|
61 |
-
galaxy shooter mod apk unlimited bullets<br />
|
62 |
-
galaxy shooter mod apk unlimited lasers<br />
|
63 |
-
galaxy shooter mod apk unlimited rockets<br />
|
64 |
-
galaxy shooter mod apk unlimited shields<br />
|
65 |
-
galaxy shooter mod apk unlimited boosters<br />
|
66 |
-
galaxy shooter mod apk unlimited drones<br />
|
67 |
-
galaxy shooter mod apk high damage<br />
|
68 |
-
galaxy shooter mod apk one hit kill<br />
|
69 |
-
galaxy shooter mod apk no root<br />
|
70 |
-
galaxy shooter mod apk no verification<br />
|
71 |
-
galaxy shooter mod apk no survey<br />
|
72 |
-
galaxy shooter mod apk for pc<br />
|
73 |
-
galaxy shooter mod apk for ios<br />
|
74 |
-
galaxy shooter mod apk for iphone<br />
|
75 |
-
galaxy shooter mod apk for ipad</p>
|
76 |
-
<h3>How to Download Hungry Shark World Mod Apk Unlimited Blood</h3>
|
77 |
-
<h4>Step 1: Click on the link below to download the mod apk file</h4>
|
78 |
-
<p>The first step to download Hungry Shark World Mod Apk Unlimited Blood is to click on the link below to download the mod apk file. The link will take you to a secure site where you can download the file safely and easily.</p>
|
79 |
-
<h4>Step 2: Allow unknown sources on your device settings</h4>
|
80 |
-
<p>The second step to download Hungry Shark World Mod Apk Unlimited Blood is to allow unknown sources on your device settings. This will enable you to install apps from sources other than the Google Play Store. To do this, go to your device settings > security > unknown sources > enable.</p>
|
81 |
-
<h4>Step 3: Install the mod apk file and launch the game</h4>
|
82 |
-
<p>The third step to download Hungry Shark World Mod Apk Unlimited Blood is to install the mod apk file and launch the game. To do this, go to your file manager > downloads > hungry-shark-world-mod-apk-unlimited-blood.apk > install > open.</p>
|
83 |
-
<h4>Step 4: Enjoy the ultimate shark experience with unlimited blood and resources</h4>
|
84 |
-
<p>The fourth and final step to download Hungry Shark World Mod Apk Unlimited Blood is to enjoy the ultimate shark experience with unlimited blood and resources. You can now play the game with no limitations or interruptions and have fun as a hungry shark. You can eat everything in your path, unlock and upgrade all sharks and accessories, explore all locations, and score higher than ever before.</p>
|
85 |
-
<h2>Tips and Tricks for Playing Hungry Shark World</h2>
|
86 |
-
<h3>Use your boost wisely to catch prey and avoid enemies</h3>
|
87 |
-
<p>One of the tips and tricks for playing Hungry Shark World is to use your boost wisely to catch prey and avoid enemies. Your boost is a powerful tool that can help you speed up, jump out of the water, or perform special attacks. However, your boost also consumes your stamina, which regenerates slowly over time. Therefore, you should use your boost sparingly and strategically, depending on the situation. For example, you can use your boost to catch fast or fleeing prey, to escape from dangerous enemies or obstacles, or to reach hidden areas or items.</p>
|
88 |
-
<h3>Buy the map to find hidden items and locations</h3>
|
89 |
-
<p>Another tip and trick for playing Hungry Shark World is to buy the map to find hidden items and locations. The map is an accessory that you can buy from the shop for 500 coins. The map will show you the layout of the location you are in, as well as the locations of chests, missions, pets, enemies, and more. The map will also show you the boundaries of the location and the portals to other locations. The map is very useful for finding secrets and completing objectives in the game.</p>
|
90 |
-
<h3>Recruit pets to help you in your journey</h3>
|
91 |
-
<p>A third tip and trick for playing Hungry Shark World is to recruit pets to help you in your journey. Pets are small creatures that you can find and collect in the game. Each pet has its own ability and effect that can benefit your shark. For example, some pets can heal you, some can attack enemies, some can collect coins or gems, some can boost your stats, and some can even unlock new features or content. You can equip up to three pets at a time and switch between them at any time by using the map.</p>
|
92 |
-
<h3>Avoid dangerous creatures like jellyfish, pufferfish, lionfish, giant squids, etc.</h3>
|
93 |
-
<p>A fourth tip and trick for playing Hungry Shark World is to avoid dangerous creatures like jellyfish, pufferfish, lionfish, giant squids, etc. These creatures are not only hard to eat but also harmful to your shark. They can poison you, stun you, damage you, or even kill you instantly. You should steer clear of these creatures unless you have a pet or an accessory that can protect you from them.</p>
|
94 |
-
<h3>Eat humans on the surface and complete missions for extra points</h3>
|
95 |
-
<p>A fifth tip and trick for playing Hungry Shark World is to eat humans on the surface and complete missions for extra points. Humans are one of the most valuable prey in the game as they give you a lot of points and sometimes coins or gems. You can find humans on beaches, boats, jet skis, helicopters, balloons, etc. You can also jump out of the water and grab them in mid-air. However, be careful as some humans will fight back with weapons or call for help from other humans or military forces. You should also complete missions that involve eating humans as they will give you bonus points and rewards.</p>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
<h3>Hungry Shark World is an addictive and fun game that lets you experience life as a shark</h3>
|
98 |
-
<p>In conclusion, Hungry Shark World is an addictive and fun game that lets you experience life as a shark. You can control a hungry shark and eat everything in your path while avoiding dangers and obstacles. You can also unlock and upgrade over 40 different sharks and explore four stunning locations in the game.</p>
|
99 |
-
<h3>Download Hungry Shark World Mod Apk Unlimited Blood to enjoy the game without any limitations or interruptions</h3>
|
100 |
-
<p>If you want to enjoy the game without any limitations or interruptions, you should download Hungry Shark World Mod Apk Unlimited Blood. This mod apk will give you unlimited blood, coins, gems, and all sharks unlocked in the game. You can play the game with no worries about dying or running out of resources. You can also buy and upgrade any shark or accessory you want from the start.</p>
|
101 |
-
<h3>Follow the tips and tricks above to maximize your score and become the king of the ocean</h3>
|
102 |
-
<p>If you want to maximize your score and become the king of the ocean, you should follow the tips and tricks above. These tips and tricks will help you play smarter and better in the game. You will be able to catch more prey and avoid enemies, find hidden items and locations, recruit pets and accessories, eat humans and complete missions, and use your boost wisely in the game.</p>
|
103 |
-
<h2>FAQs</h2>
|
104 |
-
<h3>Q: What is the difference between Hungry Shark World and Hungry Shark Evolution?</h3>
|
105 |
-
<p>A: Hungry Shark World is the sequel to Hungry Shark Evolution, a popular game that was released in 2012. Hungry Shark World has improved graphics, sound effects, gameplay, and features compared to Hungry Shark Evolution. Hungry Shark World also has more sharks, locations, enemies, prey, items, and content than Hungry Shark Evolution.</p>
|
106 |
-
<h3>Q: Is Hungry Shark World Mod Apk Unlimited Blood safe to download and install?</h3>
|
107 |
-
<p>A: Yes, Hungry Shark World Mod Apk Unlimited Blood is safe to download and install. The mod apk file is scanned and tested for viruses and malware before being uploaded to the site. The mod apk file also does not require any root or jailbreak to work on your device.</p>
|
108 |
-
<h3>Q: How can I update Hungry Shark World Mod Apk Unlimited Blood?</h3>
|
109 |
-
<p>A: To update Hungry Shark World Mod Apk Unlimited Blood, you have to download and install the latest version of the mod apk file from the same site. You can also check the site regularly for any updates or new features added to the mod apk file.</p>
|
110 |
-
<h3>Q: Can I play Hungry Shark World Mod Apk Unlimited Blood online or offline?</h3>
|
111 |
-
<p>A: You can play Hungry Shark World Mod Apk Unlimited Blood both online and offline. However, some features and content may require an internet connection to work properly. For example, you may need an internet connection to access the daily chests, missions, achievements, leaderboards, or events in the game.</p>
|
112 |
-
<h3>Q: Can I play Hungry Shark World Mod Apk Unlimited Blood with my friends or other players?</h3>
|
113 |
-
<p>A: Yes, you can play Hungry Shark World Mod Apk Unlimited Blood with your friends or other players. You can connect your game to Facebook or Google Play Games and invite your friends or other players to join you in the game. You can also compete with them on the leaderboards or cooperate with them on the events in the game.</p> 401be4b1e0<br />
|
114 |
-
<br />
|
115 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/232labs/VToonify/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: VToonify
|
3 |
-
emoji: 👨🎨
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: other
|
11 |
-
duplicated_from: PKUWilliamYang/VToonify
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2gauravc/search_summary_chatgpt/app.py
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import openai
|
3 |
-
import sys, getopt
|
4 |
-
from datetime import datetime
|
5 |
-
from streamlit.components.v1 import html
|
6 |
-
import boto3
|
7 |
-
|
8 |
-
from main import chatgpt_prompt, get_chatgpt_resp, generate_kyc_output, gsearch, save_to_s3
|
9 |
-
|
10 |
-
# Function to perform the search
|
11 |
-
# This is a placeholder function, replace it with your actual search implementation
|
12 |
-
def perform_search(pname, keywords, num_results):
|
13 |
-
# record current timestamp
|
14 |
-
start_time = datetime.now()
|
15 |
-
|
16 |
-
# Google search for the person name and get the first 20 query links
|
17 |
-
query = pname + " " + keywords
|
18 |
-
search_links = gsearch(query, num_results)
|
19 |
-
|
20 |
-
# Construct the prompt
|
21 |
-
prompt_text = chatgpt_prompt(pname, search_links)
|
22 |
-
#get ChatGPT response
|
23 |
-
resp = get_chatgpt_resp(prompt_text)
|
24 |
-
# Create PDF with links and summary
|
25 |
-
rep_txt= generate_kyc_output(query, search_links, resp, start_time)
|
26 |
-
return (rep_txt)
|
27 |
-
|
28 |
-
main_tab, help_tab, rel_tab = st.tabs(["Run the Bot", "FAQ", "Release Plan"])
|
29 |
-
|
30 |
-
with main_tab:
|
31 |
-
# Streamlit app
|
32 |
-
st.title("Adverse News Detection Assistant")
|
33 |
-
|
34 |
-
# Input fields
|
35 |
-
names_txt = st.text_input("Enter party name (or multiple names separated by ,)")
|
36 |
-
plc_text = "laundering OR terrorist OR fraud OR corrupt OR criminal OR investigation OR prosecute OR evasion OR bribe OR sanction"
|
37 |
-
keywords = st.text_input("Enter other search words:", value=plc_text)
|
38 |
-
|
39 |
-
st.sidebar.markdown("## Controls")
|
40 |
-
st.sidebar.markdown("Choose your **search** *parameters*")
|
41 |
-
num_results = st.sidebar.slider("Choose the number of search results:", 5, 30, 20, 5)
|
42 |
-
st.sidebar.markdown("## Model")
|
43 |
-
st.sidebar.markdown("GPT v3.5")
|
44 |
-
st.sidebar.markdown("## App")
|
45 |
-
st.sidebar.markdown("v0.4")
|
46 |
-
|
47 |
-
col1, col2 = st.columns(2)
|
48 |
-
with col1:
|
49 |
-
adv_nw = st.radio(
|
50 |
-
"Did you find adverse news when you performed this search manually",
|
51 |
-
('Yes', 'No', 'Dont Know'), index=2)
|
52 |
-
with col2:
|
53 |
-
#st.markdown("Touch time (manual) in mins")
|
54 |
-
man_tt = st.number_input('Touch time (manual) in mins', value=0, step=1)
|
55 |
-
#st.markdown("Touch time (with bot) in mins")
|
56 |
-
bot_tt = st.number_input('Touch time (with bot) in mins', value=0, step=1)
|
57 |
-
|
58 |
-
# Search button
|
59 |
-
if st.button("Search"):
|
60 |
-
names = names_txt.split(",")
|
61 |
-
#print(len(names))
|
62 |
-
metrics_ent = (adv_nw != "Dont Know") and (man_tt > 0) and (bot_tt > 0)
|
63 |
-
# Perform the search and display the results
|
64 |
-
if names and metrics_ent:
|
65 |
-
search_results = ""
|
66 |
-
for name in names:
|
67 |
-
#print("trying for name {} \n".format(name))
|
68 |
-
search_results += perform_search(name, keywords, num_results)
|
69 |
-
|
70 |
-
html(f"<pre>{search_results}</pre>", height=200, scrolling=True)
|
71 |
-
st.download_button('Download Report',search_results)
|
72 |
-
try:
|
73 |
-
date_time = datetime.now()
|
74 |
-
save_to_s3(search_results,date_time )
|
75 |
-
print ("Completed processing for {} names: {} at {} \n".format(len(names), names_txt, str(date_time)))
|
76 |
-
except:
|
77 |
-
print ("Completed processing with S3 write error for {} names: {} at {} \n".format(len(names),names_txt, str(date_time)))
|
78 |
-
else:
|
79 |
-
st.error("Please enter party name, adverse news selection (Yes or No) and Touch Time before searching.")
|
80 |
-
|
81 |
-
with help_tab:
|
82 |
-
st.title("FAQ")
|
83 |
-
|
84 |
-
st.markdown("Q. How do I get a count of number of adverse news?")
|
85 |
-
st.markdown("A. This functionality isnt implemented yet. A workaround is to manually count the number of links with adverse news")
|
86 |
-
|
87 |
-
st.markdown("Q. How do I summarise all the adverse news?")
|
88 |
-
st.markdown("A. This functionality isnt implemented yet. A workaround is to aggregate the summary of all adverse news items manually, and get a sumary from ChatGPT (chat.openai.com")
|
89 |
-
|
90 |
-
st.markdown("Q. Can I search in other lauguages?")
|
91 |
-
st.markdown("A. This functionality isnt implemented yet. We are planning to test this feature out with Chinese first")
|
92 |
-
|
93 |
-
st.markdown("Q. Can I search without the other search words?")
|
94 |
-
st.markdown("A. Just enter a blank space in the text space and search")
|
95 |
-
|
96 |
-
with rel_tab:
|
97 |
-
st.markdown(f"""
|
98 |
-
| NO. | Issue / Enhancement | Rel | Status |
|
99 |
-
|-----|--------------------------------------------------------------------------------------------------------------------------------------------|-----|-----------|
|
100 |
-
| 1 | Capture productivity and adverse news metrics from the user | 0.4 | Completed |
|
101 |
-
| 2 | Save productivity and adverse news metrics in a DB | 0.4 | TBD |
|
102 |
-
| 3 | Convert bot output to structured JSON - Count of adverse news - Summary of all adverse news - Identification of links with adverse news | 0.6 | TBD |
|
103 |
-
| 4 | Offer alternate solution path with web text scraping and | 0.6 | TBD |
|
104 |
-
| 5 | Create a page on metrics report | 0.5 | TBD |""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIML-TUDA/semantic-diffusion/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Semantic Diffusion
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.18.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: creativeml-openrail-m
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AP123/dreamgaussian/readme.md
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
# DreamGaussian
|
2 |
-
|
3 |
-
This repository contains the official implementation for [DreamGaussian: Generative Gaussian Splatting for Efficient 3D Content Creation]().
|
4 |
-
|
5 |
-
### [Project Page](https://dreamgaussian.github.io) | [Arxiv]()
|
6 |
-
|
7 |
-
|
8 |
-
https://github.com/dreamgaussian/dreamgaussian/assets/25863658/db860801-7b9c-4b30-9eb9-87330175f5c8
|
9 |
-
|
10 |
-
|
11 |
-
## Install
|
12 |
-
```bash
|
13 |
-
pip install -r requirements.txt
|
14 |
-
|
15 |
-
# a modified gaussain splatting (+ depth, alpha rendering)
|
16 |
-
git clone --recursive https://github.com/ashawkey/diff-gaussian-rasterization
|
17 |
-
pip install ./diff-gaussian-rasterization
|
18 |
-
|
19 |
-
# simple-knn
|
20 |
-
pip install ./simple-knn
|
21 |
-
|
22 |
-
# nvdiffrast
|
23 |
-
pip install git+https://github.com/NVlabs/nvdiffrast/
|
24 |
-
|
25 |
-
# kiuikit
|
26 |
-
pip install git+https://github.com/ashawkey/kiuikit
|
27 |
-
```
|
28 |
-
|
29 |
-
Tested on:
|
30 |
-
* Ubuntu 22 with torch 1.12 & CUDA 11.6 on a V100.
|
31 |
-
* Windows 10 with torch 2.1 & CUDA 12.1 on a 3070.
|
32 |
-
|
33 |
-
## Usage
|
34 |
-
|
35 |
-
Image-to-3D:
|
36 |
-
```bash
|
37 |
-
### preprocess
|
38 |
-
# background removal and recenter, save rgba at 256x256
|
39 |
-
python process.py data/name.jpg
|
40 |
-
|
41 |
-
# save at a larger resolution
|
42 |
-
python process.py data/name.jpg --size 512
|
43 |
-
|
44 |
-
# process all jpg images under a dir
|
45 |
-
python process.py data
|
46 |
-
|
47 |
-
### training gaussian stage
|
48 |
-
# train 500 iters (~1min) and export ckpt & coarse_mesh to logs
|
49 |
-
python main.py --config configs/image.yaml input=data/name_rgba.png save_path=name
|
50 |
-
|
51 |
-
# gui mode (supports visualizing training)
|
52 |
-
python main.py --config configs/image.yaml input=data/name_rgba.png save_path=name gui=True
|
53 |
-
|
54 |
-
# load and visualize a saved ckpt
|
55 |
-
python main.py --config configs/image.yaml load=logs/name_model.ply gui=True
|
56 |
-
|
57 |
-
# use an estimated elevation angle if image is not front-view (e.g., common looking-down image can use -30)
|
58 |
-
python main.py --config configs/image.yaml input=data/name_rgba.png save_path=name elevation=-30
|
59 |
-
|
60 |
-
### training mesh stage
|
61 |
-
# auto load coarse_mesh.obj and refine 50 iters (~1min), export fine_mesh to logs
|
62 |
-
python main2.py --config configs/image.yaml input=data/name_rgba.png save_path=name
|
63 |
-
|
64 |
-
# specify coarse mesh path explicity
|
65 |
-
python main2.py --config configs/image.yaml input=data/name_rgba.png save_path=name mesh=logs/name_mesh.obj
|
66 |
-
|
67 |
-
# gui mode
|
68 |
-
python main2.py --config configs/image.yaml input=data/name_rgba.png save_path=name gui=True
|
69 |
-
|
70 |
-
### visualization
|
71 |
-
# gui for visualizing mesh
|
72 |
-
python -m kiui.render logs/name.obj
|
73 |
-
|
74 |
-
# save 360 degree video of mesh (can run without gui)
|
75 |
-
python -m kiui.render logs/name.obj --save_video name.mp4 --wogui
|
76 |
-
|
77 |
-
# save 8 view images of mesh (can run without gui)
|
78 |
-
python -m kiui.render logs/name.obj --save images/name/ --wogui
|
79 |
-
|
80 |
-
### evaluation of CLIP-similarity
|
81 |
-
python -m kiui.cli.clip_sim data/name_rgba.png logs/name.obj
|
82 |
-
```
|
83 |
-
Please check `./configs/image.yaml` for more options.
|
84 |
-
|
85 |
-
Text-to-3D:
|
86 |
-
```bash
|
87 |
-
### training gaussian stage
|
88 |
-
python main.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
|
89 |
-
|
90 |
-
### training mesh stage
|
91 |
-
python main2.py --config configs/text.yaml prompt="a photo of an icecream" save_path=icecream
|
92 |
-
```
|
93 |
-
Please check `./configs/text.yaml` for more options.
|
94 |
-
|
95 |
-
Helper scripts:
|
96 |
-
```bash
|
97 |
-
# run all image samples (*_rgba.png) in ./data
|
98 |
-
python scripts/runall.py --dir ./data --gpu 0
|
99 |
-
|
100 |
-
# run all text samples (hardcoded in runall_sd.py)
|
101 |
-
python scripts/runall_sd.py --gpu 0
|
102 |
-
|
103 |
-
# export all ./logs/*.obj to mp4 in ./videos
|
104 |
-
python scripts/convert_obj_to_video.py --dir ./logs
|
105 |
-
```
|
106 |
-
|
107 |
-
## Acknowledgement
|
108 |
-
|
109 |
-
This work is built on many amazing research works and open-source projects, thanks a lot to all the authors for sharing!
|
110 |
-
|
111 |
-
* [gaussian-splatting](https://github.com/graphdeco-inria/gaussian-splatting) and [diff-gaussian-rasterization](https://github.com/graphdeco-inria/diff-gaussian-rasterization)
|
112 |
-
* [threestudio](https://github.com/threestudio-project/threestudio)
|
113 |
-
* [nvdiffrast](https://github.com/NVlabs/nvdiffrast)
|
114 |
-
* [dearpygui](https://github.com/hoffstadt/DearPyGui)
|
115 |
-
|
116 |
-
## Citation
|
117 |
-
|
118 |
-
```
|
119 |
-
|
120 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/g4f/Provider/Providers/Xiaor.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
import requests
|
2 |
-
import os
|
3 |
-
import json
|
4 |
-
from ...typing import sha256, Dict, get_type_hints
|
5 |
-
|
6 |
-
url = 'https://xiaor.eu.org'
|
7 |
-
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k',
|
8 |
-
'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
|
9 |
-
supports_stream = True
|
10 |
-
needs_auth = False
|
11 |
-
|
12 |
-
|
13 |
-
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
|
14 |
-
headers = {
|
15 |
-
'Content-Type': 'application/json',
|
16 |
-
}
|
17 |
-
data = {
|
18 |
-
'model': model,
|
19 |
-
'temperature': 0.7,
|
20 |
-
'presence_penalty': 0,
|
21 |
-
'messages': messages,
|
22 |
-
}
|
23 |
-
response = requests.post(url + '/p1/v1/chat/completions',
|
24 |
-
json=data, stream=True)
|
25 |
-
|
26 |
-
if stream:
|
27 |
-
for chunk in response.iter_content(chunk_size=None):
|
28 |
-
chunk = chunk.decode('utf-8')
|
29 |
-
if chunk.strip():
|
30 |
-
message = json.loads(chunk)['choices'][0]['message']['content']
|
31 |
-
yield message
|
32 |
-
else:
|
33 |
-
message = response.json()['choices'][0]['message']['content']
|
34 |
-
yield message
|
35 |
-
|
36 |
-
|
37 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
38 |
-
'(%s)' % ', '.join(
|
39 |
-
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Phind.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import random
|
4 |
-
from datetime import datetime
|
5 |
-
|
6 |
-
from ..typing import AsyncGenerator
|
7 |
-
from ..requests import StreamSession
|
8 |
-
from .base_provider import AsyncGeneratorProvider, format_prompt
|
9 |
-
|
10 |
-
|
11 |
-
class Phind(AsyncGeneratorProvider):
|
12 |
-
url = "https://www.phind.com"
|
13 |
-
working = True
|
14 |
-
supports_gpt_4 = True
|
15 |
-
|
16 |
-
@classmethod
|
17 |
-
async def create_async_generator(
|
18 |
-
cls,
|
19 |
-
model: str,
|
20 |
-
messages: list[dict[str, str]],
|
21 |
-
proxy: str = None,
|
22 |
-
**kwargs
|
23 |
-
) -> AsyncGenerator:
|
24 |
-
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
|
25 |
-
user_id = ''.join(random.choice(chars) for _ in range(24))
|
26 |
-
data = {
|
27 |
-
"question": format_prompt(messages),
|
28 |
-
"webResults": [],
|
29 |
-
"options": {
|
30 |
-
"date": datetime.now().strftime("%d.%m.%Y"),
|
31 |
-
"language": "en",
|
32 |
-
"detailed": True,
|
33 |
-
"anonUserId": user_id,
|
34 |
-
"answerModel": "GPT-4",
|
35 |
-
"creativeMode": False,
|
36 |
-
"customLinks": []
|
37 |
-
},
|
38 |
-
"context":""
|
39 |
-
}
|
40 |
-
headers = {
|
41 |
-
"Authority": cls.url,
|
42 |
-
"Accept": "application/json, text/plain, */*",
|
43 |
-
"Origin": cls.url,
|
44 |
-
"Referer": f"{cls.url}/"
|
45 |
-
}
|
46 |
-
async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
|
47 |
-
async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
|
48 |
-
response.raise_for_status()
|
49 |
-
new_lines = 0
|
50 |
-
async for line in response.iter_lines():
|
51 |
-
if not line:
|
52 |
-
continue
|
53 |
-
if line.startswith(b"data: "):
|
54 |
-
line = line[6:]
|
55 |
-
if line.startswith(b"<PHIND_METADATA>"):
|
56 |
-
continue
|
57 |
-
if line:
|
58 |
-
if new_lines:
|
59 |
-
yield "".join(["\n" for _ in range(int(new_lines / 2))])
|
60 |
-
new_lines = 0
|
61 |
-
yield line.decode()
|
62 |
-
else:
|
63 |
-
new_lines += 1
|
64 |
-
|
65 |
-
|
66 |
-
@classmethod
|
67 |
-
@property
|
68 |
-
def params(cls):
|
69 |
-
params = [
|
70 |
-
("model", "str"),
|
71 |
-
("messages", "list[dict[str, str]]"),
|
72 |
-
("stream", "bool"),
|
73 |
-
("proxy", "str"),
|
74 |
-
]
|
75 |
-
param = ", ".join([": ".join(p) for p in params])
|
76 |
-
return f"g4f.provider.{cls.__name__} supports: ({param})"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/Base.d.ts
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
// import * as Phaser from 'phaser';
|
2 |
-
import BaseShape from '../../../plugins/gameobjects/shape/shapes/BaseShapes';
|
3 |
-
|
4 |
-
export default Base;
|
5 |
-
|
6 |
-
declare namespace Base {
|
7 |
-
|
8 |
-
interface IConfig {
|
9 |
-
x?: number, y?: number,
|
10 |
-
width?: number, height?: number,
|
11 |
-
color?: number,
|
12 |
-
|
13 |
-
duration?: number,
|
14 |
-
start?: boolean,
|
15 |
-
|
16 |
-
ease?: string,
|
17 |
-
}
|
18 |
-
|
19 |
-
}
|
20 |
-
|
21 |
-
declare class Base extends BaseShape {
|
22 |
-
constructor(
|
23 |
-
scene: Phaser.Scene,
|
24 |
-
config?: Base.IConfig
|
25 |
-
)
|
26 |
-
|
27 |
-
start(duration?: number): this;
|
28 |
-
pause(): this;
|
29 |
-
resume(): this;
|
30 |
-
stop(): this;
|
31 |
-
readonly isRunning: boolean;
|
32 |
-
|
33 |
-
setValue(t: number): this;
|
34 |
-
value: number;
|
35 |
-
|
36 |
-
setColor(color: number): this;
|
37 |
-
color: number;
|
38 |
-
|
39 |
-
setDuration(duration: number): this;
|
40 |
-
duration: this;
|
41 |
-
|
42 |
-
setEase(ease: string): this;
|
43 |
-
ease: string;
|
44 |
-
|
45 |
-
readonly centerX: number;
|
46 |
-
readonly centerY: number;
|
47 |
-
readonly radius: number;
|
48 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Box.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import Base from '../base/Base';
|
2 |
-
export default class Box extends Base { }
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/anchor/Anchor.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import Anchor from '../../../plugins/behaviors/anchor/Anchor';
|
2 |
-
export default Anchor;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/EaseDataMethods.js
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
import { EaseData } from '../../../plugins/easedata.js';
|
2 |
-
import { WaitEvent } from '../utils/WaitEvent.js';
|
3 |
-
|
4 |
-
var OnInitEaseData = function (gameObject, easeData) {
|
5 |
-
// Route 'complete' of easeData to gameObject
|
6 |
-
easeData.on('complete', function (key) {
|
7 |
-
gameObject.emit(`easedata.${key}.complete`, gameObject);
|
8 |
-
gameObject.emit('easedata.complete', key, gameObject);
|
9 |
-
})
|
10 |
-
}
|
11 |
-
|
12 |
-
export default {
|
13 |
-
easeDataTo(key, value, duration, ease) {
|
14 |
-
if (!this._easeData) {
|
15 |
-
this._easeData = new EaseData(this);
|
16 |
-
OnInitEaseData(this, this._easeData);
|
17 |
-
}
|
18 |
-
this._easeData.easeTo(key, value, duration, ease);
|
19 |
-
return this;
|
20 |
-
},
|
21 |
-
|
22 |
-
easeDataToPromise(key, value, duration, ease) {
|
23 |
-
this.easeDataTo(key, value, duration, ease);
|
24 |
-
return WaitEvent(this._easeData, `complete-${key}`);
|
25 |
-
},
|
26 |
-
|
27 |
-
stopEaseData(key, toEnd) {
|
28 |
-
if (!this._easeData) {
|
29 |
-
return this;
|
30 |
-
}
|
31 |
-
|
32 |
-
this._easeData.stopEase(key, toEnd);
|
33 |
-
return this;
|
34 |
-
},
|
35 |
-
|
36 |
-
stopAllEaseData(toEnd) {
|
37 |
-
if (!this._easeData) {
|
38 |
-
return this;
|
39 |
-
}
|
40 |
-
|
41 |
-
this._easeData.stopAll(toEnd);
|
42 |
-
return this;
|
43 |
-
}
|
44 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/Layout.js
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
var Layout = function () {
|
2 |
-
// Save scale
|
3 |
-
var scaleXSave = this.scaleX;
|
4 |
-
var scaleYSave = this.scaleY;
|
5 |
-
var scale1 = (scaleXSave === 1) && (scaleYSave === 1);
|
6 |
-
if (!scale1) {
|
7 |
-
this.setScale(1);
|
8 |
-
}
|
9 |
-
|
10 |
-
// Run layout with scale = 1
|
11 |
-
this.runLayout();
|
12 |
-
|
13 |
-
// Restore scale
|
14 |
-
if (!scale1) {
|
15 |
-
this.setScale(scaleXSave, scaleYSave);
|
16 |
-
}
|
17 |
-
return this;
|
18 |
-
}
|
19 |
-
export default Layout;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch/NinePatch.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import NinePatch from '../../../plugins/ninepatch.js'
|
2 |
-
export default NinePatch;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/ninepatch2/Factory.js
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
import NinePatch from './NinePatch.js';
|
2 |
-
import ObjectFactory from '../ObjectFactory.js';
|
3 |
-
import SetValue from '../../../plugins/utils/object/SetValue.js';
|
4 |
-
|
5 |
-
ObjectFactory.register('ninePatch2', function (x, y, width, height, key, columns, rows, config) {
|
6 |
-
var gameObject = new NinePatch(this.scene, x, y, width, height, key, columns, rows, config);
|
7 |
-
this.scene.add.existing(gameObject);
|
8 |
-
return gameObject;
|
9 |
-
});
|
10 |
-
|
11 |
-
SetValue(window, 'RexPlugins.UI.NinePatch2', NinePatch);
|
12 |
-
|
13 |
-
export default NinePatch;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWang/lama/models/ade20k/segm_lib/utils/data/dataset.py
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import bisect
|
2 |
-
import warnings
|
3 |
-
|
4 |
-
from torch._utils import _accumulate
|
5 |
-
from torch import randperm
|
6 |
-
|
7 |
-
|
8 |
-
class Dataset(object):
|
9 |
-
"""An abstract class representing a Dataset.
|
10 |
-
|
11 |
-
All other datasets should subclass it. All subclasses should override
|
12 |
-
``__len__``, that provides the size of the dataset, and ``__getitem__``,
|
13 |
-
supporting integer indexing in range from 0 to len(self) exclusive.
|
14 |
-
"""
|
15 |
-
|
16 |
-
def __getitem__(self, index):
|
17 |
-
raise NotImplementedError
|
18 |
-
|
19 |
-
def __len__(self):
|
20 |
-
raise NotImplementedError
|
21 |
-
|
22 |
-
def __add__(self, other):
|
23 |
-
return ConcatDataset([self, other])
|
24 |
-
|
25 |
-
|
26 |
-
class TensorDataset(Dataset):
|
27 |
-
"""Dataset wrapping data and target tensors.
|
28 |
-
|
29 |
-
Each sample will be retrieved by indexing both tensors along the first
|
30 |
-
dimension.
|
31 |
-
|
32 |
-
Arguments:
|
33 |
-
data_tensor (Tensor): contains sample data.
|
34 |
-
target_tensor (Tensor): contains sample targets (labels).
|
35 |
-
"""
|
36 |
-
|
37 |
-
def __init__(self, data_tensor, target_tensor):
|
38 |
-
assert data_tensor.size(0) == target_tensor.size(0)
|
39 |
-
self.data_tensor = data_tensor
|
40 |
-
self.target_tensor = target_tensor
|
41 |
-
|
42 |
-
def __getitem__(self, index):
|
43 |
-
return self.data_tensor[index], self.target_tensor[index]
|
44 |
-
|
45 |
-
def __len__(self):
|
46 |
-
return self.data_tensor.size(0)
|
47 |
-
|
48 |
-
|
49 |
-
class ConcatDataset(Dataset):
|
50 |
-
"""
|
51 |
-
Dataset to concatenate multiple datasets.
|
52 |
-
Purpose: useful to assemble different existing datasets, possibly
|
53 |
-
large-scale datasets as the concatenation operation is done in an
|
54 |
-
on-the-fly manner.
|
55 |
-
|
56 |
-
Arguments:
|
57 |
-
datasets (iterable): List of datasets to be concatenated
|
58 |
-
"""
|
59 |
-
|
60 |
-
@staticmethod
|
61 |
-
def cumsum(sequence):
|
62 |
-
r, s = [], 0
|
63 |
-
for e in sequence:
|
64 |
-
l = len(e)
|
65 |
-
r.append(l + s)
|
66 |
-
s += l
|
67 |
-
return r
|
68 |
-
|
69 |
-
def __init__(self, datasets):
|
70 |
-
super(ConcatDataset, self).__init__()
|
71 |
-
assert len(datasets) > 0, 'datasets should not be an empty iterable'
|
72 |
-
self.datasets = list(datasets)
|
73 |
-
self.cumulative_sizes = self.cumsum(self.datasets)
|
74 |
-
|
75 |
-
def __len__(self):
|
76 |
-
return self.cumulative_sizes[-1]
|
77 |
-
|
78 |
-
def __getitem__(self, idx):
|
79 |
-
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
|
80 |
-
if dataset_idx == 0:
|
81 |
-
sample_idx = idx
|
82 |
-
else:
|
83 |
-
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
|
84 |
-
return self.datasets[dataset_idx][sample_idx]
|
85 |
-
|
86 |
-
@property
|
87 |
-
def cummulative_sizes(self):
|
88 |
-
warnings.warn("cummulative_sizes attribute is renamed to "
|
89 |
-
"cumulative_sizes", DeprecationWarning, stacklevel=2)
|
90 |
-
return self.cumulative_sizes
|
91 |
-
|
92 |
-
|
93 |
-
class Subset(Dataset):
|
94 |
-
def __init__(self, dataset, indices):
|
95 |
-
self.dataset = dataset
|
96 |
-
self.indices = indices
|
97 |
-
|
98 |
-
def __getitem__(self, idx):
|
99 |
-
return self.dataset[self.indices[idx]]
|
100 |
-
|
101 |
-
def __len__(self):
|
102 |
-
return len(self.indices)
|
103 |
-
|
104 |
-
|
105 |
-
def random_split(dataset, lengths):
|
106 |
-
"""
|
107 |
-
Randomly split a dataset into non-overlapping new datasets of given lengths
|
108 |
-
ds
|
109 |
-
|
110 |
-
Arguments:
|
111 |
-
dataset (Dataset): Dataset to be split
|
112 |
-
lengths (iterable): lengths of splits to be produced
|
113 |
-
"""
|
114 |
-
if sum(lengths) != len(dataset):
|
115 |
-
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
|
116 |
-
|
117 |
-
indices = randperm(sum(lengths))
|
118 |
-
return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alican/pixera/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Pixera
|
3 |
-
emoji: 💻
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.1.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/discriminator.py
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
from torch import nn
|
2 |
-
|
3 |
-
|
4 |
-
class LatentCodesDiscriminator(nn.Module):
|
5 |
-
def __init__(self, style_dim, n_mlp):
|
6 |
-
super().__init__()
|
7 |
-
|
8 |
-
self.style_dim = style_dim
|
9 |
-
|
10 |
-
layers = []
|
11 |
-
for i in range(n_mlp-1):
|
12 |
-
layers.append(
|
13 |
-
nn.Linear(style_dim, style_dim)
|
14 |
-
)
|
15 |
-
layers.append(nn.LeakyReLU(0.2))
|
16 |
-
layers.append(nn.Linear(512, 1))
|
17 |
-
self.mlp = nn.Sequential(*layers)
|
18 |
-
|
19 |
-
def forward(self, w):
|
20 |
-
return self.mlp(w)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './ccnet_r50-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_160k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/ade20k.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
|
4 |
-
]
|
5 |
-
model = dict(
|
6 |
-
decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://resnest101',
|
4 |
-
backbone=dict(
|
5 |
-
type='ResNeSt',
|
6 |
-
stem_channels=128,
|
7 |
-
radix=2,
|
8 |
-
reduction_factor=4,
|
9 |
-
avg_down_stride=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/evaluate.py
DELETED
@@ -1,151 +0,0 @@
|
|
1 |
-
import datetime
|
2 |
-
from pathlib import Path
|
3 |
-
|
4 |
-
import pandas as pd
|
5 |
-
import torch
|
6 |
-
from datasets import load_dataset
|
7 |
-
from tqdm import tqdm
|
8 |
-
|
9 |
-
from modules import shared
|
10 |
-
from modules.models import load_model, unload_model
|
11 |
-
from modules.models_settings import get_model_metadata, update_model_parameters
|
12 |
-
from modules.text_generation import encode
|
13 |
-
|
14 |
-
|
15 |
-
def load_past_evaluations():
|
16 |
-
if Path('logs/evaluations.csv').exists():
|
17 |
-
df = pd.read_csv(Path('logs/evaluations.csv'), dtype=str)
|
18 |
-
df['Perplexity'] = pd.to_numeric(df['Perplexity'])
|
19 |
-
return df
|
20 |
-
else:
|
21 |
-
return pd.DataFrame(columns=['Model', 'LoRAs', 'Dataset', 'Perplexity', 'stride', 'max_length', 'Date', 'Comment'])
|
22 |
-
|
23 |
-
|
24 |
-
past_evaluations = load_past_evaluations()
|
25 |
-
|
26 |
-
|
27 |
-
def save_past_evaluations(df):
|
28 |
-
global past_evaluations
|
29 |
-
past_evaluations = df
|
30 |
-
filepath = Path('logs/evaluations.csv')
|
31 |
-
filepath.parent.mkdir(parents=True, exist_ok=True)
|
32 |
-
df.to_csv(filepath, index=False)
|
33 |
-
|
34 |
-
|
35 |
-
def calculate_perplexity(models, input_dataset, stride, _max_length):
|
36 |
-
'''
|
37 |
-
Based on:
|
38 |
-
https://huggingface.co/docs/transformers/perplexity#calculating-ppl-with-fixedlength-models
|
39 |
-
'''
|
40 |
-
|
41 |
-
global past_evaluations
|
42 |
-
cumulative_log = ''
|
43 |
-
cumulative_log += "Loading the input dataset...\n\n"
|
44 |
-
yield cumulative_log
|
45 |
-
|
46 |
-
# Copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/triton/utils/datautils.py
|
47 |
-
if input_dataset == 'wikitext':
|
48 |
-
data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
|
49 |
-
text = "\n\n".join(data['text'])
|
50 |
-
elif input_dataset == 'ptb':
|
51 |
-
data = load_dataset('ptb_text_only', 'penn_treebank', split='validation')
|
52 |
-
text = "\n\n".join(data['sentence'])
|
53 |
-
elif input_dataset == 'ptb_new':
|
54 |
-
data = load_dataset('ptb_text_only', 'penn_treebank', split='test')
|
55 |
-
text = " ".join(data['sentence'])
|
56 |
-
else:
|
57 |
-
with open(Path(f'training/datasets/{input_dataset}.txt'), 'r', encoding='utf-8') as f:
|
58 |
-
text = f.read()
|
59 |
-
|
60 |
-
for model in models:
|
61 |
-
if is_in_past_evaluations(model, input_dataset, stride, _max_length):
|
62 |
-
cumulative_log += f"{model} has already been tested. Ignoring.\n\n"
|
63 |
-
yield cumulative_log
|
64 |
-
continue
|
65 |
-
|
66 |
-
if model != 'current model':
|
67 |
-
try:
|
68 |
-
yield cumulative_log + f"Loading {model}...\n\n"
|
69 |
-
model_settings = get_model_metadata(model)
|
70 |
-
shared.settings.update({k: v for k, v in model_settings.items() if k in shared.settings}) # hijacking the interface defaults
|
71 |
-
update_model_parameters(model_settings) # hijacking the command-line arguments
|
72 |
-
shared.model_name = model
|
73 |
-
unload_model()
|
74 |
-
shared.model, shared.tokenizer = load_model(shared.model_name)
|
75 |
-
except:
|
76 |
-
cumulative_log += f"Failed to load {model}. Moving on.\n\n"
|
77 |
-
yield cumulative_log
|
78 |
-
continue
|
79 |
-
|
80 |
-
cumulative_log += f"Processing {shared.model_name}...\n\n"
|
81 |
-
yield cumulative_log + "Tokenizing the input dataset...\n\n"
|
82 |
-
encodings = encode(text, add_special_tokens=False)
|
83 |
-
seq_len = encodings.shape[1]
|
84 |
-
if _max_length:
|
85 |
-
max_length = _max_length
|
86 |
-
elif hasattr(shared.model.config, 'max_position_embeddings'):
|
87 |
-
max_length = shared.model.config.max_position_embeddings
|
88 |
-
else:
|
89 |
-
max_length = 2048
|
90 |
-
|
91 |
-
nlls = []
|
92 |
-
prev_end_loc = 0
|
93 |
-
for begin_loc in tqdm(range(0, seq_len, stride)):
|
94 |
-
yield cumulative_log + f"Evaluating... {100*begin_loc/seq_len:.2f}%"
|
95 |
-
end_loc = min(begin_loc + max_length, seq_len)
|
96 |
-
trg_len = end_loc - prev_end_loc # may be different from stride on last loop
|
97 |
-
input_ids = encodings[:, begin_loc:end_loc]
|
98 |
-
target_ids = input_ids.clone()
|
99 |
-
target_ids[:, :-trg_len] = -100
|
100 |
-
|
101 |
-
with torch.no_grad():
|
102 |
-
outputs = shared.model(input_ids=input_ids, labels=target_ids)
|
103 |
-
|
104 |
-
# loss is calculated using CrossEntropyLoss which averages over valid labels
|
105 |
-
# N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels
|
106 |
-
# to the left by 1.
|
107 |
-
neg_log_likelihood = outputs.loss
|
108 |
-
|
109 |
-
nlls.append(neg_log_likelihood)
|
110 |
-
|
111 |
-
prev_end_loc = end_loc
|
112 |
-
if end_loc == seq_len:
|
113 |
-
break
|
114 |
-
|
115 |
-
ppl = torch.exp(torch.stack(nlls).mean())
|
116 |
-
add_entry_to_past_evaluations(float(ppl), shared.model_name, input_dataset, stride, _max_length)
|
117 |
-
save_past_evaluations(past_evaluations)
|
118 |
-
cumulative_log += f"The perplexity for {shared.model_name} is: {float(ppl)}\n\n"
|
119 |
-
yield cumulative_log
|
120 |
-
|
121 |
-
|
122 |
-
def add_entry_to_past_evaluations(perplexity, model, dataset, stride, max_length):
|
123 |
-
global past_evaluations
|
124 |
-
entry = {
|
125 |
-
'Model': model,
|
126 |
-
'LoRAs': ', '.join(shared.lora_names) or '-',
|
127 |
-
'Dataset': dataset,
|
128 |
-
'Perplexity': perplexity,
|
129 |
-
'stride': str(stride),
|
130 |
-
'max_length': str(max_length),
|
131 |
-
'Date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
|
132 |
-
'Comment': ''
|
133 |
-
}
|
134 |
-
past_evaluations = pd.concat([past_evaluations, pd.DataFrame([entry])], ignore_index=True)
|
135 |
-
|
136 |
-
|
137 |
-
def is_in_past_evaluations(model, dataset, stride, max_length):
|
138 |
-
entries = past_evaluations[(past_evaluations['Model'] == model) &
|
139 |
-
(past_evaluations['Dataset'] == dataset) &
|
140 |
-
(past_evaluations['max_length'] == str(max_length)) &
|
141 |
-
(past_evaluations['stride'] == str(stride))]
|
142 |
-
|
143 |
-
if entries.shape[0] > 0:
|
144 |
-
return True
|
145 |
-
else:
|
146 |
-
return False
|
147 |
-
|
148 |
-
|
149 |
-
def generate_markdown_table():
|
150 |
-
sorted_df = past_evaluations.sort_values(by=['Dataset', 'stride', 'Perplexity', 'Date'])
|
151 |
-
return sorted_df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnnonSubmission/xai-cl/data_transforms.py
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torchvision
|
3 |
-
import torchvision.transforms as transforms
|
4 |
-
import torch.nn as nn
|
5 |
-
from PIL import Image, ImageOps, ImageFilter
|
6 |
-
import random
|
7 |
-
|
8 |
-
def add_normalization_to_transform(unnormalized_transforms):
|
9 |
-
"""Adds ImageNet normalization to all transforms"""
|
10 |
-
normalized_transform = {}
|
11 |
-
for key, value in unnormalized_transforms.items():
|
12 |
-
normalized_transform[key] = transforms.Compose([value,
|
13 |
-
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
14 |
-
std=[0.229, 0.224, 0.225])])
|
15 |
-
return normalized_transform
|
16 |
-
|
17 |
-
def modify_transforms(normal_transforms, no_shift_transforms, ig_transforms):
|
18 |
-
normal_transforms = add_normalization_to_transform(normal_transforms)
|
19 |
-
no_shift_transforms = add_normalization_to_transform(no_shift_transforms)
|
20 |
-
ig_transforms = add_normalization_to_transform(ig_transforms)
|
21 |
-
return normal_transforms, no_shift_transforms, ig_transforms
|
22 |
-
|
23 |
-
class Solarization(object):
|
24 |
-
def __init__(self, p):
|
25 |
-
self.p = p
|
26 |
-
|
27 |
-
def __call__(self, img):
|
28 |
-
if random.random() < self.p:
|
29 |
-
return ImageOps.solarize(img)
|
30 |
-
else:
|
31 |
-
return img
|
32 |
-
|
33 |
-
# no imagent normalization for simclrv2
|
34 |
-
pure_transform = transforms.Compose([transforms.Resize(256),
|
35 |
-
transforms.CenterCrop(224),
|
36 |
-
transforms.ToTensor()])
|
37 |
-
|
38 |
-
aug_transform = transforms.Compose([transforms.RandomResizedCrop(224),
|
39 |
-
transforms.RandomHorizontalFlip(p=0.5),
|
40 |
-
transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
|
41 |
-
transforms.RandomGrayscale(p=0.2),
|
42 |
-
transforms.RandomApply([transforms.GaussianBlur(kernel_size=(21,21), sigma=(0.1,2.0))], p=0.5),
|
43 |
-
transforms.ToTensor()])
|
44 |
-
|
45 |
-
ig_pure_transform = transforms.Compose([transforms.Resize(256),
|
46 |
-
transforms.CenterCrop(224),
|
47 |
-
transforms.ToTensor()])
|
48 |
-
|
49 |
-
ig_transform_colorjitter = transforms.Compose([transforms.Resize(256),
|
50 |
-
transforms.CenterCrop(224),
|
51 |
-
transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.4)], p=1),
|
52 |
-
transforms.ToTensor()])
|
53 |
-
|
54 |
-
ig_transform_blur = transforms.Compose([transforms.Resize(256),
|
55 |
-
transforms.CenterCrop(224),
|
56 |
-
transforms.RandomApply([transforms.GaussianBlur(kernel_size=(11,11), sigma=(5,5))], p=1),
|
57 |
-
transforms.ToTensor()])
|
58 |
-
|
59 |
-
ig_transform_solarize = transforms.Compose([transforms.Resize(256),
|
60 |
-
transforms.CenterCrop(224),
|
61 |
-
Solarization(p=1.0),
|
62 |
-
transforms.ToTensor()])
|
63 |
-
|
64 |
-
ig_transform_grayscale = transforms.Compose([transforms.Resize(256),
|
65 |
-
transforms.CenterCrop(224),
|
66 |
-
transforms.RandomGrayscale(p=1),
|
67 |
-
transforms.ToTensor()])
|
68 |
-
|
69 |
-
|
70 |
-
ig_transform_combine = transforms.Compose([transforms.Resize(256),
|
71 |
-
transforms.CenterCrop(224),
|
72 |
-
transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
|
73 |
-
transforms.RandomGrayscale(p=0.2),
|
74 |
-
transforms.RandomApply([transforms.GaussianBlur(kernel_size=(21,21), sigma=(0.1, 2.0))], p=0.5),
|
75 |
-
transforms.ToTensor()])
|
76 |
-
|
77 |
-
pure_transform_no_shift = transforms.Compose([transforms.Resize((224, 224)),
|
78 |
-
transforms.ToTensor()])
|
79 |
-
|
80 |
-
aug_transform_no_shift = transforms.Compose([transforms.Resize((224, 224)),
|
81 |
-
transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),
|
82 |
-
transforms.RandomGrayscale(p=0.2),
|
83 |
-
transforms.ToTensor()])
|
84 |
-
|
85 |
-
normal_transforms = {'pure': pure_transform,
|
86 |
-
'aug': aug_transform}
|
87 |
-
|
88 |
-
no_shift_transforms = {'pure': pure_transform_no_shift,
|
89 |
-
'aug': aug_transform_no_shift}
|
90 |
-
|
91 |
-
ig_transforms = {'pure': ig_pure_transform,
|
92 |
-
'color_jitter': ig_transform_colorjitter,
|
93 |
-
'blur': ig_transform_blur,
|
94 |
-
'grayscale': ig_transform_grayscale,
|
95 |
-
'solarize': ig_transform_solarize,
|
96 |
-
'combine': ig_transform_combine}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Counter Strike 1.6 Original.md
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar y jugar Counter-Strike 1.6 Original</h1>
|
3 |
-
<p></p>
|
4 |
-
<h2>descargar counter strike 1.6 original</h2><br /><p><b><b>Download Zip</b> ⇒⇒⇒ <a href="https://bltlly.com/2v6K4r">https://bltlly.com/2v6K4r</a></b></p><br /><br />
|
5 |
-
<h2>Introducción</h2>
|
6 |
-
<p>Counter-Strike 1.6 original, también conocido como Half-Life: Counter-Strike o CS 1.6, es un juego de disparos táctico en primera persona que fue lanzado en 2000 por Valve. Inicialmente fue desarrollado y lanzado como una modificación de Half-Life por Minh "Gooseman" Le y Jess Cliffe en 1999, antes de ser contratados por Valve y la propiedad intelectual del juego fue adquirida. </p>
|
7 |
-
<p>El juego se desarrolla en varios lugares alrededor del mundo, donde los jugadores asumen el papel de fuerzas antiterroristas o militantes terroristas. Durante cada ronda de juego, los dos equipos tienen la tarea de derrotarse entre sí mediante el logro de los objetivos del mapa o la eliminación de todos los combatientes enemigos. Cada jugador puede personalizar su arsenal de armas y accesorios al comienzo de cada partido, con la moneda que se gana después del final de cada ronda. </p>
|
8 |
-
<p>Counter-Strike 1.6 original es uno de los juegos FPS multijugador más populares e influyentes de todos los tiempos, con millones de jugadores y fans en todo el mundo. Ha generado varias secuelas, remakes, ports, spin-offs, mods y torneos a lo largo de los años. También es uno de los títulos de esports más grandes, con equipos profesionales y jugadores compitiendo por la fama y la fortuna en varias ligas y eventos. </p>
|
9 |
-
<p></p>
|
10 |
-
<p>Algunas de las principales características y modos de juego de Counter-Strike 1.6 original son:</p>
|
11 |
-
<ul>
|
12 |
-
<li><b>Diseño original:</b> El juego tiene un aspecto clásico y auténtico, con armas originales, modelos, sonidos, mapas y menús. </li>
|
13 |
-
<li><b>Bots originales:</b> El juego incluye bots incorporados (zbots) que se pueden controlar presionando el botón H. Se pueden usar para practicar sin conexión o llenar espacios vacíos en servidores en línea. </li>
|
14 |
-
|
15 |
-
<li><b>Rescate de rehenes:</b> Este es otro modo de juego popular en Counter-Strike 1.6 original. Los antiterroristas deben rescatar a un grupo de rehenes retenidos por los terroristas en su base y escoltarlos a una zona segura en el mapa. Los terroristas ganan si impiden que los rehenes sean rescatados o eliminan a todos los antiterroristas. </li>
|
16 |
-
<li><b>Asesinato:</b> Este es un modo de juego raro en Counter-Strike 1.6 original. Uno de los antiterroristas es elegido para actuar como un VIP y debe ser escoltado por sus compañeros de equipo a un lugar designado en el mapa. Los terroristas ganan si matan la cuenta si no tienes una ya. Puedes registrarte gratis en el sitio web de Steam o descargar el cliente de Steam y registrarte desde allí. </li>
|
17 |
-
<li>Inicia sesión en tu cuenta de Steam y busca el original de Counter-Strike 1.6 en la tienda de Steam. También puedes usar este enlace para acceder directamente a la página del juego: [Counter-Strike on Steam]. </li>
|
18 |
-
<li>Haga clic en el "Añadir al carrito" botón y proceder a la caja. El juego cuesta $9.99 USD a partir de junio de 2023, pero puede encontrar descuentos o paquetes durante las ventas o promociones. </li>
|
19 |
-
<li>Después de completar tu pago, el juego se agregará a tu biblioteca de Steam. A continuación, puede descargarlo e instalarlo haciendo clic en el botón "Instalar" en la página del juego o en su biblioteca. </li>
|
20 |
-
<li>Una vez que el juego está instalado, puede iniciarlo haciendo clic en el botón "Jugar" en la página del juego o en su biblioteca. También puede crear un acceso directo de escritorio para facilitar el acceso. </li>
|
21 |
-
</ol>
|
22 |
-
<h3>Cómo descargar el juego de otras fuentes</h3>
|
23 |
-
<p>Si no quieres comprar el juego en Steam, también puedes descargarlo de otras fuentes que ofrecen versiones gratuitas o no oficiales de Counter-Strike 1.6 original. Sin embargo, debe tener cuidado y precaución al hacerlo, ya que algunas de estas fuentes pueden contener virus, malware u otros archivos dañinos que podrían dañar su computadora o comprometer su seguridad. Estos son algunos consejos a seguir:</p>
|
24 |
-
<ul>
|
25 |
-
|
26 |
-
<li>Escanee el archivo descargado con un programa antivirus o anti-malware antes de abrirlo o instalarlo. Elimine cualquier archivo que se detecte como infectado o sospechoso. </li>
|
27 |
-
<li>Asegúrese de que el archivo descargado es compatible con su sistema operativo y cumple con los requisitos mínimos del sistema para el juego. Es posible que necesite instalar software o controladores adicionales para ejecutar el juego correctamente. </li>
|
28 |
-
<li>Siga las instrucciones proporcionadas por el sitio web o el archivo para instalar y ejecutar el juego. Algunos archivos pueden requerir que los extraigas usando un programa como WinRAR o 7-Zip antes de instalarlos. </li>
|
29 |
-
<li>Ten en cuenta los riesgos y limitaciones de descargar el juego desde otras fuentes. Es posible que no puedas jugar en línea con otros jugadores que tengan la versión oficial del juego, o que puedas encontrar errores, errores o fallos que afecten tu experiencia de juego. </li>
|
30 |
-
</ul>
|
31 |
-
<h3>Cómo comprobar los requisitos del sistema y la compatibilidad</h3>
|
32 |
-
<p>Antes de descargar y jugar Counter-Strike 1.6 original, usted debe asegurarse de que su computadora cumple con los requisitos mínimos del sistema para el juego y es compatible con él. Estos son algunos de los requisitos del sistema y problemas de compatibilidad que debe comprobar:</p>
|
33 |
-
<tabla>
|
34 |
-
<tr><th>Requisito del sistema</th><th>Mínimo</th><th>Recomendado</th></tr>
|
35 |
-
<tr><td>Sistema operativo</td><td>Windows XP/Vista/7/8/10</td><td><td>Windows XP/Vista/7/8/10</td></tr>
|
36 |
-
<tr><td>CPU</td><td>Pentium III 500 MHz o equivalente</td><td>Pentium 4 1 GHz o equivalente</td></tr>
|
37 |
-
<tr><td>RAM</td><td>96 MB</td><td>256 MB</td></tr>
|
38 |
-
<tr><td>Tarjeta gráfica</td><td>Tarjeta de video de 16 MB con soporte para DirectX 8</td><td>Tarjeta de video de 32 MB con soporte para DirectX 9</td></tr>
|
39 |
-
<tr><td>Espacio en disco duro</td><td>500 MB</td><td>1 GB</td></tr>
|
40 |
-
<tr><td>Conexión a Internet</td><td>Banda ancha (para jugar en línea)</td><td>Banda ancha (para jugar en línea)</td></tr>
|
41 |
-
<tr><td>Tarjeta de sonido</td><td>Tarjeta de sonido compatible con DirectX</td><td>Tarjeta de sonido compatible con DirectX</td></tr>
|
42 |
-
|
43 |
-
<tr><td>DVD-ROM Drive (para copia física)</td><td>N/A</td><td><N/A</td></tr>
|
44 |
-
<tr><th colspan="3">Problemas de compatibilidad</th></tr>
|
45 |
-
<tr><td colspan="3">Algunos usuarios pueden experimentar problemas al ejecutar Counter-Strike 1.6 original en versiones más recientes de Windows, como Windows 10. Algunos de estos problemas incluyen bajo FPS, pantalla negra, retraso del ratón, problemas de sonido, etc. Para solucionar estos problemas, es posible que tenga que ajustar algunos ajustes en las propiedades del juego, como el modo de compatibilidad, modo administrador, resolución, etc. Puede encontrar más información y soluciones en varios foros y sitios web dedicados a Counter Strike 1.6 original. </td></tr>
|
46 |
-
</tabla>
|
47 |
-
<h2>Cómo jugar Counter-Strike 1.6 original</h2>
|
48 |
-
<p>Ahora que has descargado e instalado Counter-Strike 1.6 original, estás listo para jugar y divertirte. Estos son algunos de los pasos y consejos básicos para ayudarte a empezar:</p>
|
49 |
-
<h3>Cómo unirse a un servidor y seleccionar un equipo</h3>
|
50 |
-
<p>Para jugar Counter-Strike 1.6 original en línea, es necesario unirse a un servidor que alberga el juego. Puede unirse a un servidor existente o crear su propio servidor. Estos son los pasos para unirse a un servidor existente:</p>
|
51 |
-
<ol>
|
52 |
-
<li> Inicie el juego y haga clic en el botón "Buscar servidores" en el menú principal. </li>
|
53 |
-
<li>Aparecerá una lista de servidores disponibles, mostrando su nombre, mapa, reproductores, ping, etc. Puede filtrar la lista usando las pestañas en la parte superior o el cuadro de búsqueda en la parte inferior. </li>
|
54 |
-
<li>Seleccione un servidor al que desea unirse y haga clic en el botón "Conectar" en la parte inferior derecha. También puede hacer doble clic en el nombre del servidor o hacer clic derecho y elegir "Conectar" en el menú. </li>
|
55 |
-
<li>El juego cargará el mapa y te conectará al servidor. Es posible que deba esperar unos segundos o minutos dependiendo de la velocidad de Internet y la configuración del servidor. </li>
|
56 |
-
|
57 |
-
<li>A continuación, verá una pantalla que muestra los miembros de su equipo y sus resultados. También puede chatear con sus compañeros de equipo o todos los jugadores utilizando las teclas Y o U respectivamente. Pulse OK para continuar. </li>
|
58 |
-
</ol>
|
59 |
-
<p>Para crear tu propio servidor, debes seguir estos pasos:</p>
|
60 |
-
<ol>
|
61 |
-
<li> Inicie el juego y haga clic en el botón "Crear servidor" en el menú principal. </li>
|
62 |
-
<li>Aparecerá una ventana que le permite configurar la configuración de su servidor, como nombre, contraseña, mapa, modo de juego, jugadores máximos, etc. También puede habilitar o desactivar bots, trucos, fuego amigo, etc.</li>
|
63 |
-
<li>Después de configurar la configuración de su servidor, haga clic en el botón "Inicio" en la parte inferior derecha. El juego cargará el mapa y creará su servidor. </li>
|
64 |
-
<li>A continuación, verá una pantalla que le pide que elija un equipo: Terroristas o Antiterroristas. También puede elegir ser un espectador y ver jugar a otros jugadores. Haga clic en el equipo al que desea unirse y pulse OK.</li>
|
65 |
-
<li>A continuación, verá una pantalla que muestra los miembros de su equipo y sus resultados. También puede chatear con sus compañeros de equipo o todos los jugadores utilizando las teclas Y o U respectivamente. Pulse OK para continuar. </li>
|
66 |
-
</ol>
|
67 |
-
<h3>Cómo comprar armas y equipos</h3>
|
68 |
-
<p>Al comienzo de cada ronda, usted tiene una cantidad limitada de tiempo y dinero para comprar armas y equipos para usted y sus compañeros de equipo. Puede comprar artículos desde el menú de compra presionando la tecla B o usando la rueda del ratón. Estos son algunos de los artículos que puede comprar:</p>
|
69 |
-
<ul>
|
70 |
-
<li><b>Pistolas:</b> Estas son armas secundarias que son baratas y fáciles de usar, pero tienen poco daño y precisión. Algunos ejemplos son Glock, USP, Desert Eagle, etc.</li>
|
71 |
-
<li><b>Rifles:</b> Estas son armas primarias que son caras y poderosas, pero tienen un alto retroceso y peso. Algunos ejemplos son AK-47, M4A1, AWP, etc.</li>
|
72 |
-
<li><b>Escopetas:</b> Estas son armas primarias que son baratas y eficaces a corta distancia, pero tienen baja precisión y capacidad de munición. Algunos ejemplos son XM1014, M3 Super 90, etc.</li>
|
73 |
-
|
74 |
-
<li><b>Ametralladoras:</b> Estas son armas primarias que son muy caras y de fuego pesado, pero tienen un alto retroceso y ruido. Algunos ejemplos son M249 Para, Negev, etc.</li>
|
75 |
-
<li><b>Granadas:</b> Estos son elementos desechables que pueden causar daño o efectos a enemigos o aliados. Algunos ejemplos son la granada HE (explosiva), flashbang (cegadora), granada de humo (oscurecimiento), etc.</li>
|
76 |
-
<li><b>Armadura:</b> Este es un artículo que puede protegerte de balas y granadas. Puedes comprar kevlar (armadura corporal) o kevlar + casco (armadura para la cabeza). </li>
|
77 |
-
<li><b>Gafas de visión nocturna:</b> Este es un elemento que puede ayudarte a ver en áreas oscuras. Puedes activarlo o desactivarlo presionando la tecla N. </li>
|
78 |
-
<li><b>Desactivar kit:</b> Este es un elemento que puede ayudarle a desactivar la bomba más rápido. Solo puede comprarla si es un antiterrorista. </li>
|
79 |
-
</ul>
|
80 |
-
<p>También puedes comprar artículos para tus compañeros de equipo usando el menú de compra o tirándolos al suelo. Puede soltar elementos pulsando la tecla G o utilizando la rueda del ratón. También puedes solicitar elementos a tus compañeros de equipo usando los comandos de radio o el chat. </p>
|
81 |
-
<h3>Cómo comunicarse con compañeros de equipo y usar comandos de radio</h3>
|
82 |
-
<p>La comunicación es muy importante en el original de Counter-Strike 1.6, ya que puede ayudarte a coordinar con tus compañeros de equipo y compartir información sobre la ubicación, el estado y las acciones del enemigo. Puede comunicarse con sus compañeros de equipo mediante el chat de voz, el chat de texto o los comandos de radio. </p>
|
83 |
-
<p>Para usar el chat de voz, necesitas tener un micrófono y activarlo en las opciones del juego. A continuación, puede presionar y mantener pulsada la tecla K para hablar con sus compañeros de equipo. También puede ajustar el volumen y silenciar a otros jugadores en las opciones del juego. </p>
|
84 |
-
<p>Para usar el chat de texto, debe presionar la tecla Y para chatear con todos los jugadores o la tecla U para chatear solo con sus compañeros de equipo. A continuación, puede escribir su mensaje y pulse Enter para enviarlo. También puedes usar algunos comandos y atajos en el chat, como /me, /quit, /timeleft, etc.</p>
|
85 |
-
|
86 |
-
<h3>Cómo completar objetivos y ganar rondas</h3>
|
87 |
-
<p>El objetivo principal de Counter-Strike 1.6 original es completar los objetivos de tu equipo y ganar rondas contra el equipo enemigo. Los objetivos varían según el modo de juego y el mapa, pero por lo general implican plantar o desactivar una bomba, rescatar o proteger a los rehenes, asesinar o proteger a un VIP, o escapar o evitar una fuga. </p>
|
88 |
-
<p>Para ganar una ronda, necesitas completar el objetivo de tu equipo o eliminar a todos los jugadores enemigos antes de que se acabe el tiempo. El límite de tiempo para cada ronda suele ser de 2 minutos y 30 segundos, pero puede variar dependiendo de la configuración del servidor. Si ningún equipo completa su objetivo o elimina a todos los jugadores enemigos antes de que acabe el tiempo, la ronda terminará en un empate. </p>
|
89 |
-
<p>Para ganar un partido, necesitas ganar más rondas que el equipo enemigo. El número de rondas para cada partido suele ser de 30, pero puede variar dependiendo de la configuración del servidor. Si ambos equipos ganan un número igual de rondas después de 30 rondas, el partido terminará en un empate. </p>
|
90 |
-
<h2>Consejos y trucos para Counter-Strike 1.6 original</h2>
|
91 |
-
<p>Counter-Strike 1.6 original es un juego que requiere habilidad, estrategia, trabajo en equipo y práctica para dominar. Estos son algunos consejos y trucos que pueden ayudarte a mejorar tu jugabilidad y rendimiento:</p>
|
92 |
-
<h3>Cómo mejorar tu puntería y control de retroceso</h3>
|
93 |
-
<p>Apuntar y controlar el retroceso son dos de las habilidades más importantes en Counter-Strike 1.6 original, ya que determinan con qué precisión y eficacia puedes disparar a tus enemigos. Estos son algunos consejos para mejorar tu puntería y control de retroceso:</p>
|
94 |
-
<ul>
|
95 |
-
<li><b>Práctica:</b> La mejor manera de mejorar tu puntería y control de retroceso es practicar de forma regular y consistente. Puedes practicar offline con bots o online con otros jugadores en diferentes mapas y modos. También puede utilizar mapas de entrenamiento o servidores dedicados que ofrecen varios ejercicios y desafíos para apuntar y controlar el retroceso. </li>
|
96 |
-
|
97 |
-
<li><b>Punto de mira:</b> El punto de mira es el símbolo que muestra dónde irán tus balas cuando dispares. Usted debe elegir un punto de mira que sea cómodo y visible para usted, pero no demasiado grande o distracción. Puedes personalizar tu punto de mira en las opciones del juego o mediante comandos de consola. </li>
|
98 |
-
<li><b>Posicionamiento:</b> La posición de tu punto de mira en la pantalla afecta la rapidez y precisión con la que puedes apuntar a tus enemigos. Siempre debes mantener tu punto de mira a la altura de la cabeza y cerca de las esquinas o bordes de paredes, puertas, ventanas, etc. De esta manera, puedes reducir la distancia y el tiempo que necesitas para mover tu punto de mira para disparar a tus enemigos. </li>
|
99 |
-
<li><b>Movimiento:</b> El movimiento de tu personaje afecta la precisión y estabilidad de tus disparos. Siempre debes dejar de moverte antes de disparar, ya que moverte mientras disparas hará que tus balas se extiendan y se desvíen de tu punto de mira. Puedes usar la tecla shift para caminar lenta y silenciosamente, o la tecla ctrl para agacharte y bajar tu perfil. </li>
|
100 |
-
<li><b>Estallido:</b> La técnica de estallido consiste en disparar algunas balas a la vez, en lugar de rociar o tocar. De esta manera, puede controlar mejor el retroceso y la propagación de sus disparos, y conservar su munición y precisión. Debes disparar de 2 a 4 balas por disparo, dependiendo del arma y la distancia. También debe hacer una pausa breve entre cada ráfaga para dejar que el retroceso se restablezca. </li>
|
101 |
-
<li><b>Pulverización:</b> La técnica de pulverización consiste en disparar muchas balas a la vez, sin detenerse o detenerse. De esta manera, puedes infligir más daño y suprimir a tus enemigos más rápido, pero a costa de precisión y munición. Solo debes rociar cuando estás muy cerca de tus enemigos, o cuando no tienes otra opción. También debe aprender los patrones de pulverización de diferentes armas y compensarlos moviendo el ratón en la dirección opuesta. </li>
|
102 |
-
|
103 |
-
</ul>
|
104 |
-
<h3>Cómo usar los auriculares y ajustar el volumen</h3>
|
105 |
-
<p>El sonido es otro aspecto importante del original de Counter-Strike 1.6, ya que puede ayudarte a escuchar y localizar a tus enemigos y aliados, así como otros sonidos como pasos, disparos, granadas, etc. Estos son algunos consejos para usar auriculares y ajustar tu volumen:</p>
|
106 |
-
<ul>
|
107 |
-
<li><b>Auriculares:</b> Siempre debe usar auriculares en lugar de altavoces al reproducir Counter-Strike 1.6 original, ya que los auriculares pueden proporcionar una mejor calidad de sonido y direccionalidad que los altavoces. Los auriculares también pueden bloquear los ruidos externos y las distracciones que pueden interferir con su juego. Usted debe elegir los auriculares que son cómodos y caben bien en sus oídos, y que tienen buen balance de graves y agudos. </li>
|
108 |
-
<li><b>Volumen:</b> Debes ajustar tu volumen a un nivel lo suficientemente alto para que puedas escuchar todos los sonidos del juego con claridad, pero no demasiado fuerte para que te duela los oídos o cause daño auditivo. También debes evitar usar cualquier potenciador de sonido o ecualizador que pueda distorsionar o alterar los sonidos originales del juego. </li>
|
109 |
-
<li><b>Configuración:</b> Deberías revisar y ajustar tu configuración de sonido en las opciones del juego o usando comandos de consola. Debes habilitar el sonido 3D o HRTF (función de transferencia relacionada con la cabeza) si está disponible, ya que pueden mejorar la conciencia espacial y el realismo de los sonidos en el juego. También debes desactivar cualquier música o sonido ambiental que pueda distraerte o molestarte durante el juego. </li>
|
110 |
-
</ul>
|
111 |
-
<h3>Cómo permanecer quieto cuando se dispara y moverse constantemente cuando no</h3>
|
112 |
-
<p>El movimiento es otra habilidad importante en Counter-Strike 1.6 original, ya que afecta a lo rápido y ágil que estás en el campo de batalla. Aquí hay algunos consejos para quedarse quieto al disparar y moverse constantemente cuando no:</p>
|
113 |
-
<ul>
|
114 |
-
|
115 |
-
<li><b>Muévete constantemente cuando no dispares:</b> Cuando no estés disparando, siempre debes seguir moviéndote alrededor del mapa, ya que quedarte quieto te hará un objetivo fácil para tus enemigos. Puede usar las teclas W, A, S, D para avanzar, izquierda, hacia atrás, derecha respectivamente. También puede utilizar las teclas Q y E para inclinarse hacia la izquierda o hacia la derecha, respectivamente. También puede utilizar el ratón para mirar a su alrededor y apuntar. Debes moverte de forma impredecible y aleatoria, y evitar correr en líneas rectas o permanecer en áreas abiertas. </li>
|
116 |
-
<li><b>Strafe:</b> Strafing es una técnica que consiste en moverse de lado mientras se mira hacia adelante. De esta manera, puedes esquivar el fuego enemigo y mantener tu puntería al mismo tiempo. Puede pulsar las teclas A o D mientras mueve el ratón en la dirección opuesta. También puede usar las teclas Q y E para inclinarse hacia la izquierda o hacia la derecha mientras se desvía. </li>
|
117 |
-
<li><b>Bunny hop:</b> Bunny hopping es una técnica que consiste en saltar repetidamente mientras se avanza. De esta manera, puede aumentar su velocidad y movilidad, y hacerse más difícil de golpear. Puede saltar saltando presionando las teclas W y barra espaciadora alternativamente mientras mueve el ratón ligeramente hacia la izquierda o la derecha. </li>
|
118 |
-
</ul>
|
119 |
-
<h3>Cómo disparar a través de las paredes y los puntos comunes de pre-fuego</h3>
|
120 |
-
<p>Disparar a través de paredes y puntos comunes previos al disparo son dos técnicas avanzadas que pueden darte una ventaja sobre tus enemigos en Counter-Strike 1.6 original. Aquí hay algunos consejos para disparar a través de las paredes y los puntos comunes pre-fuego:</p>
|
121 |
-
<ul>
|
122 |
-
<li><b>Dispara a través de las paredes:</b> Algunas paredes y objetos en Counter-Strike 1.6 original son penetrables, lo que significa que puedes disparar a través de ellos y golpear a tus enemigos detrás de ellos. Puedes usar esto para sorprender o dañar a tus enemigos sin exponerte. Puedes saber si una pared u objeto es penetrable disparándole y viendo si deja un agujero de bala o una marca. También puede utilizar el comando de consola r_decals 0 para eliminar todas las calcomanías del mapa, lo que facilita ver las paredes y objetos penetrables. </li>
|
123 |
-
|
124 |
-
</ul>
|
125 |
-
<h3>Cómo mantener la calma y tomar el tiro</h3>
|
126 |
-
<p>Mantener la calma y tomar el disparo son dos habilidades esenciales en Counter-Strike 1.6 original, ya que afectan a lo bien que se realiza bajo presión y lo seguro que está en sus habilidades. Aquí hay algunos consejos para mantener la calma y tomar la foto:</p>
|
127 |
-
<ul>
|
128 |
-
<li><b>Respirar:</b> La respiración es una forma simple pero efectiva de calmarse y relajar la mente y el cuerpo. Debe respirar profunda y lentamente, inhalando por la nariz y exhalando por la boca. También debes enfocarte en tu respiración e ignorar cualquier distracción o pensamientos negativos. </li>
|
129 |
-
<li><b>Piensa:</b> Pensar es una forma crucial pero a menudo pasada por alto para mejorar la toma de decisiones y las habilidades para resolver problemas. Debes pensar de forma lógica y estratégica, analizando la situación y sopesando los pros y los contras de cada opción. También debes pensar positiva y optimistamente, creyendo en ti mismo y en tus compañeros de equipo. </li>
|
130 |
-
<li><b>Act:</b> Actuar es la forma final pero más importante de ejecutar tu plan y lograr tu objetivo. Debes actuar con rapidez y confianza, confiando en tus instintos y habilidades. También debe actuar con calma y paciencia, esperando el momento adecuado y la oportunidad de tomar la foto. </li>
|
131 |
-
</ul>
|
132 |
-
<h2>Conclusión</h2>
|
133 |
-
<p>En conclusión, Counter-Strike 1.6 original es un juego que ofrece mucha diversión, desafío y emoción para los fans de FPS de todas las edades y niveles. Es un juego que requiere habilidad, estrategia, trabajo en equipo y práctica para dominar, pero también te recompensa con satisfacción, disfrute y mejora. Es un juego que tiene una rica historia, una comunidad leal y un futuro brillante. </p>
|
134 |
-
|
135 |
-
<p>Espero que haya encontrado este artículo útil e informativo. Si tiene alguna pregunta, comentario o retroalimentación, no dude en compartirlos conmigo. Me encantaría saber de ti y ayudarte. ¡Gracias por leer y jugar feliz! </p>
|
136 |
-
<h2>Preguntas frecuentes</h2>
|
137 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre Counter-Strike 1.6 original:</p>
|
138 |
-
<h3>P: ¿Cuál es la diferencia entre Counter-Strike 1.6 original y Counter-Strike: Fuente? </h3>
|
139 |
-
<p>A: Counter-Strike: Source es un remake de Counter-Strike 1.6 original que fue lanzado en 2004 por Valve. Utiliza el motor Source, que ofrece gráficos, física, sonido y jugabilidad mejorados. Sin embargo, algunos jugadores prefieren Counter-Strike 1.6 original por su diseño clásico y auténtico, jugabilidad y sensación. </p>
|
140 |
-
<h3>P: ¿Cuál es la diferencia entre Counter-Strike 1.6 original y Counter-Strike: Ofensiva Global? </h3>
|
141 |
-
<p>A: Counter-Strike: Global Offensive es la última entrega de la serie Counter-Strike que fue lanzada en 2012 por Valve. Cuenta con gráficos actualizados, modos, mapas, armas y personajes, así como nuevas características como pieles, rangos, matchmaking, etc. Sin embargo, algunos jugadores prefieren Counter-Strike 1.6 original por su simplicidad y nostalgia. </p>
|
142 |
-
<h3>Q: ¿Cómo puedo jugar Counter-Strike 1.6 original en un ordenador Mac o Linux? </h3>
|
143 |
-
<p>A: Desafortunadamente, Counter-Strike 1.6 original no está soportado oficialmente en computadoras Mac o Linux. Sin embargo, es posible que pueda jugarlo utilizando un programa como Wine o CrossOver que le permite ejecutar aplicaciones de Windows en computadoras Mac o Linux. Puede encontrar más información e instrucciones sobre cómo hacer esto en varios sitios web y foros en línea. </p>
|
144 |
-
<h3>Q: ¿Cómo puedo jugar Counter-Strike 1.6 original en un dispositivo móvil? </h3>
|
145 |
-
|
146 |
-
<h3>Q: ¿Cómo puedo jugar Counter-Strike 1.6 original con mods o mapas personalizados? </h3>
|
147 |
-
<p>A: Hay muchos mods y mapas personalizados que han sido creados por fans y desarrolladores para Counter-Strike 1.6 original a lo largo de los años. Estos mods y mapas pueden añadir nuevas características, modos, armas, personajes, etc. al juego, o cambiar su apariencia, jugabilidad o dificultad. Puede encontrar y descargar estos mods y mapas de varios sitios web y servidores en línea. También puedes crear tus propios mods y mapas usando herramientas como Hammer Editor o AMX Mod X.</p> 64aa2da5cf<br />
|
148 |
-
<br />
|
149 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
DELETED
@@ -1,137 +0,0 @@
|
|
1 |
-
# SPDX-FileCopyrightText: 2015 Eric Larson
|
2 |
-
#
|
3 |
-
# SPDX-License-Identifier: Apache-2.0
|
4 |
-
|
5 |
-
import types
|
6 |
-
import functools
|
7 |
-
import zlib
|
8 |
-
|
9 |
-
from pip._vendor.requests.adapters import HTTPAdapter
|
10 |
-
|
11 |
-
from .controller import CacheController, PERMANENT_REDIRECT_STATUSES
|
12 |
-
from .cache import DictCache
|
13 |
-
from .filewrapper import CallbackFileWrapper
|
14 |
-
|
15 |
-
|
16 |
-
class CacheControlAdapter(HTTPAdapter):
|
17 |
-
invalidating_methods = {"PUT", "PATCH", "DELETE"}
|
18 |
-
|
19 |
-
def __init__(
|
20 |
-
self,
|
21 |
-
cache=None,
|
22 |
-
cache_etags=True,
|
23 |
-
controller_class=None,
|
24 |
-
serializer=None,
|
25 |
-
heuristic=None,
|
26 |
-
cacheable_methods=None,
|
27 |
-
*args,
|
28 |
-
**kw
|
29 |
-
):
|
30 |
-
super(CacheControlAdapter, self).__init__(*args, **kw)
|
31 |
-
self.cache = DictCache() if cache is None else cache
|
32 |
-
self.heuristic = heuristic
|
33 |
-
self.cacheable_methods = cacheable_methods or ("GET",)
|
34 |
-
|
35 |
-
controller_factory = controller_class or CacheController
|
36 |
-
self.controller = controller_factory(
|
37 |
-
self.cache, cache_etags=cache_etags, serializer=serializer
|
38 |
-
)
|
39 |
-
|
40 |
-
def send(self, request, cacheable_methods=None, **kw):
|
41 |
-
"""
|
42 |
-
Send a request. Use the request information to see if it
|
43 |
-
exists in the cache and cache the response if we need to and can.
|
44 |
-
"""
|
45 |
-
cacheable = cacheable_methods or self.cacheable_methods
|
46 |
-
if request.method in cacheable:
|
47 |
-
try:
|
48 |
-
cached_response = self.controller.cached_request(request)
|
49 |
-
except zlib.error:
|
50 |
-
cached_response = None
|
51 |
-
if cached_response:
|
52 |
-
return self.build_response(request, cached_response, from_cache=True)
|
53 |
-
|
54 |
-
# check for etags and add headers if appropriate
|
55 |
-
request.headers.update(self.controller.conditional_headers(request))
|
56 |
-
|
57 |
-
resp = super(CacheControlAdapter, self).send(request, **kw)
|
58 |
-
|
59 |
-
return resp
|
60 |
-
|
61 |
-
def build_response(
|
62 |
-
self, request, response, from_cache=False, cacheable_methods=None
|
63 |
-
):
|
64 |
-
"""
|
65 |
-
Build a response by making a request or using the cache.
|
66 |
-
|
67 |
-
This will end up calling send and returning a potentially
|
68 |
-
cached response
|
69 |
-
"""
|
70 |
-
cacheable = cacheable_methods or self.cacheable_methods
|
71 |
-
if not from_cache and request.method in cacheable:
|
72 |
-
# Check for any heuristics that might update headers
|
73 |
-
# before trying to cache.
|
74 |
-
if self.heuristic:
|
75 |
-
response = self.heuristic.apply(response)
|
76 |
-
|
77 |
-
# apply any expiration heuristics
|
78 |
-
if response.status == 304:
|
79 |
-
# We must have sent an ETag request. This could mean
|
80 |
-
# that we've been expired already or that we simply
|
81 |
-
# have an etag. In either case, we want to try and
|
82 |
-
# update the cache if that is the case.
|
83 |
-
cached_response = self.controller.update_cached_response(
|
84 |
-
request, response
|
85 |
-
)
|
86 |
-
|
87 |
-
if cached_response is not response:
|
88 |
-
from_cache = True
|
89 |
-
|
90 |
-
# We are done with the server response, read a
|
91 |
-
# possible response body (compliant servers will
|
92 |
-
# not return one, but we cannot be 100% sure) and
|
93 |
-
# release the connection back to the pool.
|
94 |
-
response.read(decode_content=False)
|
95 |
-
response.release_conn()
|
96 |
-
|
97 |
-
response = cached_response
|
98 |
-
|
99 |
-
# We always cache the 301 responses
|
100 |
-
elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
|
101 |
-
self.controller.cache_response(request, response)
|
102 |
-
else:
|
103 |
-
# Wrap the response file with a wrapper that will cache the
|
104 |
-
# response when the stream has been consumed.
|
105 |
-
response._fp = CallbackFileWrapper(
|
106 |
-
response._fp,
|
107 |
-
functools.partial(
|
108 |
-
self.controller.cache_response, request, response
|
109 |
-
),
|
110 |
-
)
|
111 |
-
if response.chunked:
|
112 |
-
super_update_chunk_length = response._update_chunk_length
|
113 |
-
|
114 |
-
def _update_chunk_length(self):
|
115 |
-
super_update_chunk_length()
|
116 |
-
if self.chunk_left == 0:
|
117 |
-
self._fp._close()
|
118 |
-
|
119 |
-
response._update_chunk_length = types.MethodType(
|
120 |
-
_update_chunk_length, response
|
121 |
-
)
|
122 |
-
|
123 |
-
resp = super(CacheControlAdapter, self).build_response(request, response)
|
124 |
-
|
125 |
-
# See if we should invalidate the cache.
|
126 |
-
if request.method in self.invalidating_methods and resp.ok:
|
127 |
-
cache_url = self.controller.cache_url(request.url)
|
128 |
-
self.cache.delete(cache_url)
|
129 |
-
|
130 |
-
# Give the request a from_cache attr to let people use it
|
131 |
-
resp.from_cache = from_cache
|
132 |
-
|
133 |
-
return resp
|
134 |
-
|
135 |
-
def close(self):
|
136 |
-
self.cache.close()
|
137 |
-
super(CacheControlAdapter, self).close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
# The Original Code is mozilla.org code.
|
3 |
-
#
|
4 |
-
# The Initial Developer of the Original Code is
|
5 |
-
# Netscape Communications Corporation.
|
6 |
-
# Portions created by the Initial Developer are Copyright (C) 1998
|
7 |
-
# the Initial Developer. All Rights Reserved.
|
8 |
-
#
|
9 |
-
# Contributor(s):
|
10 |
-
# Mark Pilgrim - port to Python
|
11 |
-
#
|
12 |
-
# This library is free software; you can redistribute it and/or
|
13 |
-
# modify it under the terms of the GNU Lesser General Public
|
14 |
-
# License as published by the Free Software Foundation; either
|
15 |
-
# version 2.1 of the License, or (at your option) any later version.
|
16 |
-
#
|
17 |
-
# This library is distributed in the hope that it will be useful,
|
18 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
19 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
20 |
-
# Lesser General Public License for more details.
|
21 |
-
#
|
22 |
-
# You should have received a copy of the GNU Lesser General Public
|
23 |
-
# License along with this library; if not, write to the Free Software
|
24 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
25 |
-
# 02110-1301 USA
|
26 |
-
######################### END LICENSE BLOCK #########################
|
27 |
-
|
28 |
-
import logging
|
29 |
-
|
30 |
-
from .codingstatemachinedict import CodingStateMachineDict
|
31 |
-
from .enums import MachineState
|
32 |
-
|
33 |
-
|
34 |
-
class CodingStateMachine:
|
35 |
-
"""
|
36 |
-
A state machine to verify a byte sequence for a particular encoding. For
|
37 |
-
each byte the detector receives, it will feed that byte to every active
|
38 |
-
state machine available, one byte at a time. The state machine changes its
|
39 |
-
state based on its previous state and the byte it receives. There are 3
|
40 |
-
states in a state machine that are of interest to an auto-detector:
|
41 |
-
|
42 |
-
START state: This is the state to start with, or a legal byte sequence
|
43 |
-
(i.e. a valid code point) for character has been identified.
|
44 |
-
|
45 |
-
ME state: This indicates that the state machine identified a byte sequence
|
46 |
-
that is specific to the charset it is designed for and that
|
47 |
-
there is no other possible encoding which can contain this byte
|
48 |
-
sequence. This will to lead to an immediate positive answer for
|
49 |
-
the detector.
|
50 |
-
|
51 |
-
ERROR state: This indicates the state machine identified an illegal byte
|
52 |
-
sequence for that encoding. This will lead to an immediate
|
53 |
-
negative answer for this encoding. Detector will exclude this
|
54 |
-
encoding from consideration from here on.
|
55 |
-
"""
|
56 |
-
|
57 |
-
def __init__(self, sm: CodingStateMachineDict) -> None:
|
58 |
-
self._model = sm
|
59 |
-
self._curr_byte_pos = 0
|
60 |
-
self._curr_char_len = 0
|
61 |
-
self._curr_state = MachineState.START
|
62 |
-
self.active = True
|
63 |
-
self.logger = logging.getLogger(__name__)
|
64 |
-
self.reset()
|
65 |
-
|
66 |
-
def reset(self) -> None:
|
67 |
-
self._curr_state = MachineState.START
|
68 |
-
|
69 |
-
def next_state(self, c: int) -> int:
|
70 |
-
# for each byte we get its class
|
71 |
-
# if it is first byte, we also get byte length
|
72 |
-
byte_class = self._model["class_table"][c]
|
73 |
-
if self._curr_state == MachineState.START:
|
74 |
-
self._curr_byte_pos = 0
|
75 |
-
self._curr_char_len = self._model["char_len_table"][byte_class]
|
76 |
-
# from byte's class and state_table, we get its next state
|
77 |
-
curr_state = self._curr_state * self._model["class_factor"] + byte_class
|
78 |
-
self._curr_state = self._model["state_table"][curr_state]
|
79 |
-
self._curr_byte_pos += 1
|
80 |
-
return self._curr_state
|
81 |
-
|
82 |
-
def get_current_charlen(self) -> int:
|
83 |
-
return self._curr_char_len
|
84 |
-
|
85 |
-
def get_coding_state_machine(self) -> str:
|
86 |
-
return self._model["name"]
|
87 |
-
|
88 |
-
@property
|
89 |
-
def language(self) -> str:
|
90 |
-
return self._model["language"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/repr.py
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
import inspect
|
2 |
-
from functools import partial
|
3 |
-
from typing import (
|
4 |
-
Any,
|
5 |
-
Callable,
|
6 |
-
Iterable,
|
7 |
-
List,
|
8 |
-
Optional,
|
9 |
-
Tuple,
|
10 |
-
Type,
|
11 |
-
TypeVar,
|
12 |
-
Union,
|
13 |
-
overload,
|
14 |
-
)
|
15 |
-
|
16 |
-
T = TypeVar("T")
|
17 |
-
|
18 |
-
|
19 |
-
Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
|
20 |
-
RichReprResult = Result
|
21 |
-
|
22 |
-
|
23 |
-
class ReprError(Exception):
|
24 |
-
"""An error occurred when attempting to build a repr."""
|
25 |
-
|
26 |
-
|
27 |
-
@overload
|
28 |
-
def auto(cls: Optional[Type[T]]) -> Type[T]:
|
29 |
-
...
|
30 |
-
|
31 |
-
|
32 |
-
@overload
|
33 |
-
def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
|
34 |
-
...
|
35 |
-
|
36 |
-
|
37 |
-
def auto(
|
38 |
-
cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
|
39 |
-
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
|
40 |
-
"""Class decorator to create __repr__ from __rich_repr__"""
|
41 |
-
|
42 |
-
def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
|
43 |
-
def auto_repr(self: T) -> str:
|
44 |
-
"""Create repr string from __rich_repr__"""
|
45 |
-
repr_str: List[str] = []
|
46 |
-
append = repr_str.append
|
47 |
-
|
48 |
-
angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined]
|
49 |
-
for arg in self.__rich_repr__(): # type: ignore[attr-defined]
|
50 |
-
if isinstance(arg, tuple):
|
51 |
-
if len(arg) == 1:
|
52 |
-
append(repr(arg[0]))
|
53 |
-
else:
|
54 |
-
key, value, *default = arg
|
55 |
-
if key is None:
|
56 |
-
append(repr(value))
|
57 |
-
else:
|
58 |
-
if default and default[0] == value:
|
59 |
-
continue
|
60 |
-
append(f"{key}={value!r}")
|
61 |
-
else:
|
62 |
-
append(repr(arg))
|
63 |
-
if angular:
|
64 |
-
return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
|
65 |
-
else:
|
66 |
-
return f"{self.__class__.__name__}({', '.join(repr_str)})"
|
67 |
-
|
68 |
-
def auto_rich_repr(self: Type[T]) -> Result:
|
69 |
-
"""Auto generate __rich_rep__ from signature of __init__"""
|
70 |
-
try:
|
71 |
-
signature = inspect.signature(self.__init__)
|
72 |
-
for name, param in signature.parameters.items():
|
73 |
-
if param.kind == param.POSITIONAL_ONLY:
|
74 |
-
yield getattr(self, name)
|
75 |
-
elif param.kind in (
|
76 |
-
param.POSITIONAL_OR_KEYWORD,
|
77 |
-
param.KEYWORD_ONLY,
|
78 |
-
):
|
79 |
-
if param.default == param.empty:
|
80 |
-
yield getattr(self, param.name)
|
81 |
-
else:
|
82 |
-
yield param.name, getattr(self, param.name), param.default
|
83 |
-
except Exception as error:
|
84 |
-
raise ReprError(
|
85 |
-
f"Failed to auto generate __rich_repr__; {error}"
|
86 |
-
) from None
|
87 |
-
|
88 |
-
if not hasattr(cls, "__rich_repr__"):
|
89 |
-
auto_rich_repr.__doc__ = "Build a rich repr"
|
90 |
-
cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined]
|
91 |
-
|
92 |
-
auto_repr.__doc__ = "Return repr(self)"
|
93 |
-
cls.__repr__ = auto_repr # type: ignore[assignment]
|
94 |
-
if angular is not None:
|
95 |
-
cls.__rich_repr__.angular = angular # type: ignore[attr-defined]
|
96 |
-
return cls
|
97 |
-
|
98 |
-
if cls is None:
|
99 |
-
return partial(do_replace, angular=angular)
|
100 |
-
else:
|
101 |
-
return do_replace(cls, angular=angular)
|
102 |
-
|
103 |
-
|
104 |
-
@overload
|
105 |
-
def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
|
106 |
-
...
|
107 |
-
|
108 |
-
|
109 |
-
@overload
|
110 |
-
def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
|
111 |
-
...
|
112 |
-
|
113 |
-
|
114 |
-
def rich_repr(
|
115 |
-
cls: Optional[Type[T]] = None, *, angular: bool = False
|
116 |
-
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
|
117 |
-
if cls is None:
|
118 |
-
return auto(angular=angular)
|
119 |
-
else:
|
120 |
-
return auto(cls)
|
121 |
-
|
122 |
-
|
123 |
-
if __name__ == "__main__":
|
124 |
-
|
125 |
-
@auto
|
126 |
-
class Foo:
|
127 |
-
def __rich_repr__(self) -> Result:
|
128 |
-
yield "foo"
|
129 |
-
yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
|
130 |
-
yield "buy", "hand sanitizer"
|
131 |
-
|
132 |
-
foo = Foo()
|
133 |
-
from pip._vendor.rich.console import Console
|
134 |
-
|
135 |
-
console = Console()
|
136 |
-
|
137 |
-
console.rule("Standard repr")
|
138 |
-
console.print(foo)
|
139 |
-
|
140 |
-
console.print(foo, width=60)
|
141 |
-
console.print(foo, width=30)
|
142 |
-
|
143 |
-
console.rule("Angular repr")
|
144 |
-
Foo.__rich_repr__.angular = True # type: ignore[attr-defined]
|
145 |
-
|
146 |
-
console.print(foo)
|
147 |
-
|
148 |
-
console.print(foo, width=60)
|
149 |
-
console.print(foo, width=30)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/__init__.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
from distutils.command.bdist import bdist
|
2 |
-
import sys
|
3 |
-
|
4 |
-
if 'egg' not in bdist.format_commands:
|
5 |
-
try:
|
6 |
-
bdist.format_commands['egg'] = ('bdist_egg', "Python .egg file")
|
7 |
-
except TypeError:
|
8 |
-
# For backward compatibility with older distutils (stdlib)
|
9 |
-
bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
|
10 |
-
bdist.format_commands.append('egg')
|
11 |
-
|
12 |
-
del bdist, sys
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/C6AI/HDRL/Dockerfile
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
FROM ghcr.io/livebook-dev/livebook:latest-cuda11.8
|
2 |
-
|
3 |
-
ENV LIVEBOOK_APP_SERVICE_NAME "🐳 Hugging Face - $SPACE_TITLE"
|
4 |
-
ENV LIVEBOOK_APP_SERVICE_URL "https://huggingface.co/spaces/$SPACE_AUTHOR_NAME/$SPACE_REPO_NAME"
|
5 |
-
ENV LIVEBOOK_UPDATE_INSTRUCTIONS_URL "https://livebook.dev"
|
6 |
-
ENV LIVEBOOK_WITHIN_IFRAME "true"
|
7 |
-
ENV LIVEBOOK_DATA_PATH "/data"
|
8 |
-
ENV LIVEBOOK_PORT 7860
|
9 |
-
|
10 |
-
EXPOSE 7860
|
11 |
-
|
12 |
-
USER root
|
13 |
-
RUN mkdir -p /data
|
14 |
-
RUN chmod 777 /data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/memory/detail/host_system_resource.h
DELETED
@@ -1,33 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// #include the host system's memory_resource header
|
22 |
-
#define __THRUST_HOST_SYSTEM_MEMORY_HEADER <__THRUST_HOST_SYSTEM_ROOT/memory_resource.h>
|
23 |
-
#include __THRUST_HOST_SYSTEM_MEMORY_HEADER
|
24 |
-
#undef __THRUST_HOST_SYSTEM_MEMORY_HEADER
|
25 |
-
|
26 |
-
namespace thrust
|
27 |
-
{
|
28 |
-
|
29 |
-
typedef thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::memory_resource
|
30 |
-
host_memory_resource;
|
31 |
-
|
32 |
-
} // end thrust
|
33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py
DELETED
@@ -1,221 +0,0 @@
|
|
1 |
-
# ------------------------------------------------------------------------
|
2 |
-
# Grounding DINO
|
3 |
-
# url: https://github.com/IDEA-Research/GroundingDINO
|
4 |
-
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
5 |
-
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
6 |
-
# ------------------------------------------------------------------------
|
7 |
-
# Conditional DETR
|
8 |
-
# Copyright (c) 2021 Microsoft. All Rights Reserved.
|
9 |
-
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
10 |
-
# ------------------------------------------------------------------------
|
11 |
-
# Copied from DETR (https://github.com/facebookresearch/detr)
|
12 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
13 |
-
# ------------------------------------------------------------------------
|
14 |
-
|
15 |
-
"""
|
16 |
-
Backbone modules.
|
17 |
-
"""
|
18 |
-
|
19 |
-
from typing import Dict, List
|
20 |
-
|
21 |
-
import torch
|
22 |
-
import torch.nn.functional as F
|
23 |
-
import torchvision
|
24 |
-
from torch import nn
|
25 |
-
from torchvision.models._utils import IntermediateLayerGetter
|
26 |
-
|
27 |
-
from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process
|
28 |
-
|
29 |
-
from .position_encoding import build_position_encoding
|
30 |
-
from .swin_transformer import build_swin_transformer
|
31 |
-
|
32 |
-
|
33 |
-
class FrozenBatchNorm2d(torch.nn.Module):
|
34 |
-
"""
|
35 |
-
BatchNorm2d where the batch statistics and the affine parameters are fixed.
|
36 |
-
|
37 |
-
Copy-paste from torchvision.misc.ops with added eps before rqsrt,
|
38 |
-
without which any other models than torchvision.models.resnet[18,34,50,101]
|
39 |
-
produce nans.
|
40 |
-
"""
|
41 |
-
|
42 |
-
def __init__(self, n):
|
43 |
-
super(FrozenBatchNorm2d, self).__init__()
|
44 |
-
self.register_buffer("weight", torch.ones(n))
|
45 |
-
self.register_buffer("bias", torch.zeros(n))
|
46 |
-
self.register_buffer("running_mean", torch.zeros(n))
|
47 |
-
self.register_buffer("running_var", torch.ones(n))
|
48 |
-
|
49 |
-
def _load_from_state_dict(
|
50 |
-
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
|
51 |
-
):
|
52 |
-
num_batches_tracked_key = prefix + "num_batches_tracked"
|
53 |
-
if num_batches_tracked_key in state_dict:
|
54 |
-
del state_dict[num_batches_tracked_key]
|
55 |
-
|
56 |
-
super(FrozenBatchNorm2d, self)._load_from_state_dict(
|
57 |
-
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
|
58 |
-
)
|
59 |
-
|
60 |
-
def forward(self, x):
|
61 |
-
# move reshapes to the beginning
|
62 |
-
# to make it fuser-friendly
|
63 |
-
w = self.weight.reshape(1, -1, 1, 1)
|
64 |
-
b = self.bias.reshape(1, -1, 1, 1)
|
65 |
-
rv = self.running_var.reshape(1, -1, 1, 1)
|
66 |
-
rm = self.running_mean.reshape(1, -1, 1, 1)
|
67 |
-
eps = 1e-5
|
68 |
-
scale = w * (rv + eps).rsqrt()
|
69 |
-
bias = b - rm * scale
|
70 |
-
return x * scale + bias
|
71 |
-
|
72 |
-
|
73 |
-
class BackboneBase(nn.Module):
|
74 |
-
def __init__(
|
75 |
-
self,
|
76 |
-
backbone: nn.Module,
|
77 |
-
train_backbone: bool,
|
78 |
-
num_channels: int,
|
79 |
-
return_interm_indices: list,
|
80 |
-
):
|
81 |
-
super().__init__()
|
82 |
-
for name, parameter in backbone.named_parameters():
|
83 |
-
if (
|
84 |
-
not train_backbone
|
85 |
-
or "layer2" not in name
|
86 |
-
and "layer3" not in name
|
87 |
-
and "layer4" not in name
|
88 |
-
):
|
89 |
-
parameter.requires_grad_(False)
|
90 |
-
|
91 |
-
return_layers = {}
|
92 |
-
for idx, layer_index in enumerate(return_interm_indices):
|
93 |
-
return_layers.update(
|
94 |
-
{"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}
|
95 |
-
)
|
96 |
-
|
97 |
-
# if len:
|
98 |
-
# if use_stage1_feature:
|
99 |
-
# return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
|
100 |
-
# else:
|
101 |
-
# return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
|
102 |
-
# else:
|
103 |
-
# return_layers = {'layer4': "0"}
|
104 |
-
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
|
105 |
-
self.num_channels = num_channels
|
106 |
-
|
107 |
-
def forward(self, tensor_list: NestedTensor):
|
108 |
-
xs = self.body(tensor_list.tensors)
|
109 |
-
out: Dict[str, NestedTensor] = {}
|
110 |
-
for name, x in xs.items():
|
111 |
-
m = tensor_list.mask
|
112 |
-
assert m is not None
|
113 |
-
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
|
114 |
-
out[name] = NestedTensor(x, mask)
|
115 |
-
# import ipdb; ipdb.set_trace()
|
116 |
-
return out
|
117 |
-
|
118 |
-
|
119 |
-
class Backbone(BackboneBase):
|
120 |
-
"""ResNet backbone with frozen BatchNorm."""
|
121 |
-
|
122 |
-
def __init__(
|
123 |
-
self,
|
124 |
-
name: str,
|
125 |
-
train_backbone: bool,
|
126 |
-
dilation: bool,
|
127 |
-
return_interm_indices: list,
|
128 |
-
batch_norm=FrozenBatchNorm2d,
|
129 |
-
):
|
130 |
-
if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
|
131 |
-
backbone = getattr(torchvision.models, name)(
|
132 |
-
replace_stride_with_dilation=[False, False, dilation],
|
133 |
-
pretrained=is_main_process(),
|
134 |
-
norm_layer=batch_norm,
|
135 |
-
)
|
136 |
-
else:
|
137 |
-
raise NotImplementedError("Why you can get here with name {}".format(name))
|
138 |
-
# num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
|
139 |
-
assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
|
140 |
-
assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
|
141 |
-
num_channels_all = [256, 512, 1024, 2048]
|
142 |
-
num_channels = num_channels_all[4 - len(return_interm_indices) :]
|
143 |
-
super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
|
144 |
-
|
145 |
-
|
146 |
-
class Joiner(nn.Sequential):
|
147 |
-
def __init__(self, backbone, position_embedding):
|
148 |
-
super().__init__(backbone, position_embedding)
|
149 |
-
|
150 |
-
def forward(self, tensor_list: NestedTensor):
|
151 |
-
xs = self[0](tensor_list)
|
152 |
-
out: List[NestedTensor] = []
|
153 |
-
pos = []
|
154 |
-
for name, x in xs.items():
|
155 |
-
out.append(x)
|
156 |
-
# position encoding
|
157 |
-
pos.append(self[1](x).to(x.tensors.dtype))
|
158 |
-
|
159 |
-
return out, pos
|
160 |
-
|
161 |
-
|
162 |
-
def build_backbone(args):
|
163 |
-
"""
|
164 |
-
Useful args:
|
165 |
-
- backbone: backbone name
|
166 |
-
- lr_backbone:
|
167 |
-
- dilation
|
168 |
-
- return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
|
169 |
-
- backbone_freeze_keywords:
|
170 |
-
- use_checkpoint: for swin only for now
|
171 |
-
|
172 |
-
"""
|
173 |
-
position_embedding = build_position_encoding(args)
|
174 |
-
train_backbone = True
|
175 |
-
if not train_backbone:
|
176 |
-
raise ValueError("Please set lr_backbone > 0")
|
177 |
-
return_interm_indices = args.return_interm_indices
|
178 |
-
assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
|
179 |
-
args.backbone_freeze_keywords
|
180 |
-
use_checkpoint = getattr(args, "use_checkpoint", False)
|
181 |
-
|
182 |
-
if args.backbone in ["resnet50", "resnet101"]:
|
183 |
-
backbone = Backbone(
|
184 |
-
args.backbone,
|
185 |
-
train_backbone,
|
186 |
-
args.dilation,
|
187 |
-
return_interm_indices,
|
188 |
-
batch_norm=FrozenBatchNorm2d,
|
189 |
-
)
|
190 |
-
bb_num_channels = backbone.num_channels
|
191 |
-
elif args.backbone in [
|
192 |
-
"swin_T_224_1k",
|
193 |
-
"swin_B_224_22k",
|
194 |
-
"swin_B_384_22k",
|
195 |
-
"swin_L_224_22k",
|
196 |
-
"swin_L_384_22k",
|
197 |
-
]:
|
198 |
-
pretrain_img_size = int(args.backbone.split("_")[-2])
|
199 |
-
backbone = build_swin_transformer(
|
200 |
-
args.backbone,
|
201 |
-
pretrain_img_size=pretrain_img_size,
|
202 |
-
out_indices=tuple(return_interm_indices),
|
203 |
-
dilation=False,
|
204 |
-
use_checkpoint=use_checkpoint,
|
205 |
-
)
|
206 |
-
|
207 |
-
bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
|
208 |
-
else:
|
209 |
-
raise NotImplementedError("Unknown backbone {}".format(args.backbone))
|
210 |
-
|
211 |
-
assert len(bb_num_channels) == len(
|
212 |
-
return_interm_indices
|
213 |
-
), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"
|
214 |
-
|
215 |
-
model = Joiner(backbone, position_embedding)
|
216 |
-
model.num_channels = bb_num_channels
|
217 |
-
assert isinstance(
|
218 |
-
bb_num_channels, List
|
219 |
-
), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels))
|
220 |
-
# import ipdb; ipdb.set_trace()
|
221 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DCandE/rvc-models/infer_pack/models.py
DELETED
@@ -1,982 +0,0 @@
|
|
1 |
-
import math, pdb, os
|
2 |
-
from time import time as ttime
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
from infer_pack import modules
|
7 |
-
from infer_pack import attentions
|
8 |
-
from infer_pack import commons
|
9 |
-
from infer_pack.commons import init_weights, get_padding
|
10 |
-
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
11 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
12 |
-
from infer_pack.commons import init_weights
|
13 |
-
import numpy as np
|
14 |
-
from infer_pack import commons
|
15 |
-
|
16 |
-
|
17 |
-
class TextEncoder256(nn.Module):
|
18 |
-
def __init__(
|
19 |
-
self,
|
20 |
-
out_channels,
|
21 |
-
hidden_channels,
|
22 |
-
filter_channels,
|
23 |
-
n_heads,
|
24 |
-
n_layers,
|
25 |
-
kernel_size,
|
26 |
-
p_dropout,
|
27 |
-
f0=True,
|
28 |
-
):
|
29 |
-
super().__init__()
|
30 |
-
self.out_channels = out_channels
|
31 |
-
self.hidden_channels = hidden_channels
|
32 |
-
self.filter_channels = filter_channels
|
33 |
-
self.n_heads = n_heads
|
34 |
-
self.n_layers = n_layers
|
35 |
-
self.kernel_size = kernel_size
|
36 |
-
self.p_dropout = p_dropout
|
37 |
-
self.emb_phone = nn.Linear(256, hidden_channels)
|
38 |
-
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
39 |
-
if f0 == True:
|
40 |
-
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
41 |
-
self.encoder = attentions.Encoder(
|
42 |
-
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
43 |
-
)
|
44 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
45 |
-
|
46 |
-
def forward(self, phone, pitch, lengths):
|
47 |
-
if pitch == None:
|
48 |
-
x = self.emb_phone(phone)
|
49 |
-
else:
|
50 |
-
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
51 |
-
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
52 |
-
x = self.lrelu(x)
|
53 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
54 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
55 |
-
x.dtype
|
56 |
-
)
|
57 |
-
x = self.encoder(x * x_mask, x_mask)
|
58 |
-
stats = self.proj(x) * x_mask
|
59 |
-
|
60 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
61 |
-
return m, logs, x_mask
|
62 |
-
|
63 |
-
|
64 |
-
class TextEncoder256Sim(nn.Module):
|
65 |
-
def __init__(
|
66 |
-
self,
|
67 |
-
out_channels,
|
68 |
-
hidden_channels,
|
69 |
-
filter_channels,
|
70 |
-
n_heads,
|
71 |
-
n_layers,
|
72 |
-
kernel_size,
|
73 |
-
p_dropout,
|
74 |
-
f0=True,
|
75 |
-
):
|
76 |
-
super().__init__()
|
77 |
-
self.out_channels = out_channels
|
78 |
-
self.hidden_channels = hidden_channels
|
79 |
-
self.filter_channels = filter_channels
|
80 |
-
self.n_heads = n_heads
|
81 |
-
self.n_layers = n_layers
|
82 |
-
self.kernel_size = kernel_size
|
83 |
-
self.p_dropout = p_dropout
|
84 |
-
self.emb_phone = nn.Linear(256, hidden_channels)
|
85 |
-
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
86 |
-
if f0 == True:
|
87 |
-
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
88 |
-
self.encoder = attentions.Encoder(
|
89 |
-
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
90 |
-
)
|
91 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
|
92 |
-
|
93 |
-
def forward(self, phone, pitch, lengths):
|
94 |
-
if pitch == None:
|
95 |
-
x = self.emb_phone(phone)
|
96 |
-
else:
|
97 |
-
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
98 |
-
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
99 |
-
x = self.lrelu(x)
|
100 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
101 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
102 |
-
x.dtype
|
103 |
-
)
|
104 |
-
x = self.encoder(x * x_mask, x_mask)
|
105 |
-
x = self.proj(x) * x_mask
|
106 |
-
return x, x_mask
|
107 |
-
|
108 |
-
|
109 |
-
class ResidualCouplingBlock(nn.Module):
|
110 |
-
def __init__(
|
111 |
-
self,
|
112 |
-
channels,
|
113 |
-
hidden_channels,
|
114 |
-
kernel_size,
|
115 |
-
dilation_rate,
|
116 |
-
n_layers,
|
117 |
-
n_flows=4,
|
118 |
-
gin_channels=0,
|
119 |
-
):
|
120 |
-
super().__init__()
|
121 |
-
self.channels = channels
|
122 |
-
self.hidden_channels = hidden_channels
|
123 |
-
self.kernel_size = kernel_size
|
124 |
-
self.dilation_rate = dilation_rate
|
125 |
-
self.n_layers = n_layers
|
126 |
-
self.n_flows = n_flows
|
127 |
-
self.gin_channels = gin_channels
|
128 |
-
|
129 |
-
self.flows = nn.ModuleList()
|
130 |
-
for i in range(n_flows):
|
131 |
-
self.flows.append(
|
132 |
-
modules.ResidualCouplingLayer(
|
133 |
-
channels,
|
134 |
-
hidden_channels,
|
135 |
-
kernel_size,
|
136 |
-
dilation_rate,
|
137 |
-
n_layers,
|
138 |
-
gin_channels=gin_channels,
|
139 |
-
mean_only=True,
|
140 |
-
)
|
141 |
-
)
|
142 |
-
self.flows.append(modules.Flip())
|
143 |
-
|
144 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
145 |
-
if not reverse:
|
146 |
-
for flow in self.flows:
|
147 |
-
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
148 |
-
else:
|
149 |
-
for flow in reversed(self.flows):
|
150 |
-
x = flow(x, x_mask, g=g, reverse=reverse)
|
151 |
-
return x
|
152 |
-
|
153 |
-
def remove_weight_norm(self):
|
154 |
-
for i in range(self.n_flows):
|
155 |
-
self.flows[i * 2].remove_weight_norm()
|
156 |
-
|
157 |
-
|
158 |
-
class PosteriorEncoder(nn.Module):
|
159 |
-
def __init__(
|
160 |
-
self,
|
161 |
-
in_channels,
|
162 |
-
out_channels,
|
163 |
-
hidden_channels,
|
164 |
-
kernel_size,
|
165 |
-
dilation_rate,
|
166 |
-
n_layers,
|
167 |
-
gin_channels=0,
|
168 |
-
):
|
169 |
-
super().__init__()
|
170 |
-
self.in_channels = in_channels
|
171 |
-
self.out_channels = out_channels
|
172 |
-
self.hidden_channels = hidden_channels
|
173 |
-
self.kernel_size = kernel_size
|
174 |
-
self.dilation_rate = dilation_rate
|
175 |
-
self.n_layers = n_layers
|
176 |
-
self.gin_channels = gin_channels
|
177 |
-
|
178 |
-
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
179 |
-
self.enc = modules.WN(
|
180 |
-
hidden_channels,
|
181 |
-
kernel_size,
|
182 |
-
dilation_rate,
|
183 |
-
n_layers,
|
184 |
-
gin_channels=gin_channels,
|
185 |
-
)
|
186 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
187 |
-
|
188 |
-
def forward(self, x, x_lengths, g=None):
|
189 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
|
190 |
-
x.dtype
|
191 |
-
)
|
192 |
-
x = self.pre(x) * x_mask
|
193 |
-
x = self.enc(x, x_mask, g=g)
|
194 |
-
stats = self.proj(x) * x_mask
|
195 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
196 |
-
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
197 |
-
return z, m, logs, x_mask
|
198 |
-
|
199 |
-
def remove_weight_norm(self):
|
200 |
-
self.enc.remove_weight_norm()
|
201 |
-
|
202 |
-
|
203 |
-
class Generator(torch.nn.Module):
    """Waveform generator.

    Upsamples a latent sequence with weight-normalized transposed
    convolutions and refines each scale with parallel residual blocks,
    emitting a single-channel waveform squashed to [-1, 1] by tanh.
    """

    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        """
        initial_channel: channels of the input latent
        resblock: "1" selects modules.ResBlock1, anything else ResBlock2
        resblock_kernel_sizes / resblock_dilation_sizes: per-resblock configs
        upsample_rates / upsample_kernel_sizes: per-stage transposed-conv configs
        upsample_initial_channel: channels entering the first upsampling stage
        gin_channels: size of optional global conditioning vector (0 = none)
        """
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        # One transposed conv per upsampling stage; channels halve each stage.
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        # num_kernels resblocks per stage, stored flat:
        # index i * num_kernels + j addresses stage i, kernel j.
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            # projects the global (speaker) embedding onto the latent channels
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        # x: (batch, initial_channel, frames); g: optional (batch, gin_channels, 1)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            # average the parallel resblock outputs of this stage
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        # Inference-time cleanup: strip weight norm from all normalized layers.
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
|
277 |
-
|
278 |
-
|
279 |
-
class SineGen(torch.nn.Module):
    """Sine-wave source generator.

    SineGen(samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003,
            voiced_threshold=0, flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for voiced/unvoiced classification (default 0)
    flag_for_pulse: whether this SineGen is used inside PulseGen (default False;
        accepted for interface compatibility but not used by this implementation)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        # output dimension = fundamental + overtones
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate voiced/unvoiced mask: 1.0 where f0 exceeds the threshold
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
            f0 for unvoiced steps should be 0
        upp: integer upsampling factor from frame rate to sample rate
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, i.e. the (idx+2)-th harmonic
            # per-frame phase increment, wrapped to [0, 1)
            # (original note: the % 1 here means the harmonic products cannot
            # be optimized away in post-processing)
            rad_values = (f0_buf / self.sampling_rate) % 1
            # random initial phase per harmonic, but none for the fundamental
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            # cumulative phase; wrap-around is compensated further below
            # (original note: the deferred % 1 means this cumsum cannot be
            # optimized away either)
            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(
                2, 1
            )
            tmp_over_one %= 1
            # detect phase wrap-arounds and cancel them in the second cumsum
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            # voiced frames: small noise; unvoiced: noise at sine_amp / 3
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
|
369 |
-
|
370 |
-
|
371 |
-
class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length 1)
    uv (batchsize, length, 1)
    """

    def __init__(
        self,
        sampling_rate,
        harmonic_num=0,
        sine_amp=0.1,
        add_noise_std=0.003,
        voiced_threshod=0,
        is_half=True,
    ):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std
        self.is_half = is_half
        # to produce sine waveforms
        self.l_sin_gen = SineGen(
            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
        )

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp=None):
        # x: F0 at frame rate; upp: frame->sample upsampling factor
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        if self.is_half:
            # cast to fp16 — presumably to match half-precision l_linear
            # weights when running the model in fp16; TODO confirm
            sine_wavs = sine_wavs.half()
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        # the noise and uv slots are returned as None here; kept only for
        # interface parity with other source modules
        return sine_merge, None, None  # noise, uv
|
418 |
-
|
419 |
-
|
420 |
-
class GeneratorNSF(torch.nn.Module):
    """Waveform generator with a neural-source-filter style excitation.

    A harmonic source signal is synthesized from the F0 contour by
    SourceModuleHnNSF and injected at every upsampling stage through the
    noise_convs branch; otherwise the structure mirrors Generator.
    """

    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels,
        sr,
        is_half=False,
    ):
        super(GeneratorNSF, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        # upsamples frame-rate F0 to sample rate
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sr, harmonic_num=0, is_half=is_half
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            # strided conv that brings the sample-rate source signal down to
            # this stage's temporal resolution (last stage needs no striding)
            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1 :])
                self.noise_convs.append(
                    Conv1d(
                        1,
                        c_cur,
                        kernel_size=stride_f0 * 2,
                        stride=stride_f0,
                        padding=stride_f0 // 2,
                    )
                )
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        # num_kernels resblocks per stage, stored flat (stage i, kernel j at
        # index i * num_kernels + j)
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

        # total upsampling factor (frames -> samples)
        self.upp = np.prod(upsample_rates)

    def forward(self, x, f0, g=None):
        # build the sample-rate harmonic excitation from the F0 contour
        har_source, noi_source, uv = self.m_source(f0, self.upp)
        har_source = har_source.transpose(1, 2)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            # inject the excitation, downsampled to this stage's resolution
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            # average the parallel resblock outputs of this stage
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        # Inference-time cleanup: strip weight norm from all normalized layers.
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
|
521 |
-
|
522 |
-
|
523 |
-
# Maps the config's sample-rate label to the rate in Hz.
sr2sr = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}
|
528 |
-
|
529 |
-
|
530 |
-
class SynthesizerTrnMs256NSFsid(nn.Module):
    """VITS-style synthesizer with an NSF decoder and speaker-id conditioning.

    Components:
        enc_p: phone/pitch encoder producing the prior (m_p, logs_p)
        enc_q: posterior encoder over the linear spectrogram
        flow:  normalizing flow between posterior and prior latent spaces
        dec:   NSF waveform decoder driven by frame-level F0 (pitchf/nsff0)
        emb_g: speaker embedding table indexed by integer speaker id
    """

    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        # accept either an int sample rate or a label like "40k"
        # (idiom fix: was `type(sr) == type("strr")`)
        if isinstance(sr, str):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        """Strip weight norm from all sub-modules (inference-time cleanup)."""
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ):  # ds: integer speaker ids, shape [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, gin_channels, 1]; broadcast over time
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        # decode only a random latent slice during training to bound memory
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
        """Generate a waveform from phones plus pitch for speaker `sid`."""
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        # sample the prior with a fixed 0.66666 temperature factor
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
|
639 |
-
|
640 |
-
|
641 |
-
class SynthesizerTrnMs256NSFsid_nono(nn.Module):
    """Pitch-free variant of SynthesizerTrnMs256NSFsid.

    The phone encoder is built with f0=False and the decoder is the plain
    Generator, so no F0 contour is required at train or inference time.
    """

    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr=None,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
            f0=False,
        )
        self.dec = Generator(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        # Inference-time cleanup of weight normalization in all sub-modules.
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds: speaker ids, [bs, 1]
        g = self.emb_g(ds).unsqueeze(-1)  # [b, gin_channels, 1]; broadcast over time
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
        z_p = self.flow(z, y_mask, g=g)
        # decode only a random latent slice during training to bound memory
        z_slice, ids_slice = commons.rand_slice_segments(
            z, y_lengths, self.segment_size
        )
        o = self.dec(z_slice, g=g)
        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

    def infer(self, phone, phone_lengths, sid, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        # sample the prior with a fixed 0.66666 temperature factor
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
|
741 |
-
|
742 |
-
|
743 |
-
class SynthesizerTrnMs256NSFsid_sim(nn.Module):
    """
    Synthesizer for Training

    Simplified variant: there is no posterior encoder (enc_q). The phone
    encoder output is pushed through the flow in reverse and decoded
    directly by the NSF decoder.
    """

    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        # hop_length,
        gin_channels=0,
        use_sdp=True,  # accepted for interface parity; not used below
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256Sim(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        # NOTE(review): GeneratorNSF's signature also requires `sr`, which is
        # not passed here — as written this constructor raises a TypeError;
        # confirm the intended sample-rate source before using this class.
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            is_half=kwargs["is_half"],
        )

        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        """Strip weight norm from the decoder and flow.

        Fix: this variant defines no self.enc_q, so the original call to
        self.enc_q.remove_weight_norm() here always raised AttributeError;
        it has been removed.
        """
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, y_lengths, ds
    ):  # the spectrogram y is no longer needed here
        g = self.emb_g(ds).unsqueeze(-1)  # [b, gin_channels, 1]; broadcast over time
        x, x_mask = self.enc_p(phone, pitch, phone_lengths)
        x = self.flow(x, x_mask, g=g, reverse=True)
        # decode only a random latent slice during training to bound memory
        z_slice, ids_slice = commons.rand_slice_segments(
            x, y_lengths, self.segment_size
        )

        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        o = self.dec(z_slice, pitchf, g=g)
        return o, ids_slice

    def infer(
        self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
    ):  # the spectrogram y is no longer needed here
        g = self.emb_g(ds).unsqueeze(-1)  # [b, gin_channels, 1]; broadcast over time
        x, x_mask = self.enc_p(phone, pitch, phone_lengths)
        x = self.flow(x, x_mask, g=g, reverse=True)
        o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
        return o, o
|
844 |
-
|
845 |
-
|
846 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
    """Ensemble discriminator: one scale discriminator (DiscriminatorS)
    plus one period discriminator (DiscriminatorP) per entry in `periods`.
    """

    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11, 17]
        # periods = [3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        """Run every sub-discriminator on real (y) and generated (y_hat) audio.

        Returns:
            y_d_rs, y_d_gs: per-discriminator logits for real / generated audio
            fmap_rs, fmap_gs: per-discriminator intermediate feature maps
        """
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        # idiom fix: the enumerate index was unused, so iterate directly
        for d in self.discriminators:
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
874 |
-
|
875 |
-
|
876 |
-
class DiscriminatorS(torch.nn.Module):
    """Scale discriminator: stacked strided, grouped 1-D convolutions applied
    directly to the raw waveform.
    """

    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        # idiom fix (was `use_spectral_norm == False`): spectral norm for the
        # spectral-normalized variant, weight norm otherwise
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        """x: (batch, 1, samples) -> (flattened logits, list of feature maps)."""
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
|
904 |
-
|
905 |
-
|
906 |
-
class DiscriminatorP(torch.nn.Module):
    """Period discriminator: folds the waveform into a 2-D map of shape
    (time / period, period) and applies stacked 2-D convolutions, so it
    inspects periodic structure at one fixed period.
    """

    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        # idiom fix (was `use_spectral_norm == False`): spectral norm for the
        # spectral-normalized variant, weight norm otherwise
        norm_f = spectral_norm if use_spectral_norm else weight_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        """x: (batch, 1, samples) -> (flattened logits, list of feature maps)."""
        fmap = []

        # 1d to 2d: reflect-pad time to a multiple of the period, then fold
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DHEIVER/AnimeGANv2/app.py
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
from PIL import Image
import torch
import gradio as gr

# AnimeGANv2 generator, version 2 checkpoint (more robust, less stylized).
model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    device="cpu",
    progress=False
)

# Version 1 checkpoint (more stylized, less robust).
model1 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained="face_paint_512_v1",
    device="cpu"
)

# Helper (from the same hub repo) that preprocesses a face photo and runs it
# through a generator at 512px.
face2paint = torch.hub.load(
    'AK391/animegan2-pytorch:main',
    'face2paint',
    size=512,
    device="cpu",
    side_by_side=False
)

def inference(img, ver):
    # `ver` is the (Portuguese) radio-button label selected in the UI;
    # anything other than the exact version-2 label falls back to version 1.
    if ver == 'versão 2 (🔺 robustez, 🔻 estilização)':
        out = face2paint(model2, img)
    else:
        out = face2paint(model1, img)
    return out

title = "AnimeGANv2"
description = "Demonstração do AnimeGanv2 para retratos de rostos. Para usá-lo, simplesmente faça o upload da sua imagem."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Repositório do Github Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='insígnia de visitantes'></center></p>"

# NOTE(review): uses the legacy gradio `gr.inputs`/`gr.outputs` API — this
# presumably requires an old (pre-3.x) gradio pin; confirm before upgrading.
gr.Interface(
    fn=inference,
    inputs=[
        gr.inputs.Image(type="pil"),
        gr.inputs.Radio(
            ['versão 1 (🔺 estilização, 🔻 robustez)', 'versão 2 (🔺 robustez, 🔻 estilização)'],
            type="value",
            default='versão 2 (🔺 robustez, 🔻 estilização)',
            label='versão'
        )
    ],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    allow_flagging=False,
    allow_screenshot=False
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_middlewares.py
DELETED
@@ -1,119 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
|
3 |
-
|
4 |
-
from .typedefs import Handler
|
5 |
-
from .web_exceptions import HTTPPermanentRedirect, _HTTPMove
|
6 |
-
from .web_request import Request
|
7 |
-
from .web_response import StreamResponse
|
8 |
-
from .web_urldispatcher import SystemRoute
|
9 |
-
|
10 |
-
__all__ = (
|
11 |
-
"middleware",
|
12 |
-
"normalize_path_middleware",
|
13 |
-
)
|
14 |
-
|
15 |
-
if TYPE_CHECKING: # pragma: no cover
|
16 |
-
from .web_app import Application
|
17 |
-
|
18 |
-
_Func = TypeVar("_Func")
|
19 |
-
|
20 |
-
|
21 |
-
async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
|
22 |
-
alt_request = request.clone(rel_url=path)
|
23 |
-
|
24 |
-
match_info = await request.app.router.resolve(alt_request)
|
25 |
-
alt_request._match_info = match_info
|
26 |
-
|
27 |
-
if match_info.http_exception is None:
|
28 |
-
return True, alt_request
|
29 |
-
|
30 |
-
return False, request
|
31 |
-
|
32 |
-
|
33 |
-
def middleware(f: _Func) -> _Func:
|
34 |
-
f.__middleware_version__ = 1 # type: ignore[attr-defined]
|
35 |
-
return f
|
36 |
-
|
37 |
-
|
38 |
-
_Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
|
39 |
-
|
40 |
-
|
41 |
-
def normalize_path_middleware(
|
42 |
-
*,
|
43 |
-
append_slash: bool = True,
|
44 |
-
remove_slash: bool = False,
|
45 |
-
merge_slashes: bool = True,
|
46 |
-
redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect,
|
47 |
-
) -> _Middleware:
|
48 |
-
"""Factory for producing a middleware that normalizes the path of a request.
|
49 |
-
|
50 |
-
Normalizing means:
|
51 |
-
- Add or remove a trailing slash to the path.
|
52 |
-
- Double slashes are replaced by one.
|
53 |
-
|
54 |
-
The middleware returns as soon as it finds a path that resolves
|
55 |
-
correctly. The order if both merge and append/remove are enabled is
|
56 |
-
1) merge slashes
|
57 |
-
2) append/remove slash
|
58 |
-
3) both merge slashes and append/remove slash.
|
59 |
-
If the path resolves with at least one of those conditions, it will
|
60 |
-
redirect to the new path.
|
61 |
-
|
62 |
-
Only one of `append_slash` and `remove_slash` can be enabled. If both
|
63 |
-
are `True` the factory will raise an assertion error
|
64 |
-
|
65 |
-
If `append_slash` is `True` the middleware will append a slash when
|
66 |
-
needed. If a resource is defined with trailing slash and the request
|
67 |
-
comes without it, it will append it automatically.
|
68 |
-
|
69 |
-
If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
|
70 |
-
the middleware will remove trailing slashes and redirect if the resource
|
71 |
-
is defined
|
72 |
-
|
73 |
-
If merge_slashes is True, merge multiple consecutive slashes in the
|
74 |
-
path into one.
|
75 |
-
"""
|
76 |
-
correct_configuration = not (append_slash and remove_slash)
|
77 |
-
assert correct_configuration, "Cannot both remove and append slash"
|
78 |
-
|
79 |
-
@middleware
|
80 |
-
async def impl(request: Request, handler: Handler) -> StreamResponse:
|
81 |
-
if isinstance(request.match_info.route, SystemRoute):
|
82 |
-
paths_to_check = []
|
83 |
-
if "?" in request.raw_path:
|
84 |
-
path, query = request.raw_path.split("?", 1)
|
85 |
-
query = "?" + query
|
86 |
-
else:
|
87 |
-
query = ""
|
88 |
-
path = request.raw_path
|
89 |
-
|
90 |
-
if merge_slashes:
|
91 |
-
paths_to_check.append(re.sub("//+", "/", path))
|
92 |
-
if append_slash and not request.path.endswith("/"):
|
93 |
-
paths_to_check.append(path + "/")
|
94 |
-
if remove_slash and request.path.endswith("/"):
|
95 |
-
paths_to_check.append(path[:-1])
|
96 |
-
if merge_slashes and append_slash:
|
97 |
-
paths_to_check.append(re.sub("//+", "/", path + "/"))
|
98 |
-
if merge_slashes and remove_slash:
|
99 |
-
merged_slashes = re.sub("//+", "/", path)
|
100 |
-
paths_to_check.append(merged_slashes[:-1])
|
101 |
-
|
102 |
-
for path in paths_to_check:
|
103 |
-
path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg
|
104 |
-
resolves, request = await _check_request_resolves(request, path)
|
105 |
-
if resolves:
|
106 |
-
raise redirect_class(request.raw_path + query)
|
107 |
-
|
108 |
-
return await handler(request)
|
109 |
-
|
110 |
-
return impl
|
111 |
-
|
112 |
-
|
113 |
-
def _fix_request_current_app(app: "Application") -> _Middleware:
|
114 |
-
@middleware
|
115 |
-
async def impl(request: Request, handler: Handler) -> StreamResponse:
|
116 |
-
with request.match_info.set_current_app(app):
|
117 |
-
return await handler(request)
|
118 |
-
|
119 |
-
return impl
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/contourpy/chunk.py
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import math
|
4 |
-
|
5 |
-
|
6 |
-
def calc_chunk_sizes(
|
7 |
-
chunk_size: int | tuple[int, int] | None,
|
8 |
-
chunk_count: int | tuple[int, int] | None,
|
9 |
-
total_chunk_count: int | None,
|
10 |
-
ny: int,
|
11 |
-
nx: int,
|
12 |
-
) -> tuple[int, int]:
|
13 |
-
"""Calculate chunk sizes.
|
14 |
-
|
15 |
-
Args:
|
16 |
-
chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same
|
17 |
-
size in both directions if only one is specified.
|
18 |
-
chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the
|
19 |
-
same count in both irections if only one is specified.
|
20 |
-
total_chunk_count (int, optional): Total number of chunks.
|
21 |
-
ny (int): Number of grid points in y-direction.
|
22 |
-
nx (int): Number of grid points in x-direction.
|
23 |
-
|
24 |
-
Return:
|
25 |
-
tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size).
|
26 |
-
|
27 |
-
Note:
|
28 |
-
A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be
|
29 |
-
specified.
|
30 |
-
"""
|
31 |
-
if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1:
|
32 |
-
raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set")
|
33 |
-
|
34 |
-
if total_chunk_count is not None:
|
35 |
-
max_chunk_count = (nx-1)*(ny-1)
|
36 |
-
total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count)
|
37 |
-
if total_chunk_count == 1:
|
38 |
-
chunk_size = 0
|
39 |
-
elif total_chunk_count == max_chunk_count:
|
40 |
-
chunk_size = (1, 1)
|
41 |
-
else:
|
42 |
-
factors = two_factors(total_chunk_count)
|
43 |
-
if ny > nx:
|
44 |
-
chunk_count = factors
|
45 |
-
else:
|
46 |
-
chunk_count = (factors[1], factors[0])
|
47 |
-
|
48 |
-
if chunk_count is not None:
|
49 |
-
if isinstance(chunk_count, tuple):
|
50 |
-
y_chunk_count, x_chunk_count = chunk_count
|
51 |
-
else:
|
52 |
-
y_chunk_count = x_chunk_count = chunk_count
|
53 |
-
x_chunk_count = min(max(x_chunk_count, 1), nx-1)
|
54 |
-
y_chunk_count = min(max(y_chunk_count, 1), ny-1)
|
55 |
-
chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count))
|
56 |
-
|
57 |
-
if chunk_size is None:
|
58 |
-
y_chunk_size = x_chunk_size = 0
|
59 |
-
elif isinstance(chunk_size, tuple):
|
60 |
-
y_chunk_size, x_chunk_size = chunk_size
|
61 |
-
else:
|
62 |
-
y_chunk_size = x_chunk_size = chunk_size
|
63 |
-
|
64 |
-
if x_chunk_size < 0 or y_chunk_size < 0:
|
65 |
-
raise ValueError("chunk_size cannot be negative")
|
66 |
-
|
67 |
-
return y_chunk_size, x_chunk_size
|
68 |
-
|
69 |
-
|
70 |
-
def two_factors(n: int) -> tuple[int, int]:
|
71 |
-
"""Split an integer into two integer factors.
|
72 |
-
|
73 |
-
The two factors will be as close as possible to the sqrt of n, and are returned in decreasing
|
74 |
-
order. Worst case returns (n, 1).
|
75 |
-
|
76 |
-
Args:
|
77 |
-
n (int): The integer to factorize.
|
78 |
-
|
79 |
-
Return:
|
80 |
-
tuple(int, int): The two factors of n, in decreasing order.
|
81 |
-
"""
|
82 |
-
i = math.ceil(math.sqrt(n))
|
83 |
-
while n % i != 0:
|
84 |
-
i -= 1
|
85 |
-
j = n // i
|
86 |
-
if i > j:
|
87 |
-
return i, j
|
88 |
-
else:
|
89 |
-
return j, i
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/codecs.py
DELETED
@@ -1,135 +0,0 @@
|
|
1 |
-
"""Extend the Python codecs module with a few encodings that are used in OpenType (name table)
|
2 |
-
but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
|
3 |
-
|
4 |
-
import codecs
|
5 |
-
import encodings
|
6 |
-
|
7 |
-
|
8 |
-
class ExtendCodec(codecs.Codec):
|
9 |
-
def __init__(self, name, base_encoding, mapping):
|
10 |
-
self.name = name
|
11 |
-
self.base_encoding = base_encoding
|
12 |
-
self.mapping = mapping
|
13 |
-
self.reverse = {v: k for k, v in mapping.items()}
|
14 |
-
self.max_len = max(len(v) for v in mapping.values())
|
15 |
-
self.info = codecs.CodecInfo(
|
16 |
-
name=self.name, encode=self.encode, decode=self.decode
|
17 |
-
)
|
18 |
-
codecs.register_error(name, self.error)
|
19 |
-
|
20 |
-
def _map(self, mapper, output_type, exc_type, input, errors):
|
21 |
-
base_error_handler = codecs.lookup_error(errors)
|
22 |
-
length = len(input)
|
23 |
-
out = output_type()
|
24 |
-
while input:
|
25 |
-
# first try to use self.error as the error handler
|
26 |
-
try:
|
27 |
-
part = mapper(input, self.base_encoding, errors=self.name)
|
28 |
-
out += part
|
29 |
-
break # All converted
|
30 |
-
except exc_type as e:
|
31 |
-
# else convert the correct part, handle error as requested and continue
|
32 |
-
out += mapper(input[: e.start], self.base_encoding, self.name)
|
33 |
-
replacement, pos = base_error_handler(e)
|
34 |
-
out += replacement
|
35 |
-
input = input[pos:]
|
36 |
-
return out, length
|
37 |
-
|
38 |
-
def encode(self, input, errors="strict"):
|
39 |
-
return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
|
40 |
-
|
41 |
-
def decode(self, input, errors="strict"):
|
42 |
-
return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
|
43 |
-
|
44 |
-
def error(self, e):
|
45 |
-
if isinstance(e, UnicodeDecodeError):
|
46 |
-
for end in range(e.start + 1, e.end + 1):
|
47 |
-
s = e.object[e.start : end]
|
48 |
-
if s in self.mapping:
|
49 |
-
return self.mapping[s], end
|
50 |
-
elif isinstance(e, UnicodeEncodeError):
|
51 |
-
for end in range(e.start + 1, e.start + self.max_len + 1):
|
52 |
-
s = e.object[e.start : end]
|
53 |
-
if s in self.reverse:
|
54 |
-
return self.reverse[s], end
|
55 |
-
e.encoding = self.name
|
56 |
-
raise e
|
57 |
-
|
58 |
-
|
59 |
-
_extended_encodings = {
|
60 |
-
"x_mac_japanese_ttx": (
|
61 |
-
"shift_jis",
|
62 |
-
{
|
63 |
-
b"\xFC": chr(0x007C),
|
64 |
-
b"\x7E": chr(0x007E),
|
65 |
-
b"\x80": chr(0x005C),
|
66 |
-
b"\xA0": chr(0x00A0),
|
67 |
-
b"\xFD": chr(0x00A9),
|
68 |
-
b"\xFE": chr(0x2122),
|
69 |
-
b"\xFF": chr(0x2026),
|
70 |
-
},
|
71 |
-
),
|
72 |
-
"x_mac_trad_chinese_ttx": (
|
73 |
-
"big5",
|
74 |
-
{
|
75 |
-
b"\x80": chr(0x005C),
|
76 |
-
b"\xA0": chr(0x00A0),
|
77 |
-
b"\xFD": chr(0x00A9),
|
78 |
-
b"\xFE": chr(0x2122),
|
79 |
-
b"\xFF": chr(0x2026),
|
80 |
-
},
|
81 |
-
),
|
82 |
-
"x_mac_korean_ttx": (
|
83 |
-
"euc_kr",
|
84 |
-
{
|
85 |
-
b"\x80": chr(0x00A0),
|
86 |
-
b"\x81": chr(0x20A9),
|
87 |
-
b"\x82": chr(0x2014),
|
88 |
-
b"\x83": chr(0x00A9),
|
89 |
-
b"\xFE": chr(0x2122),
|
90 |
-
b"\xFF": chr(0x2026),
|
91 |
-
},
|
92 |
-
),
|
93 |
-
"x_mac_simp_chinese_ttx": (
|
94 |
-
"gb2312",
|
95 |
-
{
|
96 |
-
b"\x80": chr(0x00FC),
|
97 |
-
b"\xA0": chr(0x00A0),
|
98 |
-
b"\xFD": chr(0x00A9),
|
99 |
-
b"\xFE": chr(0x2122),
|
100 |
-
b"\xFF": chr(0x2026),
|
101 |
-
},
|
102 |
-
),
|
103 |
-
}
|
104 |
-
|
105 |
-
_cache = {}
|
106 |
-
|
107 |
-
|
108 |
-
def search_function(name):
|
109 |
-
name = encodings.normalize_encoding(name) # Rather undocumented...
|
110 |
-
if name in _extended_encodings:
|
111 |
-
if name not in _cache:
|
112 |
-
base_encoding, mapping = _extended_encodings[name]
|
113 |
-
assert name[-4:] == "_ttx"
|
114 |
-
# Python 2 didn't have any of the encodings that we are implementing
|
115 |
-
# in this file. Python 3 added aliases for the East Asian ones, mapping
|
116 |
-
# them "temporarily" to the same base encoding as us, with a comment
|
117 |
-
# suggesting that full implementation will appear some time later.
|
118 |
-
# As such, try the Python version of the x_mac_... first, if that is found,
|
119 |
-
# use *that* as our base encoding. This would make our encoding upgrade
|
120 |
-
# to the full encoding when and if Python finally implements that.
|
121 |
-
# http://bugs.python.org/issue24041
|
122 |
-
base_encodings = [name[:-4], base_encoding]
|
123 |
-
for base_encoding in base_encodings:
|
124 |
-
try:
|
125 |
-
codecs.lookup(base_encoding)
|
126 |
-
except LookupError:
|
127 |
-
continue
|
128 |
-
_cache[name] = ExtendCodec(name, base_encoding, mapping)
|
129 |
-
break
|
130 |
-
return _cache[name].info
|
131 |
-
|
132 |
-
return None
|
133 |
-
|
134 |
-
|
135 |
-
codecs.register(search_function)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Form-3812b7f1.css
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
div.svelte-sfqy0y{display:flex;flex-direction:inherit;flex-wrap:wrap;gap:var(--form-gap-width);box-shadow:var(--block-shadow);border:var(--block-border-width) solid var(--border-color-primary);border-radius:var(--block-radius);background:var(--border-color-primary);overflow-y:hidden}div.svelte-sfqy0y .block{box-shadow:none!important;border-width:0px!important;border-radius:0!important}.hidden.svelte-sfqy0y{display:none}
|
|
|
|
spaces/DUOMO-Lab/TransGPT/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: TransGPT
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.39.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/DescriptionGPT/detic/custom_solver.py
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
from enum import Enum
|
3 |
-
import itertools
|
4 |
-
from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
|
5 |
-
import torch
|
6 |
-
|
7 |
-
from detectron2.config import CfgNode
|
8 |
-
|
9 |
-
from detectron2.solver.build import maybe_add_gradient_clipping
|
10 |
-
|
11 |
-
def match_name_keywords(n, name_keywords):
|
12 |
-
out = False
|
13 |
-
for b in name_keywords:
|
14 |
-
if b in n:
|
15 |
-
out = True
|
16 |
-
break
|
17 |
-
return out
|
18 |
-
|
19 |
-
def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
|
20 |
-
"""
|
21 |
-
Build an optimizer from config.
|
22 |
-
"""
|
23 |
-
params: List[Dict[str, Any]] = []
|
24 |
-
memo: Set[torch.nn.parameter.Parameter] = set()
|
25 |
-
custom_multiplier_name = cfg.SOLVER.CUSTOM_MULTIPLIER_NAME
|
26 |
-
optimizer_type = cfg.SOLVER.OPTIMIZER
|
27 |
-
for key, value in model.named_parameters(recurse=True):
|
28 |
-
if not value.requires_grad:
|
29 |
-
continue
|
30 |
-
# Avoid duplicating parameters
|
31 |
-
if value in memo:
|
32 |
-
continue
|
33 |
-
memo.add(value)
|
34 |
-
lr = cfg.SOLVER.BASE_LR
|
35 |
-
weight_decay = cfg.SOLVER.WEIGHT_DECAY
|
36 |
-
if "backbone" in key:
|
37 |
-
lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
|
38 |
-
if match_name_keywords(key, custom_multiplier_name):
|
39 |
-
lr = lr * cfg.SOLVER.CUSTOM_MULTIPLIER
|
40 |
-
print('Costum LR', key, lr)
|
41 |
-
param = {"params": [value], "lr": lr}
|
42 |
-
if optimizer_type != 'ADAMW':
|
43 |
-
param['weight_decay'] = weight_decay
|
44 |
-
params += [param]
|
45 |
-
|
46 |
-
def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
|
47 |
-
# detectron2 doesn't have full model gradient clipping now
|
48 |
-
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
|
49 |
-
enable = (
|
50 |
-
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
|
51 |
-
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
|
52 |
-
and clip_norm_val > 0.0
|
53 |
-
)
|
54 |
-
|
55 |
-
class FullModelGradientClippingOptimizer(optim):
|
56 |
-
def step(self, closure=None):
|
57 |
-
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
|
58 |
-
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
|
59 |
-
super().step(closure=closure)
|
60 |
-
|
61 |
-
return FullModelGradientClippingOptimizer if enable else optim
|
62 |
-
|
63 |
-
|
64 |
-
if optimizer_type == 'SGD':
|
65 |
-
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
|
66 |
-
params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
|
67 |
-
nesterov=cfg.SOLVER.NESTEROV
|
68 |
-
)
|
69 |
-
elif optimizer_type == 'ADAMW':
|
70 |
-
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
|
71 |
-
params, cfg.SOLVER.BASE_LR,
|
72 |
-
weight_decay=cfg.SOLVER.WEIGHT_DECAY
|
73 |
-
)
|
74 |
-
else:
|
75 |
-
raise NotImplementedError(f"no optimizer type {optimizer_type}")
|
76 |
-
if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
|
77 |
-
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
|
78 |
-
return optimizer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/LoRA-DreamBooth-Training-UI/app_training.py
DELETED
@@ -1,144 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
|
3 |
-
from __future__ import annotations
|
4 |
-
|
5 |
-
import os
|
6 |
-
|
7 |
-
import gradio as gr
|
8 |
-
|
9 |
-
from constants import UploadTarget
|
10 |
-
from inference import InferencePipeline
|
11 |
-
from trainer import Trainer
|
12 |
-
|
13 |
-
|
14 |
-
def create_training_demo(trainer: Trainer,
|
15 |
-
pipe: InferencePipeline | None = None) -> gr.Blocks:
|
16 |
-
with gr.Blocks() as demo:
|
17 |
-
with gr.Row():
|
18 |
-
with gr.Column():
|
19 |
-
with gr.Box():
|
20 |
-
gr.Markdown('Training Data')
|
21 |
-
instance_images = gr.Files(label='Instance images')
|
22 |
-
instance_prompt = gr.Textbox(label='Instance prompt',
|
23 |
-
max_lines=1)
|
24 |
-
gr.Markdown('''
|
25 |
-
- Upload images of the style you are planning on training on.
|
26 |
-
- For an instance prompt, use a unique, made up word to avoid collisions.
|
27 |
-
''')
|
28 |
-
with gr.Box():
|
29 |
-
gr.Markdown('Output Model')
|
30 |
-
output_model_name = gr.Text(label='Name of your model',
|
31 |
-
max_lines=1)
|
32 |
-
delete_existing_model = gr.Checkbox(
|
33 |
-
label='Delete existing model of the same name',
|
34 |
-
value=False)
|
35 |
-
validation_prompt = gr.Text(label='Validation Prompt')
|
36 |
-
with gr.Box():
|
37 |
-
gr.Markdown('Upload Settings')
|
38 |
-
with gr.Row():
|
39 |
-
upload_to_hub = gr.Checkbox(
|
40 |
-
label='Upload model to Hub', value=True)
|
41 |
-
use_private_repo = gr.Checkbox(label='Private',
|
42 |
-
value=True)
|
43 |
-
delete_existing_repo = gr.Checkbox(
|
44 |
-
label='Delete existing repo of the same name',
|
45 |
-
value=False)
|
46 |
-
upload_to = gr.Radio(
|
47 |
-
label='Upload to',
|
48 |
-
choices=[_.value for _ in UploadTarget],
|
49 |
-
value=UploadTarget.LORA_LIBRARY.value)
|
50 |
-
gr.Markdown('''
|
51 |
-
- By default, trained models will be uploaded to [LoRA Library](https://huggingface.co/lora-library) (see [this example model](https://huggingface.co/lora-library/lora-dreambooth-sample-dog)).
|
52 |
-
- You can also choose "Personal Profile", in which case, the model will be uploaded to https://huggingface.co/{your_username}/{model_name}.
|
53 |
-
''')
|
54 |
-
|
55 |
-
with gr.Box():
|
56 |
-
gr.Markdown('Training Parameters')
|
57 |
-
with gr.Row():
|
58 |
-
base_model = gr.Text(
|
59 |
-
label='Base Model',
|
60 |
-
value='stabilityai/stable-diffusion-2-1-base',
|
61 |
-
max_lines=1)
|
62 |
-
resolution = gr.Dropdown(choices=['512', '768'],
|
63 |
-
value='512',
|
64 |
-
label='Resolution')
|
65 |
-
num_training_steps = gr.Number(
|
66 |
-
label='Number of Training Steps', value=1000, precision=0)
|
67 |
-
learning_rate = gr.Number(label='Learning Rate', value=0.0001)
|
68 |
-
gradient_accumulation = gr.Number(
|
69 |
-
label='Number of Gradient Accumulation',
|
70 |
-
value=1,
|
71 |
-
precision=0)
|
72 |
-
seed = gr.Slider(label='Seed',
|
73 |
-
minimum=0,
|
74 |
-
maximum=100000,
|
75 |
-
step=1,
|
76 |
-
value=0)
|
77 |
-
fp16 = gr.Checkbox(label='FP16', value=True)
|
78 |
-
use_8bit_adam = gr.Checkbox(label='Use 8bit Adam', value=True)
|
79 |
-
checkpointing_steps = gr.Number(label='Checkpointing Steps',
|
80 |
-
value=100,
|
81 |
-
precision=0)
|
82 |
-
use_wandb = gr.Checkbox(label='Use W&B',
|
83 |
-
value=False,
|
84 |
-
interactive=bool(
|
85 |
-
os.getenv('WANDB_API_KEY')))
|
86 |
-
validation_epochs = gr.Number(label='Validation Epochs',
|
87 |
-
value=100,
|
88 |
-
precision=0)
|
89 |
-
gr.Markdown('''
|
90 |
-
- The base model must be a model that is compatible with [diffusers](https://github.com/huggingface/diffusers) library.
|
91 |
-
- It takes a few minutes to download the base model first.
|
92 |
-
- It will take about 8 minutes to train for 1000 steps with a T4 GPU.
|
93 |
-
- You may want to try a small number of steps first, like 1, to see if everything works fine in your environment.
|
94 |
-
- You can check the training status by pressing the "Open logs" button if you are running this on your Space.
|
95 |
-
- You need to set the environment variable `WANDB_API_KEY` if you'd like to use [W&B](https://wandb.ai/site). See [W&B documentation](https://docs.wandb.ai/guides/track/advanced/environment-variables).
|
96 |
-
- **Note:** Due to [this issue](https://github.com/huggingface/accelerate/issues/944), currently, training will not terminate properly if you use W&B.
|
97 |
-
''')
|
98 |
-
|
99 |
-
remove_gpu_after_training = gr.Checkbox(
|
100 |
-
label='Remove GPU after training',
|
101 |
-
value=False,
|
102 |
-
interactive=bool(os.getenv('SPACE_ID')),
|
103 |
-
visible=False)
|
104 |
-
run_button = gr.Button('Start Training')
|
105 |
-
|
106 |
-
with gr.Box():
|
107 |
-
gr.Markdown('Output message')
|
108 |
-
output_message = gr.Markdown()
|
109 |
-
|
110 |
-
if pipe is not None:
|
111 |
-
run_button.click(fn=pipe.clear)
|
112 |
-
run_button.click(fn=trainer.run,
|
113 |
-
inputs=[
|
114 |
-
instance_images,
|
115 |
-
instance_prompt,
|
116 |
-
output_model_name,
|
117 |
-
delete_existing_model,
|
118 |
-
validation_prompt,
|
119 |
-
base_model,
|
120 |
-
resolution,
|
121 |
-
num_training_steps,
|
122 |
-
learning_rate,
|
123 |
-
gradient_accumulation,
|
124 |
-
seed,
|
125 |
-
fp16,
|
126 |
-
use_8bit_adam,
|
127 |
-
checkpointing_steps,
|
128 |
-
use_wandb,
|
129 |
-
validation_epochs,
|
130 |
-
upload_to_hub,
|
131 |
-
use_private_repo,
|
132 |
-
delete_existing_repo,
|
133 |
-
upload_to,
|
134 |
-
remove_gpu_after_training,
|
135 |
-
],
|
136 |
-
outputs=output_message)
|
137 |
-
return demo
|
138 |
-
|
139 |
-
|
140 |
-
if __name__ == '__main__':
|
141 |
-
hf_token = os.getenv('HF_TOKEN')
|
142 |
-
trainer = Trainer(hf_token)
|
143 |
-
demo = create_training_demo(trainer)
|
144 |
-
demo.queue(max_size=1).launch(share=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/MusicGen/audiocraft/modules/streaming.py
DELETED
@@ -1,135 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Streaming module API that should be implemented by all Streaming components,
|
9 |
-
"""
|
10 |
-
|
11 |
-
from contextlib import contextmanager
|
12 |
-
import typing as tp
|
13 |
-
from torch import nn
|
14 |
-
import torch
|
15 |
-
|
16 |
-
|
17 |
-
State = tp.Dict[str, torch.Tensor]
|
18 |
-
|
19 |
-
|
20 |
-
class StreamingModule(nn.Module):
|
21 |
-
"""Common API for streaming components.
|
22 |
-
|
23 |
-
Each streaming component has a streaming state, which is just a dict[str, Tensor].
|
24 |
-
By convention, the first dim of each tensor must be the batch size.
|
25 |
-
Don't use dots in the key names, as this would clash with submodules
|
26 |
-
(like in state_dict).
|
27 |
-
|
28 |
-
If `self._is_streaming` is True, the component should use and remember
|
29 |
-
the proper state inside `self._streaming_state`.
|
30 |
-
|
31 |
-
To set a streaming component in streaming state, use
|
32 |
-
|
33 |
-
with module.streaming():
|
34 |
-
...
|
35 |
-
|
36 |
-
This will automatically reset the streaming state when exiting the context manager.
|
37 |
-
This also automatically propagates to all streaming children module.
|
38 |
-
|
39 |
-
Some module might also implement the `StreamingModule.flush` method, although
|
40 |
-
this one is trickier, as all parents module must be StreamingModule and implement
|
41 |
-
it as well for it to work properly. See `StreamingSequential` after.
|
42 |
-
"""
|
43 |
-
def __init__(self) -> None:
|
44 |
-
super().__init__()
|
45 |
-
self._streaming_state: State = {}
|
46 |
-
self._is_streaming = False
|
47 |
-
|
48 |
-
def _apply_named_streaming(self, fn: tp.Any):
|
49 |
-
for name, module in self.named_modules():
|
50 |
-
if isinstance(module, StreamingModule):
|
51 |
-
fn(name, module)
|
52 |
-
|
53 |
-
def _set_streaming(self, streaming: bool):
|
54 |
-
def _set_streaming(name, module):
|
55 |
-
module._is_streaming = streaming
|
56 |
-
self._apply_named_streaming(_set_streaming)
|
57 |
-
|
58 |
-
@contextmanager
|
59 |
-
def streaming(self):
|
60 |
-
"""Context manager to enter streaming mode. Reset streaming state on exit.
|
61 |
-
"""
|
62 |
-
self._set_streaming(True)
|
63 |
-
try:
|
64 |
-
yield
|
65 |
-
finally:
|
66 |
-
self._set_streaming(False)
|
67 |
-
self.reset_streaming()
|
68 |
-
|
69 |
-
def reset_streaming(self):
|
70 |
-
"""Reset the streaming state.
|
71 |
-
"""
|
72 |
-
def _reset(name: str, module: StreamingModule):
|
73 |
-
module._streaming_state.clear()
|
74 |
-
|
75 |
-
self._apply_named_streaming(_reset)
|
76 |
-
|
77 |
-
def get_streaming_state(self) -> State:
|
78 |
-
"""Return the streaming state, including that of sub-modules.
|
79 |
-
"""
|
80 |
-
state: State = {}
|
81 |
-
|
82 |
-
def _add(name: str, module: StreamingModule):
|
83 |
-
if name:
|
84 |
-
name += "."
|
85 |
-
for key, value in module._streaming_state.items():
|
86 |
-
state[name + key] = value
|
87 |
-
|
88 |
-
self._apply_named_streaming(_add)
|
89 |
-
return state
|
90 |
-
|
91 |
-
def set_streaming_state(self, state: State):
|
92 |
-
"""Set the streaming state, including that of sub-modules.
|
93 |
-
"""
|
94 |
-
state = dict(state)
|
95 |
-
|
96 |
-
def _set(name: str, module: StreamingModule):
|
97 |
-
if name:
|
98 |
-
name += "."
|
99 |
-
module._streaming_state.clear()
|
100 |
-
for key, value in list(state.items()):
|
101 |
-
# complexity is not ideal here, but probably fine.
|
102 |
-
if key.startswith(name):
|
103 |
-
local_key = key[len(name):]
|
104 |
-
if '.' not in local_key:
|
105 |
-
module._streaming_state[local_key] = value
|
106 |
-
del state[key]
|
107 |
-
|
108 |
-
self._apply_named_streaming(_set)
|
109 |
-
assert len(state) == 0, list(state.keys())
|
110 |
-
|
111 |
-
def flush(self, x: tp.Optional[torch.Tensor] = None):
|
112 |
-
"""Flush any remaining outputs that were waiting for completion.
|
113 |
-
Typically, for convolutions, this will add the final padding
|
114 |
-
and process the last buffer.
|
115 |
-
|
116 |
-
This should take an optional argument `x`, which will be provided
|
117 |
-
if a module before this one in the streaming pipeline has already
|
118 |
-
spitted out a flushed out buffer.
|
119 |
-
"""
|
120 |
-
if x is None:
|
121 |
-
return None
|
122 |
-
else:
|
123 |
-
return self(x)
|
124 |
-
|
125 |
-
|
126 |
-
class StreamingSequential(StreamingModule, nn.Sequential):
|
127 |
-
"""A streaming compatible alternative of `nn.Sequential`.
|
128 |
-
"""
|
129 |
-
def flush(self, x: tp.Optional[torch.Tensor] = None):
|
130 |
-
for module in self:
|
131 |
-
if isinstance(module, StreamingModule):
|
132 |
-
x = module.flush(x)
|
133 |
-
elif x is not None:
|
134 |
-
x = module(x)
|
135 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/denoiser/augment.py
DELETED
@@ -1,191 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
# author: adefossez
|
7 |
-
|
8 |
-
import random
|
9 |
-
import torch as th
|
10 |
-
from torch import nn
|
11 |
-
from torch.nn import functional as F
|
12 |
-
|
13 |
-
from . import dsp
|
14 |
-
|
15 |
-
|
16 |
-
class Remix(nn.Module):
|
17 |
-
"""Remix.
|
18 |
-
Mixes different noises with clean speech within a given batch
|
19 |
-
"""
|
20 |
-
|
21 |
-
def forward(self, sources):
|
22 |
-
noise, clean = sources
|
23 |
-
bs, *other = noise.shape
|
24 |
-
device = noise.device
|
25 |
-
perm = th.argsort(th.rand(bs, device=device), dim=0)
|
26 |
-
return th.stack([noise[perm], clean])
|
27 |
-
|
28 |
-
|
29 |
-
class RevEcho(nn.Module):
|
30 |
-
"""
|
31 |
-
Hacky Reverb but runs on GPU without slowing down training.
|
32 |
-
This reverb adds a succession of attenuated echos of the input
|
33 |
-
signal to itself. Intuitively, the delay of the first echo will happen
|
34 |
-
after roughly 2x the radius of the room and is controlled by `first_delay`.
|
35 |
-
Then RevEcho keeps adding echos with the same delay and further attenuation
|
36 |
-
until the amplitude ratio between the last and first echo is 1e-3.
|
37 |
-
The attenuation factor and the number of echos to adds is controlled
|
38 |
-
by RT60 (measured in seconds). RT60 is the average time to get to -60dB
|
39 |
-
(remember volume is measured over the squared amplitude so this matches
|
40 |
-
the 1e-3 ratio).
|
41 |
-
|
42 |
-
At each call to RevEcho, `first_delay`, `initial` and `RT60` are
|
43 |
-
sampled from their range. Then, to prevent this reverb from being too regular,
|
44 |
-
the delay time is resampled uniformly within `first_delay +- 10%`,
|
45 |
-
as controlled by the `jitter` parameter. Finally, for a denser reverb,
|
46 |
-
multiple trains of echos are added with different jitter noises.
|
47 |
-
|
48 |
-
Args:
|
49 |
-
- initial: amplitude of the first echo as a fraction
|
50 |
-
of the input signal. For each sample, actually sampled from
|
51 |
-
`[0, initial]`. Larger values means louder reverb. Physically,
|
52 |
-
this would depend on the absorption of the room walls.
|
53 |
-
- rt60: range of values to sample the RT60 in seconds, i.e.
|
54 |
-
after RT60 seconds, the echo amplitude is 1e-3 of the first echo.
|
55 |
-
The default values follow the recommendations of
|
56 |
-
https://arxiv.org/ftp/arxiv/papers/2001/2001.08662.pdf, Section 2.4.
|
57 |
-
Physically this would also be related to the absorption of the
|
58 |
-
room walls and there is likely a relation between `RT60` and
|
59 |
-
`initial`, which we ignore here.
|
60 |
-
- first_delay: range of values to sample the first echo delay in seconds.
|
61 |
-
The default values are equivalent to sampling a room of 3 to 10 meters.
|
62 |
-
- repeat: how many train of echos with differents jitters to add.
|
63 |
-
Higher values means a denser reverb.
|
64 |
-
- jitter: jitter used to make each repetition of the reverb echo train
|
65 |
-
slightly different. For instance a jitter of 0.1 means
|
66 |
-
the delay between two echos will be in the range `first_delay +- 10%`,
|
67 |
-
with the jittering noise being resampled after each single echo.
|
68 |
-
- keep_clean: fraction of the reverb of the clean speech to add back
|
69 |
-
to the ground truth. 0 = dereverberation, 1 = no dereverberation.
|
70 |
-
- sample_rate: sample rate of the input signals.
|
71 |
-
"""
|
72 |
-
|
73 |
-
def __init__(self, proba=0.5, initial=0.3, rt60=(0.3, 1.3), first_delay=(0.01, 0.03),
|
74 |
-
repeat=3, jitter=0.1, keep_clean=0.1, sample_rate=16000):
|
75 |
-
super().__init__()
|
76 |
-
self.proba = proba
|
77 |
-
self.initial = initial
|
78 |
-
self.rt60 = rt60
|
79 |
-
self.first_delay = first_delay
|
80 |
-
self.repeat = repeat
|
81 |
-
self.jitter = jitter
|
82 |
-
self.keep_clean = keep_clean
|
83 |
-
self.sample_rate = sample_rate
|
84 |
-
|
85 |
-
def _reverb(self, source, initial, first_delay, rt60):
|
86 |
-
"""
|
87 |
-
Return the reverb for a single source.
|
88 |
-
"""
|
89 |
-
length = source.shape[-1]
|
90 |
-
reverb = th.zeros_like(source)
|
91 |
-
for _ in range(self.repeat):
|
92 |
-
frac = 1 # what fraction of the first echo amplitude is still here
|
93 |
-
echo = initial * source
|
94 |
-
while frac > 1e-3:
|
95 |
-
# First jitter noise for the delay
|
96 |
-
jitter = 1 + self.jitter * random.uniform(-1, 1)
|
97 |
-
delay = min(
|
98 |
-
1 + int(jitter * first_delay * self.sample_rate),
|
99 |
-
length)
|
100 |
-
# Delay the echo in time by padding with zero on the left
|
101 |
-
echo = F.pad(echo[:, :, :-delay], (delay, 0))
|
102 |
-
reverb += echo
|
103 |
-
|
104 |
-
# Second jitter noise for the attenuation
|
105 |
-
jitter = 1 + self.jitter * random.uniform(-1, 1)
|
106 |
-
# we want, with `d` the attenuation, d**(rt60 / first_ms) = 1e-3
|
107 |
-
# i.e. log10(d) = -3 * first_ms / rt60, so that
|
108 |
-
attenuation = 10**(-3 * jitter * first_delay / rt60)
|
109 |
-
echo *= attenuation
|
110 |
-
frac *= attenuation
|
111 |
-
return reverb
|
112 |
-
|
113 |
-
def forward(self, wav):
|
114 |
-
if random.random() >= self.proba:
|
115 |
-
return wav
|
116 |
-
noise, clean = wav
|
117 |
-
# Sample characteristics for the reverb
|
118 |
-
initial = random.random() * self.initial
|
119 |
-
first_delay = random.uniform(*self.first_delay)
|
120 |
-
rt60 = random.uniform(*self.rt60)
|
121 |
-
|
122 |
-
reverb_noise = self._reverb(noise, initial, first_delay, rt60)
|
123 |
-
# Reverb for the noise is always added back to the noise
|
124 |
-
noise += reverb_noise
|
125 |
-
reverb_clean = self._reverb(clean, initial, first_delay, rt60)
|
126 |
-
# Split clean reverb among the clean speech and noise
|
127 |
-
clean += self.keep_clean * reverb_clean
|
128 |
-
noise += (1 - self.keep_clean) * reverb_clean
|
129 |
-
|
130 |
-
return th.stack([noise, clean])
|
131 |
-
|
132 |
-
|
133 |
-
class BandMask(nn.Module):
|
134 |
-
"""BandMask.
|
135 |
-
Maskes bands of frequencies. Similar to Park, Daniel S., et al.
|
136 |
-
"Specaugment: A simple data augmentation method for automatic speech recognition."
|
137 |
-
(https://arxiv.org/pdf/1904.08779.pdf) but over the waveform.
|
138 |
-
"""
|
139 |
-
|
140 |
-
def __init__(self, maxwidth=0.2, bands=120, sample_rate=16_000):
|
141 |
-
"""__init__.
|
142 |
-
|
143 |
-
:param maxwidth: the maximum width to remove
|
144 |
-
:param bands: number of bands
|
145 |
-
:param sample_rate: signal sample rate
|
146 |
-
"""
|
147 |
-
super().__init__()
|
148 |
-
self.maxwidth = maxwidth
|
149 |
-
self.bands = bands
|
150 |
-
self.sample_rate = sample_rate
|
151 |
-
|
152 |
-
def forward(self, wav):
|
153 |
-
bands = self.bands
|
154 |
-
bandwidth = int(abs(self.maxwidth) * bands)
|
155 |
-
mels = dsp.mel_frequencies(bands, 40, self.sample_rate/2) / self.sample_rate
|
156 |
-
low = random.randrange(bands)
|
157 |
-
high = random.randrange(low, min(bands, low + bandwidth))
|
158 |
-
filters = dsp.LowPassFilters([mels[low], mels[high]]).to(wav.device)
|
159 |
-
low, midlow = filters(wav)
|
160 |
-
# band pass filtering
|
161 |
-
out = wav - midlow + low
|
162 |
-
return out
|
163 |
-
|
164 |
-
|
165 |
-
class Shift(nn.Module):
|
166 |
-
"""Shift."""
|
167 |
-
|
168 |
-
def __init__(self, shift=8192, same=False):
|
169 |
-
"""__init__.
|
170 |
-
|
171 |
-
:param shift: randomly shifts the signals up to a given factor
|
172 |
-
:param same: shifts both clean and noisy files by the same factor
|
173 |
-
"""
|
174 |
-
super().__init__()
|
175 |
-
self.shift = shift
|
176 |
-
self.same = same
|
177 |
-
|
178 |
-
def forward(self, wav):
|
179 |
-
sources, batch, channels, length = wav.shape
|
180 |
-
length = length - self.shift
|
181 |
-
if self.shift > 0:
|
182 |
-
if not self.training:
|
183 |
-
wav = wav[..., :length]
|
184 |
-
else:
|
185 |
-
offsets = th.randint(
|
186 |
-
self.shift,
|
187 |
-
[1 if self.same else sources, batch, 1, 1], device=wav.device)
|
188 |
-
offsets = offsets.expand(sources, -1, channels, -1)
|
189 |
-
indexes = th.arange(length, device=wav.device)
|
190 |
-
wav = wav.gather(3, indexes + offsets)
|
191 |
-
return wav
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Detomo/ai-avatar-frontend/src/setupTests.js
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
// jest-dom adds custom jest matchers for asserting on DOM nodes.
|
2 |
-
// allows you to do things like:
|
3 |
-
// expect(element).toHaveTextContent(/react/i)
|
4 |
-
// learn more: https://github.com/testing-library/jest-dom
|
5 |
-
import '@testing-library/jest-dom';
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DragGan/DragGan/viz/capture_widget.py
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
import os
|
10 |
-
import re
|
11 |
-
import numpy as np
|
12 |
-
import imgui
|
13 |
-
import PIL.Image
|
14 |
-
from gui_utils import imgui_utils
|
15 |
-
from . import renderer
|
16 |
-
import torch
|
17 |
-
import torchvision
|
18 |
-
|
19 |
-
#----------------------------------------------------------------------------
|
20 |
-
|
21 |
-
class CaptureWidget:
|
22 |
-
def __init__(self, viz):
|
23 |
-
self.viz = viz
|
24 |
-
self.path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '_screenshots'))
|
25 |
-
self.dump_image = False
|
26 |
-
self.dump_gui = False
|
27 |
-
self.defer_frames = 0
|
28 |
-
self.disabled_time = 0
|
29 |
-
|
30 |
-
def dump_png(self, image):
|
31 |
-
viz = self.viz
|
32 |
-
try:
|
33 |
-
_height, _width, channels = image.shape
|
34 |
-
print(viz.result)
|
35 |
-
assert image.dtype == np.uint8
|
36 |
-
os.makedirs(self.path, exist_ok=True)
|
37 |
-
file_id = 0
|
38 |
-
for entry in os.scandir(self.path):
|
39 |
-
if entry.is_file():
|
40 |
-
match = re.fullmatch(r'(\d+).*', entry.name)
|
41 |
-
if match:
|
42 |
-
file_id = max(file_id, int(match.group(1)) + 1)
|
43 |
-
if channels == 1:
|
44 |
-
pil_image = PIL.Image.fromarray(image[:, :, 0], 'L')
|
45 |
-
else:
|
46 |
-
pil_image = PIL.Image.fromarray(image[:, :, :3], 'RGB')
|
47 |
-
pil_image.save(os.path.join(self.path, f'{file_id:05d}.png'))
|
48 |
-
np.save(os.path.join(self.path, f'{file_id:05d}.npy'), viz.result.w)
|
49 |
-
except:
|
50 |
-
viz.result.error = renderer.CapturedException()
|
51 |
-
|
52 |
-
@imgui_utils.scoped_by_object_id
|
53 |
-
def __call__(self, show=True):
|
54 |
-
viz = self.viz
|
55 |
-
if show:
|
56 |
-
with imgui_utils.grayed_out(self.disabled_time != 0):
|
57 |
-
imgui.text('Capture')
|
58 |
-
imgui.same_line(viz.label_w)
|
59 |
-
|
60 |
-
_changed, self.path = imgui_utils.input_text('##path', self.path, 1024,
|
61 |
-
flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE),
|
62 |
-
width=(-1),
|
63 |
-
help_text='PATH')
|
64 |
-
if imgui.is_item_hovered() and not imgui.is_item_active() and self.path != '':
|
65 |
-
imgui.set_tooltip(self.path)
|
66 |
-
imgui.text(' ')
|
67 |
-
imgui.same_line(viz.label_w)
|
68 |
-
if imgui_utils.button('Save image', width=viz.button_w, enabled=(self.disabled_time == 0 and 'image' in viz.result)):
|
69 |
-
self.dump_image = True
|
70 |
-
self.defer_frames = 2
|
71 |
-
self.disabled_time = 0.5
|
72 |
-
imgui.same_line()
|
73 |
-
if imgui_utils.button('Save GUI', width=viz.button_w, enabled=(self.disabled_time == 0)):
|
74 |
-
self.dump_gui = True
|
75 |
-
self.defer_frames = 2
|
76 |
-
self.disabled_time = 0.5
|
77 |
-
|
78 |
-
self.disabled_time = max(self.disabled_time - viz.frame_delta, 0)
|
79 |
-
if self.defer_frames > 0:
|
80 |
-
self.defer_frames -= 1
|
81 |
-
elif self.dump_image:
|
82 |
-
if 'image' in viz.result:
|
83 |
-
self.dump_png(viz.result.image)
|
84 |
-
self.dump_image = False
|
85 |
-
elif self.dump_gui:
|
86 |
-
viz.capture_next_frame()
|
87 |
-
self.dump_gui = False
|
88 |
-
captured_frame = viz.pop_captured_frame()
|
89 |
-
if captured_frame is not None:
|
90 |
-
self.dump_png(captured_frame)
|
91 |
-
|
92 |
-
#----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/storydalle/dalle/models/stage2/layers.py
DELETED
@@ -1,221 +0,0 @@
|
|
1 |
-
# ------------------------------------------------------------------------------------
|
2 |
-
# Minimal DALL-E
|
3 |
-
# Copyright (c) 2021 KakaoBrain. All Rights Reserved.
|
4 |
-
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
5 |
-
# ------------------------------------------------------------------------------------
|
6 |
-
# Modified from minGPT (https://github.com/karpathy/minGPT)
|
7 |
-
# Copyright (c) 2020 Andrej Karpathy. All Rights Reserved.
|
8 |
-
# ------------------------------------------------------------------------------------
|
9 |
-
|
10 |
-
import math
|
11 |
-
import torch
|
12 |
-
import torch.nn as nn
|
13 |
-
from torch.nn import functional as F
|
14 |
-
|
15 |
-
|
16 |
-
class GELU(nn.Module):
|
17 |
-
def __init__(self, use_approx=False):
|
18 |
-
super().__init__()
|
19 |
-
self.use_approx = use_approx
|
20 |
-
|
21 |
-
def forward(self, x):
|
22 |
-
if self.use_approx:
|
23 |
-
return x * torch.sigmoid(1.702 * x)
|
24 |
-
else:
|
25 |
-
return F.gelu(x)
|
26 |
-
|
27 |
-
|
28 |
-
class MultiHeadSelfAttention(nn.Module):
|
29 |
-
|
30 |
-
def __init__(self,
|
31 |
-
ctx_len: int,
|
32 |
-
embed_dim: int,
|
33 |
-
n_heads: int,
|
34 |
-
resid_pdrop: float,
|
35 |
-
attn_pdrop: float,
|
36 |
-
attn_bias: bool,
|
37 |
-
use_mask: bool = True):
|
38 |
-
super().__init__()
|
39 |
-
assert embed_dim % n_heads == 0
|
40 |
-
|
41 |
-
# key, query, value projections for all heads
|
42 |
-
self.key = nn.Linear(embed_dim, embed_dim, bias=attn_bias)
|
43 |
-
self.query = nn.Linear(embed_dim, embed_dim, bias=attn_bias)
|
44 |
-
self.value = nn.Linear(embed_dim, embed_dim, bias=attn_bias)
|
45 |
-
|
46 |
-
# regularization
|
47 |
-
self.attn_drop = nn.Dropout(attn_pdrop)
|
48 |
-
self.resid_drop = nn.Dropout(resid_pdrop)
|
49 |
-
|
50 |
-
# output projection
|
51 |
-
self.proj = nn.Linear(embed_dim, embed_dim, attn_bias)
|
52 |
-
|
53 |
-
self.n_heads = n_heads
|
54 |
-
self.ctx_len = ctx_len
|
55 |
-
self.use_mask = use_mask
|
56 |
-
if self.use_mask:
|
57 |
-
self.register_buffer("mask", torch.ones(ctx_len, ctx_len), persistent=False)
|
58 |
-
self.mask = torch.tril(self.mask).view(1, ctx_len, ctx_len)
|
59 |
-
|
60 |
-
def forward(self, x, use_cache=False, layer_past=None):
|
61 |
-
B, T, C = x.shape
|
62 |
-
x = x.transpose(0, 1).contiguous() # (B, T, C) -> (T, B, C)
|
63 |
-
|
64 |
-
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
|
65 |
-
k = self.key(x).view(T, B*self.n_heads, C//self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
66 |
-
q = self.query(x).view(T, B*self.n_heads, C//self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
67 |
-
v = self.value(x).view(T, B*self.n_heads, C//self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
68 |
-
|
69 |
-
if use_cache:
|
70 |
-
present = torch.stack([k, v])
|
71 |
-
|
72 |
-
if layer_past is not None:
|
73 |
-
# print(layer_past.shape, k.shape, v.shape, q.shape)
|
74 |
-
# print("LayerPast shape", layer_past.shape)
|
75 |
-
past_key, past_value = layer_past
|
76 |
-
|
77 |
-
if len(past_key.shape) == 4:
|
78 |
-
_, _, seq_len, dim = past_key.shape
|
79 |
-
k = torch.cat([past_key.reshape(-1, seq_len, dim), k], dim=-2)
|
80 |
-
v = torch.cat([past_value.reshape(-1, seq_len, dim), v], dim=-2)
|
81 |
-
elif len(past_key.shape) == 3:
|
82 |
-
past_key, past_value = layer_past
|
83 |
-
k = torch.cat([past_key, k], dim=-2)
|
84 |
-
v = torch.cat([past_value, v], dim=-2)
|
85 |
-
else:
|
86 |
-
raise ValueError
|
87 |
-
|
88 |
-
if use_cache and layer_past is not None:
|
89 |
-
# Tensor shape below: (B * nh, 1, hs) X (B * nh, hs, K) -> (B * nh, 1, K)
|
90 |
-
att = torch.bmm(q, (k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))))
|
91 |
-
att = F.softmax(att, dim=-1)
|
92 |
-
att = self.attn_drop(att)
|
93 |
-
y = torch.bmm(att, v) # (B*nh, 1, K) X (B*nh, K, hs) -> (B*nh, 1, hs)
|
94 |
-
else:
|
95 |
-
# Tensor shape below: (B * nh, T, hs) X (B * nh, hs, T) -> (B * nh, T, T)
|
96 |
-
att = torch.bmm(q, (k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))))
|
97 |
-
if self.use_mask:
|
98 |
-
# TODO : Flip when not prompt tunign
|
99 |
-
# mask = self.mask if T == self.ctx_len else self.mask[:, :T, :T]
|
100 |
-
if T == self.ctx_len:
|
101 |
-
mask = self.mask
|
102 |
-
else:
|
103 |
-
mask = torch.tril(torch.ones(T, T)).view(1, T, T).to(att.device)
|
104 |
-
att = att.masked_fill(mask == 0, float('-inf'))
|
105 |
-
att = F.softmax(att, dim=-1)
|
106 |
-
att = self.attn_drop(att)
|
107 |
-
y = torch.bmm(att, v) # (B*nh, T, T) X (B*nh, T, hs) -> (B*nh, T, hs)
|
108 |
-
y = y.transpose(0, 1).contiguous().view(T, B, C) # re-assemble all head outputs side by side
|
109 |
-
|
110 |
-
# output projection
|
111 |
-
y = self.resid_drop(self.proj(y))
|
112 |
-
if use_cache:
|
113 |
-
return y.transpose(0, 1).contiguous(), present # (T, B, C) -> (B, T, C)
|
114 |
-
else:
|
115 |
-
return y.transpose(0, 1).contiguous() # (T, B, C) -> (B, T, C)
|
116 |
-
|
117 |
-
def forward_with_context(self, x, context, mask=None):
|
118 |
-
B, T, C = x.shape
|
119 |
-
x = x.transpose(0, 1).contiguous() # (B, T, C) -> (T, B, C)
|
120 |
-
|
121 |
-
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
|
122 |
-
q = self.query(x).view(T, B*self.n_heads, C//self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
123 |
-
|
124 |
-
B, T_c, C = context.shape
|
125 |
-
k = self.key(context).view(T_c, B * self.n_heads, C // self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
126 |
-
v = self.value(context).view(T_c, B*self.n_heads, C//self.n_heads).transpose(0, 1) # (B*nh, T, hs)
|
127 |
-
|
128 |
-
# Tensor shape below: (B * nh, T, hs) X (B * nh, hs, Tc) -> (B * nh, T, Tc)
|
129 |
-
att = torch.bmm(q, (k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))))
|
130 |
-
att = F.softmax(att, dim=-1)
|
131 |
-
att = self.attn_drop(att)
|
132 |
-
y = torch.bmm(att, v) # (B*nh, T, T) X (B*nh, T, hs) -> (B*nh, T, hs)
|
133 |
-
y = y.transpose(0, 1).contiguous().view(T, B, C) # re-assemble all head outputs side by side
|
134 |
-
|
135 |
-
# output projection
|
136 |
-
y = self.resid_drop(self.proj(y)).transpose(0, 1).contiguous()
|
137 |
-
if mask is not None:
|
138 |
-
y = y.masked_fill(mask == 0, float('0.0'))
|
139 |
-
return y # (T, B, C) -> (B, T, C)
|
140 |
-
|
141 |
-
|
142 |
-
class Block(nn.Module):
|
143 |
-
|
144 |
-
def __init__(self,
|
145 |
-
ctx_len: int,
|
146 |
-
embed_dim: int,
|
147 |
-
n_heads: int,
|
148 |
-
mlp_bias: bool,
|
149 |
-
attn_bias: bool,
|
150 |
-
resid_pdrop: bool,
|
151 |
-
attn_pdrop: bool,
|
152 |
-
gelu_use_approx: bool):
|
153 |
-
super().__init__()
|
154 |
-
self.ln1 = nn.LayerNorm(embed_dim)
|
155 |
-
self.ln2 = nn.LayerNorm(embed_dim)
|
156 |
-
|
157 |
-
self.attn = MultiHeadSelfAttention(ctx_len=ctx_len,
|
158 |
-
embed_dim=embed_dim,
|
159 |
-
n_heads=n_heads,
|
160 |
-
attn_pdrop=attn_pdrop,
|
161 |
-
resid_pdrop=resid_pdrop,
|
162 |
-
attn_bias=attn_bias,
|
163 |
-
use_mask=True)
|
164 |
-
self.mlp = nn.Sequential(
|
165 |
-
nn.Linear(embed_dim, 4 * embed_dim, bias=mlp_bias),
|
166 |
-
GELU(gelu_use_approx),
|
167 |
-
nn.Linear(4 * embed_dim, embed_dim, bias=mlp_bias),
|
168 |
-
nn.Dropout(resid_pdrop),
|
169 |
-
)
|
170 |
-
|
171 |
-
def forward(self, x, layer_past=None):
|
172 |
-
x = x + self.attn(self.ln1(x), layer_past=layer_past)
|
173 |
-
x = x + self.mlp(self.ln2(x))
|
174 |
-
return x
|
175 |
-
|
176 |
-
def sample(self, x, layer_past=None):
|
177 |
-
attn, present = self.attn(self.ln1(x), use_cache=True, layer_past=layer_past)
|
178 |
-
x = x + attn
|
179 |
-
x = x + self.mlp(self.ln2(x))
|
180 |
-
return x, present
|
181 |
-
|
182 |
-
def sample_with_context(self, x, context, context_mask, cross_attn_layer, layer_past=None):
|
183 |
-
attn, present = self.attn(self.ln1(x), use_cache=True, layer_past=layer_past)
|
184 |
-
x = x + attn
|
185 |
-
|
186 |
-
c_attn = cross_attn_layer(x.to(device=context.device),
|
187 |
-
context,
|
188 |
-
context_mask.to(device=context.device))
|
189 |
-
|
190 |
-
x = x + c_attn.to(device=x.device)
|
191 |
-
|
192 |
-
x = x + self.mlp(self.ln2(x))
|
193 |
-
return x, present
|
194 |
-
|
195 |
-
|
196 |
-
class CrossAttentionLayer(nn.Module):
|
197 |
-
|
198 |
-
def __init__(self,
|
199 |
-
ctx_len: int,
|
200 |
-
embed_dim: int,
|
201 |
-
n_heads: int,
|
202 |
-
attn_bias: bool,
|
203 |
-
resid_pdrop: bool,
|
204 |
-
attn_pdrop: bool):
|
205 |
-
super().__init__()
|
206 |
-
|
207 |
-
self.ln1 = nn.LayerNorm(embed_dim)
|
208 |
-
self.ln2 = nn.LayerNorm(embed_dim)
|
209 |
-
self.attn = MultiHeadSelfAttention(ctx_len=ctx_len,
|
210 |
-
embed_dim=embed_dim,
|
211 |
-
n_heads=n_heads,
|
212 |
-
attn_pdrop=attn_pdrop,
|
213 |
-
resid_pdrop=resid_pdrop,
|
214 |
-
attn_bias=attn_bias,
|
215 |
-
use_mask=False)
|
216 |
-
|
217 |
-
def forward(self, x, context, context_mask=None):
|
218 |
-
attn = self.attn.forward_with_context(self.ln1(x), self.ln2(context), context_mask)
|
219 |
-
# x = x + attn
|
220 |
-
# return x
|
221 |
-
return attn
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|