parquet-converter committed
Commit ca0cdd8 · 1 Parent(s): fb4b01f

Update parquet files (step 16 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/testing/binghuan/BingHuan.py +0 -49
  2. spaces/123Kumar/vits-uma-genshin-honkai123/app.py +0 -124
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cricket BYOD Compatibility List What You Need to Know Before You Switch.md +0 -40
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md +0 -166
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md +0 -33
  6. spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md +0 -25
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md +0 -126
  8. spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md +0 -114
  9. spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md +0 -3
  10. spaces/30Kanika/Animal_Image_Classifier/README.md +0 -13
  11. spaces/52Hz/CMFNet_dehazing/model/CMFNet.py +0 -191
  12. spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md +0 -13
  13. spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py +0 -169
  14. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/server/internal.js +0 -30
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts +0 -59
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js +0 -13
  17. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py +0 -27
  18. spaces/AlexKozachuk/anything-v3.0/README.md +0 -13
  19. spaces/Allakhazam/Home/README.md +0 -11
  20. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py +0 -17
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py +0 -154
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +0 -57
  23. spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py +0 -140
  24. spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py +0 -12
  25. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py +0 -2
  26. spaces/Ariharasudhan/YoloV5/utils/segment/loss.py +0 -186
  27. spaces/ArkanDash/rvc-models/infer_pack/commons.py +0 -166
  28. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py +0 -208
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py +0 -352
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py +0 -104
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py +0 -457
  32. spaces/Awesimo/jojogan/e4e_projection.py +0 -38
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/objects365.py +0 -394
  34. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/benchmark.py +0 -197
  35. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py +0 -672
  36. spaces/Benson/text-generation/Examples/Agar.io Indir Apk.md +0 -110
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/__init__.py +0 -0
  38. spaces/Billyosoro/ESRGAN/realesrgan/models/realesrgan_model.py +0 -258
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp +0 -73
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/mask_ops.py +0 -247
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/convert-torchvision-to-d2.py +0 -56
  42. spaces/CVPR/LIVE/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h +0 -47
  43. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce.h +0 -23
  44. spaces/CVPR/WALT/mmdet/core/bbox/samplers/base_sampler.py +0 -101
  45. spaces/CVPR/WALT/mmdet/utils/logger.py +0 -19
  46. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/mask_decoder.py +0 -176
  47. spaces/ChengZ/DeepDanbooru_string0/README.md +0 -39
  48. spaces/CodingBillionaire/bark-voice-cloning/app.py +0 -98
  49. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/__init__.py +0 -2
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/instancer/solver.py +0 -305
spaces/101-5/gpt4free/testing/binghuan/BingHuan.py DELETED
@@ -1,49 +0,0 @@
- import os,sys
- import json
- import subprocess
- # from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://b.ai-huan.xyz'
- model = ['gpt-3.5-turbo', 'gpt-4']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     path = os.path.dirname(os.path.realpath(__file__))
-     config = json.dumps({
-         'messages': messages,
-         'model': model}, separators=(',', ':'))
-     cmd = ['python', f'{path}/helpers/binghuan.py', config]
-
-     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-     for line in iter(p.stdout.readline, b''):
-         yield line.decode('cp1252')
-
-
-
- # params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- #     '(%s)' % ', '.join(
- #         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-
- # Temporary For ChatCompletion Class
- class ChatCompletion:
-     @staticmethod
-     def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
-         kwargs['auth'] = auth
-
-         if provider and needs_auth and not auth:
-             print(
-                 f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-             sys.exit(1)
-
-         try:
-             return (_create_completion(model, messages, stream, **kwargs)
-                     if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
-         except TypeError as e:
-             print(e)
-             arg: str = str(e).split("'")[1]
-             print(
-                 f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
-             sys.exit(1)
spaces/123Kumar/vits-uma-genshin-honkai123/app.py DELETED
@@ -1,124 +0,0 @@
- import time
- import gradio as gr
- import utils
- import commons
- from models import SynthesizerTrn
- from text import text_to_sequence
- from torch import no_grad, LongTensor
- import torch
-
- hps_ms = utils.get_hparams_from_file(r'./model/config.json')
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- net_g_ms = SynthesizerTrn(
-     len(hps_ms.symbols),
-     hps_ms.data.filter_length // 2 + 1,
-     hps_ms.train.segment_size // hps_ms.data.hop_length,
-     n_speakers=hps_ms.data.n_speakers,
-     **hps_ms.model).to(device)
- _ = net_g_ms.eval()
- speakers = hps_ms.speakers
- model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
-
- def get_text(text, hps):
-     text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
-     if hps.data.add_blank:
-         text_norm = commons.intersperse(text_norm, 0)
-     text_norm = LongTensor(text_norm)
-     return text_norm, clean_text
-
- def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale):
-     start = time.perf_counter()
-     if not len(text):
-         return "输入文本不能为空!", None, None
-     text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
-     if len(text) > 500:
-         return f"输入文字过长!{len(text)}>100", None, None
-     if language == 0:
-         text = f"[ZH]{text}[ZH]"
-     elif language == 1:
-         text = f"[JA]{text}[JA]"
-     else:
-         text = f"{text}"
-     stn_tst, clean_text = get_text(text, hps_ms)
-     with no_grad():
-         x_tst = stn_tst.unsqueeze(0)
-         x_tst_lengths = LongTensor([stn_tst.size(0)])
-         speaker_id = LongTensor([speaker_id])
-         audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
-                                length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
-     return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s"
-
- def search_speaker(search_value):
-     for s in speakers:
-         if search_value == s:
-             return s
-     for s in speakers:
-         if search_value in s:
-             return s
-
- def change_lang(language):
-     if language == 0:
-         return 0.6, 0.668, 1.2
-     else:
-         return 0.6, 0.668, 1.1
-
- download_audio_js = """
- () =>{{
-     let root = document.querySelector("body > gradio-app");
-     if (root.shadowRoot != null)
-         root = root.shadowRoot;
-     let audio = root.querySelector("#tts-audio").querySelector("audio");
-     let text = root.querySelector("#input-text").querySelector("textarea");
-     if (audio == undefined)
-         return;
-     text = text.value;
-     if (text == undefined)
-         text = Math.floor(Math.random()*100000000);
-     audio = audio.src;
-     let oA = document.createElement("a");
-     oA.download = text.substr(0, 20)+'.wav';
-     oA.href = audio;
-     document.body.appendChild(oA);
-     oA.click();
-     oA.remove();
- }}
- """
-
- if __name__ == '__main__':
-     with gr.Blocks() as app:
-         gr.Markdown(
-             "# <center> VITS语音在线合成demo\n"
-             "<div align='center'>主要有赛马娘,原神中文,原神日语,崩坏3的音色</div>"
-             '<div align="center"><a><font color="#dd0000">结果有随机性,语调可能很奇怪,可多次生成取最佳效果</font></a></div>'
-             '<div align="center"><a><font color="#dd0000">标点符号会影响生成的结果</font></a></div>'
-         )
-
-         with gr.Tabs():
-             with gr.TabItem("vits"):
-                 with gr.Row():
-                     with gr.Column():
-                         input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text")
-                         lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
-                                            type="index", value="中文")
-                         btn = gr.Button(value="Submit")
-                         with gr.Row():
-                             search = gr.Textbox(label="Search Speaker", lines=1)
-                             btn2 = gr.Button(value="Search")
-                             sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228])
-                         with gr.Row():
-                             ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
-                             nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
-                             ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True)
-                     with gr.Column():
-                         o1 = gr.Textbox(label="Output Message")
-                         o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio")
-                         o3 = gr.Textbox(label="Extra Info")
-                         download = gr.Button("Download Audio")
-                 btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate")
-                 download.click(None, [], [], _js=download_audio_js.format())
-                 btn2.click(search_speaker, inputs=[search], outputs=[sid])
-                 lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
-             with gr.TabItem("可用人物一览"):
-                 gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index")
-     app.queue(concurrency_count=1).launch()
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cricket BYOD Compatibility List What You Need to Know Before You Switch.md DELETED
@@ -1,40 +0,0 @@
- <br />
- <h1>How to Check if Your Phone is Compatible with Cricket's BYOD Program</h1>
- <p>Cricket Wireless is a prepaid wireless service provider that offers a variety of plans and features for customers who want to bring their own device (BYOD) to the network. However, not all devices are compatible with Cricket's network, so you need to check your phone's compatibility before you switch.</p>
- <h2>cricket byod compatibility list</h2><br /><p><b><b>Download File</b> &#10022; <a href="https://byltly.com/2uKzbu">https://byltly.com/2uKzbu</a></b></p><br /><br />
- <p>In this article, we will explain how to check if your phone is compatible with Cricket's BYOD program, what are the requirements and benefits of using your own device on Cricket, and what are some of the compatible devices that you can bring to Cricket.</p>
- <h2>How to Check Your Phone's Compatibility</h2>
- <p>The easiest way to check if your phone is compatible with Cricket's network is to use their online IMEI checker tool. IMEI stands for International Mobile Equipment Identity, and it is a unique 15-digit number that identifies your device. You can find your IMEI by dialing *#06# on your phone's keypad, or by looking in your phone's settings or on the back of your device.</p>
- <p>Once you have your IMEI, go to <a href="https://www.cricketwireless.com/cell-phones/bring-your-phone">https://www.cricketwireless.com/cell-phones/bring-your-phone</a> and enter it in the box. The tool will tell you if your phone is compatible with Cricket's network, and if it is eligible for HD Voice, which is a feature that enhances the quality and clarity of voice calls.</p>
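As an aside to the paragraph above: IMEIs end in a Luhn check digit, so a mistyped number can be caught locally before it is pasted into the checker. A minimal Python sketch of that sanity check (illustrative only; it says nothing about network compatibility, which only Cricket's tool can confirm):

```python
def luhn_valid(imei: str) -> bool:
    """Sanity-check a 15-digit IMEI: the last digit is a Luhn check digit."""
    if len(imei) != 15 or not imei.isdigit():
        return False
    total = 0
    for i, ch in enumerate(imei):
        digit = int(ch)
        if i % 2 == 1:        # double every second digit, left to right
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0

print(luhn_valid("490154203237518"))  # widely cited sample IMEI -> True
```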
- <p>If your phone is not compatible, you may need to unlock it from your current carrier, or buy a new device that works on Cricket's network. You can also check out Cricket's list of compatible devices <a href="https://www.cricketwireless.com/support/great-big-network/byod-compatibility.html">here</a>.</p>
- <h2>What are the Requirements and Benefits of BYOD</h2>
- <p>To bring your own device to Cricket, you need to meet the following requirements:</p>
- <p></p>
- <ul>
- <li>Your phone must be unlocked from your current carrier. You can contact your carrier to request an unlock code if you meet their criteria.</li>
- <li>Your phone must be HD Voice capable and compatible with Cricket's HD Voice network. Not all BYOD phones will work on Cricket's HD Voice network, which provides better sound quality and fewer dropped calls.</li>
- <li>Your phone must be activated on an eligible rate plan. You can choose from Cricket's unlimited plans or data-only plans, depending on your needs.</li>
- <li>You need to buy a Cricket SIM card and one month of service. You can order them online or at a Cricket store. You can also keep your current phone number or get a new one.</li>
- </ul>
- <p>By bringing your own device to Cricket, you can enjoy the following benefits:</p>
- <ul>
- <li>You can save money by not buying a new device or paying activation fees.</li>
- <li>You can keep using the phone you love and are familiar with.</li>
- <li>You can access Cricket's nationwide 4G LTE network and enjoy fast data speeds and reliable coverage.</li>
- <li>You can choose from a variety of plans and features that suit your budget and lifestyle.</li>
- <li>You can change your plan or device anytime without any contracts or penalties.</li>
- </ul>
- <h2>Some Compatible Devices You Can Bring to Cricket</h2>
- <p>Cricket has a wide range of compatible devices that you can bring to their network, including smartphones, feature phones, tablets, and data-only devices. Here are some examples of compatible devices that you can bring to Cricket:</p>
- <table border="1">
- <tr><th>Brand</th><th>Model</th></tr>
- <tr><td>Apple</td><td>iPhone 6 and later</td></tr>
- <tr><td>Google</td><td>Pixel 4 and later</td></tr>
- <tr><td>Samsung</td><td>Galaxy S9 and later</td></tr>
- <tr><td>LG</td><td>G8 ThinQ and later</td></tr>
- <tr><td>Moto</td><td>G Power and later</td></tr>
- <tr><td>Nokia</td><td>C5 Endi and later</td></tr>
- <tr><td>TCL</td><td>TCL 10 Pro and later</td></tr>
- <tr><td>Z</p> ddb901b051<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Diablo 2 Fury Within 1.09 A Mod Based on the Classic Patch 1.09 Version.md DELETED
@@ -1,166 +0,0 @@
-
- <h1>Download Diablo 2 Fury Within 1.09: A Guide for Fans of the Classic Action RPG</h1>
- <p>If you are a fan of Diablo 2, one of the most popular and influential action role-playing games of all time, you might be interested in trying out a mod that adds new content, features, and challenges to the game. Diablo 2 Fury Within 1.09 is a mod that aims to enhance the original game while staying true to its spirit and atmosphere. In this article, we will show you how to download, install, and play this mod, as well as some tips and tricks to make the most out of it.</p>
- <h2>download diablo 2 fury within 1.09</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://byltly.com/2uKyDq">https://byltly.com/2uKyDq</a></b></p><br /><br />
- <h2>What is Diablo 2 Fury Within 1.09?</h2>
- <p>Diablo 2 Fury Within 1.09 is a mod for Diablo 2 that was created by a team of fans who wanted to improve the game in various ways. The mod was first released in 2005 and has been updated several times since then. The latest version, 1.09, was released in 2019.</p>
- <p>The mod adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.</p>
- <h2>How to download Diablo 2 Fury Within 1.09?</h2>
- <p>To download Diablo 2 Fury Within 1.09, you will need a few things:</p>
- <ul>
- <li>A copy of Diablo 2 (preferably version 1.10 or higher)</li>
- <li>A copy of Diablo 2 Lord of Destruction expansion (preferably version 1.10 or higher)</li>
- <li>A ZIP file extractor program such as WinRAR or 7-Zip</li>
- <li>A reliable internet connection</li>
- </ul>
- <p>Once you have these things ready, you can follow these steps:</p>
- <h3>Requirements and compatibility</h3>
- <p>Before you download the mod, you should check if your system meets the minimum requirements to run it. The mod does not require a very powerful computer, but it does have some additional features that might affect your performance.</p>
- <p>The minimum requirements are:</p>
- <ul>
- <li>Windows XP or higher</li>
- <li>Pentium III or higher</li>
- <li>512 MB RAM or higher</li>
- <li>DirectX compatible sound card</li>
- <li>DirectX compatible video card with at least 32 MB VRAM</li>
- <li>4 GB free hard disk space</li>
- </ul>
- <p>You should also check if your version of Diablo 2 is compatible with the mod. The mod works best with version 1.10 or higher of both Diablo 2 and Lord of Destruction expansion. If you have an older version, you might encounter some issues or bugs.</p>
- <p>To check your version of Diablo 2, you can open the game launcher and look at the bottom left corner of the screen. You should see something like "Version x.xx". If you have an older version than 1.10, you can update your game by downloading and installing the latest patch from Blizzard's website.</p>
- <p>How to download diablo 2 fury within 1.09 for free<br />
- Diablo 2 fury within 1.09 download full version<br />
- Download diablo 2 fury within 1.09 mod with new features<br />
- Diablo 2 fury within 1.09 download link and installation guide<br />
- Download diablo 2 fury within 1.09 patch and fix bugs<br />
- Diablo 2 fury within 1.09 download torrent and crack<br />
- Download diablo 2 fury within 1.09 online and play with friends<br />
- Diablo 2 fury within 1.09 download review and rating<br />
- Download diablo 2 fury within 1.09 cheats and hacks<br />
- Diablo 2 fury within 1.09 download system requirements and compatibility<br />
- Download diablo 2 fury within 1.09 soundtrack and wallpapers<br />
- Diablo 2 fury within 1.09 download tips and tricks<br />
- Download diablo 2 fury within 1.09 maps and items<br />
- Diablo 2 fury within 1.09 download best builds and skills<br />
- Download diablo 2 fury within 1.09 characters and classes<br />
- Diablo 2 fury within 1.09 download lore and story<br />
- Download diablo 2 fury within 1.09 update and changelog<br />
- Diablo 2 fury within 1.09 download comparison and alternatives<br />
- Download diablo 2 fury within 1.09 forum and community<br />
- Diablo 2 fury within 1.09 download error and solution<br />
- Download diablo 2 fury within 1.09 trainer and editor<br />
- Diablo 2 fury within 1.09 download speed and performance<br />
- Download diablo 2 fury within 1.09 video and screenshots<br />
- Diablo 2 fury within 1.09 download size and file format<br />
- Download diablo 2 fury within 1.09 backup and restore<br />
- Diablo 2 fury within 1.09 download bonus and rewards<br />
- Download diablo 2 fury within 1.09 mods and addons<br />
- Diablo 2 fury within 1.09 download multiplayer and co-op<br />
- Download diablo 2 fury within 1.09 difficulty and challenge<br />
- Diablo 2 fury within 1.09 download secrets and easter eggs<br />
- Download diablo 2 fury within 1.09 guide and walkthrough<br />
- Diablo 2 fury within 1.09 download fun and entertainment<br />
- Download diablo 2 fury within 1.09 history and development<br />
- Diablo 2 fury within 1.09 download support and feedback<br />
- Download diablo 2 fury within 1.09 news and updates<br />
- Diablo</p>
- <h3>Download links and sources</h3>
- <p>Once you have verified your requirements and compatibility, you can proceed to download the mod files. The mod files are compressed in a ZIP file format that you will need to extract later.</p>
- <p>The official source for downloading the mod is its website: <a href="http://furywithin.org/">http://furywithin.org/</a>. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.</p>
- <p>The direct link for downloading the mod file is: <a href="http://furywithin.org/download/FuryWithin109.zip">http://furywithin.org/download/FuryWithin109.zip</a>. The file size is about 800 MB.</p>
- <p>You should always download the mod from its official source or from trusted websites that host it. You should avoid downloading it from unknown or suspicious sources that might contain viruses or malware.</p>
- <p>You should also verify the authenticity of the file by checking its checksum value. A checksum is a unique code that identifies a file based on its content. If two files have different checksum values, it means they are different files.</p>
- <p>The official checksum value for the mod file is:</p>
- <pre><code>MD5: e0c8b7d8c6b0e4c9d6e0b8f6c9e8c9a4 SHA-1: c5d0b7d8c6b0e4c9d6e0b8f6c9e8c9a4 SHA-256: c5d0b7d8c6b0e4c9d6e0b8f6c9e8c9a4 </code></pre>
- <p>You can use online tools such as <a href="https://md5file.com/calculator">https://md5file.com/calculator</a> or <a href="https://emn178.github.io/online-tools/sha256_checksum.html">https://emn178.github.io/online-tools/sha256_checksum.html</a> to calculate the checksum value of your downloaded file and compare it with the official one.</p>
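The same comparison can be done locally with Python's standard-library hashlib instead of an online tool. A minimal sketch; the file name is the download named above, and the expected value is the MD5 digest quoted by the article:

```python
import hashlib

def file_digest(path: str, algorithm: str = "md5", chunk_size: int = 1 << 20) -> str:
    """Hash a file in chunks so large downloads need not fit in memory."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected_md5 = "e0c8b7d8c6b0e4c9d6e0b8f6c9e8c9a4"  # value quoted in the article
actual = file_digest("FuryWithin109.zip")
print("checksum OK" if actual == expected_md5 else f"mismatch: {actual}")
```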
- <h3>Installation process</h3>
- <p>After you have downloaded and verified the mod file, you can proceed to install it in your Diablo 2 folder. To do this, you will need a ZIP file extractor program such as WinRAR or 7-Zip.</p>
- <p>You can follow these steps:</p>
- <ol>
- <li>Locate your downloaded file (FuryWithin109.zip) and right-click on it.</li>
- <li>Select "Extract Here" or "Extract to FuryWithin109/" depending on your extractor program.</li>
- <li>You should see a new folder named "FuryWithin109" containing several files and subfolders.</li>
- <li>Open this folder and select all its contents (Ctrl+A).</li>
- <li>Copy them (Ctrl+C).</li>
- <li>Locate your Diablo 2 folder where you installed the game (usually C:\Program Files\Diablo II\).</li>
- <li>Paste them (Ctrl+V) into your Diablo 2 folder.</li>
- <li>You should see a prompt asking if you want to replace some existing files with new ones.</li>
- <li>Select "Yes" or "Yes to All" depending on your extractor program.</li>
- <li>You have successfully installed the mod in your Diablo 2 folder.</li>
- </ol>
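The extract-and-overwrite procedure above can also be scripted. A minimal Python sketch of the same steps, assuming the default install path from step 6 and Python 3.8+ (for dirs_exist_ok):

```python
import shutil
import zipfile
from pathlib import Path

mod_zip = Path("FuryWithin109.zip")             # the downloaded archive
staging = Path("FuryWithin109")                 # temporary extraction folder
game_dir = Path(r"C:\Program Files\Diablo II")  # default install path (step 6)

# Steps 1-3: extract the archive into its own folder.
with zipfile.ZipFile(mod_zip) as zf:
    zf.extractall(staging)

# Steps 4-9: copy everything into the game folder, overwriting on conflict
# (the scripted equivalent of answering "Yes to All").
shutil.copytree(staging, game_dir, dirs_exist_ok=True)
```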
- <h2>How to play Diablo 2 Fury Within 1.09?</h2>
- <p>To play Diablo 2 Fury Within 1.09, you just need to launch your Diablo 2 game as usual. You should see a new splash screen with the mod logo and version number.</p>
- <p>You can create a new character or use an existing one to play the mod. However, you should be aware that the mod is not compatible with some other mods or save files from the original game. You might encounter some errors or crashes if you try to use them.</p>
- <p>You should also backup your save files before playing the mod, in case you want to revert to the original game or switch to another mod. You can find your save files in your Diablo 2 folder under the subfolder "save". You can copy them to another location for safekeeping.</p>
- <p>Once you are in the game, you can enjoy the mod and its features. Here are some tips and tricks to help you:</p>
- <h3>New features and changes</h3>
- <p>The mod adds a lot of new content and changes to the game. Some of the main ones are:</p>
- <ul>
- <li>A new difficulty level called "Hellish" that is harder than Hell and has more powerful enemies and rewards.</li>
- <li>A new game mode called "Hardcore" that is similar to the original Hardcore mode but with some extra challenges and penalties.</li>
- <li>A new option called "Randomize" that allows you to randomize some aspects of the game such as maps, monsters, items, quests, and more.</li>
- <li>A new option called "Rebirth" that allows you to reset your character's level, skills, and stats without losing your items or quests.</li>
- <li>A new option called "Respec" that allows you to redistribute your skill points and stat points without using any items or quests.</li>
- <li>A new option called "Gambling" that allows you to gamble for items using gold or gems.</li>
- <li>A new option called "Crafting" that allows you to create new items using materials and recipes.</li>
- <li>A new option called "Enchanting" that allows you to enhance your items using runes and charms.</li>
- <li>A new option called "Socketing" that allows you to add sockets to your items using jewels and gems.</li>
- <li>A new option called "Transmuting" that allows you to transform your items using formulas and catalysts.</li>
- <li>A new option called "Trading" that allows you to exchange your items with other players online or offline.</li>
- <li>A new option called "Stashing" that allows you to store your items in a shared stash that can be accessed by all your characters.</li>
- <li>A new option called "Donating" that allows you to donate your items to a charity box that can be accessed by other players online or offline.</li>
- <li>A new option called "Cheating" that allows you to cheat in various ways such as giving yourself gold, items, skills, stats, and more.</li>
- </ul>
- <p>You can access these options by clicking on the icons on the bottom right corner of the screen or by pressing the corresponding hotkeys (F1-F12).</p>
- <h3>New classes and skills</h3>
- <p>The mod adds six new classes to the game, each with their own unique skills and playstyles. They are:</p>
- <ul>
- <li>The Paladin: A holy warrior who uses auras, blessings, and smites to fight evil.</li>
- <li>The Necromancer: A dark summoner who uses curses, minions, and bones to manipulate death.</li>
- <li>The Assassin: A stealthy killer who uses traps, martial arts, and shadow disciplines to strike from the shadows.</li>
- <li>The Druid: A nature shifter who uses elemental magic, shape-shifting, and summoning to harness the power of nature.</li>
- <li>The Amazon: A skilled archer who uses bows, javelins, spears, and passive skills to hunt down her enemies.</li>
- <li>The Barbarian: A fierce warrior who uses swords, axes, maces, and war cries to dominate the battlefield.</li>
- </ul>
- <p>You can choose one of these classes when creating a new character or use a Rebirth option to change your existing character's class. You can also use a Respec option to change your skill points allocation at any time.</p>
- <p>Each class has three skill trees with 10 skills each. You can learn these skills by spending skill points that you earn by leveling up or completing quests. You can also find skill books that grant you additional skill points or teach you specific skills.</p>
- <p>Some skills have synergies with other skills, meaning they become more powerful when combined together. You can see these synergies by hovering over a skill icon or pressing the shift key while selecting a skill.</p>
- <h3>New items and crafting</h3>
- <p>, materials, recipes, formulas, catalysts, and more. You can find these items by killing monsters, opening chests, gambling, crafting, transmuting, trading, donating, or cheating.</p>
- <p>Some items have special properties such as prefixes, suffixes, set bonuses, unique effects, ethereal quality, socketed slots, and more. You can see these properties by hovering over an item icon or pressing the alt key while looking at an item.</p>
- <p>Some items can be upgraded or modified using other items such as runes, charms, jewels, gems, materials, recipes, formulas, catalysts, and more. You can do this by using the crafting, enchanting, socketing, or transmuting options.</p>
- <p>Crafting is a new feature that allows you to create new items using materials and recipes. Materials are items that can be used as ingredients for crafting. Recipes are items that can be used as instructions for crafting. You can find materials and recipes by killing monsters, opening chests, gambling, transmuting, trading, donating, or cheating.</p>
- <p>To craft an item, you need to have the required materials and recipe in your inventory. Then you need to click on the crafting icon or press the F6 key to open the crafting window. Here you can see the list of available recipes and their requirements. You can select a recipe and click on the craft button to create the item.</p>
- <p>, set bonuses, unique effects, ethereal quality, socketed slots, and more.</p>
- <h2>How to troubleshoot Diablo 2 Fury Within 1.09?</h2>
- <p>Diablo 2 Fury Within 1.09 is a mod that modifies the original game in many ways. As such, it might cause some issues or problems for some players. Here are some common issues and solutions for playing the mod:</p>
- <h3>Compatibility issues</h3>
- <p>The mod might not work well with other mods, patches, or versions of Diablo 2. If you have installed or used any other mods or patches before or after installing the mod, you might encounter some errors or crashes.</p>
- <p>To fix this, you should uninstall or remove any other mods or patches from your Diablo 2 folder. You should also make sure that your version of Diablo 2 and Lord of Destruction expansion is 1.10 or higher. You can update your game by downloading and installing the latest patch from Blizzard's website.</p>
- <h3>Performance issues</h3>
- <p>The mod might affect your game performance in terms of speed, graphics, sound, or stability. If you experience any lag, stuttering, freezing, crashing, or other performance issues while playing the mod, you might need to optimize your settings and system.</p>
- <p>To fix this, you should lower your game resolution, quality, and sound options in the game menu. You should also close any unnecessary programs or processes running in the background of your computer. You should also scan your computer for viruses or malware that might slow it down.</p>
- <h3>Bug reports and feedback</h3>
- <p>The mod might have some bugs or glitches that affect your gameplay experience. If you encounter any bugs or glitches while playing the mod, you should report them to the mod developers and community.</p>
- <p>To do this, you should visit the mod website: <a href="http://furywithin.org/">http://furywithin.org/</a>. Here you can find more information about the mod, its features, screenshots, videos, forums, support, and updates.</p>
- <p>, logs, system specifications, and steps to reproduce the issue. You should also be polite and respectful when reporting or giving feedback.</p>
- <h2>Conclusion</h2>
- <p>Diablo 2 Fury Within 1.09 is a mod that enhances the original game in various ways. It adds new content such as classes, skills, items, monsters, quests, maps, music, sounds, graphics, and more. It also changes some aspects of the gameplay such as difficulty, balance, mechanics, interface, and more. The mod aims to make the game more fun, challenging, diverse, and replayable.</p>
- <p>To download and play the mod, you need to have a copy of Diablo 2 and Lord of Destruction expansion with version 1.10 or higher. You also need to download the mod file from its official website and extract and copy it to your Diablo 2 folder. You can then launch your game as usual and enjoy the mod and its features.</p>
- <p>If you encounter any issues or problems while playing the mod, you can try to fix them by checking your requirements and compatibility, optimizing your settings and system, or reporting them to the mod developers and community.</p>
- <p>If you are a fan of Diablo 2 and want to experience a new and improved version of the game, you should definitely try out Diablo 2 Fury Within 1.09. It is one of the best mods for Diablo 2 that will keep you entertained for hours.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Diablo 2 Fury Within 1.09:</p>
- <ol>
- <li>Q: Is Diablo 2 Fury Within 1.09 free?</li>
- <li>A: Yes, Diablo 2 Fury Within 1.09 is a free mod that you can download and play without paying anything.</li>
- <li>Q: Is Diablo 2 Fury Within 1.09 safe?</li>
- <li>A: Yes, Diablo 2 Fury Within 1.09 is a safe mod that does not contain any viruses or malware. However, you should always download it from its official source or from trusted websites that host it.</li>
- <li>Q: Is Diablo 2 Fury Within 1.09 multiplayer?</li>
- <li>A: Yes, Diablo 2 Fury Within 1.09 is a multiplayer mod that you can play online or offline with other players. You can join or host games using the Battle.net service or using other third-party programs such as Hamachi or Tunngle.</li>
- <li>Q: Is Diablo 2 Fury Within 1.09 legal?</li>
- <li>A: Yes, Diablo 2 Fury Within 1.09 is a legal mod that does not violate any laws or terms of service. However, you should always respect the intellectual property rights of Blizzard Entertainment and the mod developers when using or distributing the mod.</li>
- <li>Q: Is Diablo 2 Fury Within 1.09 fun?</li>
- <p>, fun is subjective and depends on your personal preferences and tastes. You might like or dislike the mod for different reasons. The best way to find out if you like the mod is to try it yourself.</p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Winrar Mac [CRACKED].md DELETED
@@ -1,33 +0,0 @@
- <br />
- <h1>How to Get WinRAR for Free on Mac</h1>
- <p>WinRAR is a popular file compression and archiving software that can handle various formats such as RAR, ZIP, CAB, ARJ, LZH, TAR, GZip, UUE, ISO, BZIP2, Z and 7-Zip. WinRAR can help you reduce the size of your files, save disk space, and speed up file transfer. WinRAR also offers features such as encryption, password protection, split archives, and recovery of damaged files.</p>
- <h2>free winrar mac</h2><br /><p><b><b>Download</b> ---> <a href="https://byltly.com/2uKzMt">https://byltly.com/2uKzMt</a></b></p><br /><br />
- <p>However, WinRAR is not available for Mac as a graphical user interface (GUI) application. If you want to use WinRAR on Mac, you have to use the command-line version, which requires some technical skills and may not be convenient for most users. Alternatively, you can use one of the many WinRAR alternatives for Mac that offer similar or better functionality and user experience.</p>
- <p>In this article, we will show you how to get WinRAR for free on Mac by using one of the best WinRAR alternatives: Bandizip. Bandizip is a freemium file compression and archiving software that supports various formats such as RAR, ZIP, 7Z, TAR, GZ, ISO, and more. Bandizip also offers features such as encryption, password protection, split archives, preview files, and extraction of multiple archives at once.</p>
- <p>Bandizip is easy to use and has a simple and intuitive interface. You can download Bandizip for free from its official website or from the Mac App Store. Bandizip works on macOS 10.10 or later and requires 64-bit processor. Here are the steps to get WinRAR for free on Mac by using Bandizip:</p>
- <ol>
- <li>Download and install Bandizip on your Mac.</li>
- <li>Launch Bandizip and click on the "New Archive" button on the toolbar.</li>
- <li>Select the files or folders that you want to compress and click on the "Open" button.</li>
- <li>Choose the archive format that you want to use. You can select RAR if you want to create a RAR archive compatible with WinRAR.</li>
- <li>Optionally, you can change the archive name, location, compression level, encryption method, password, split size, and other settings.</li>
- <li>Click on the "Create" button to start compressing your files.</li>
- <li>Wait for the compression process to finish. You can see the progress and details on the status bar.</li>
- <li>You have successfully created a RAR archive using Bandizip. You can find your archive in the location that you specified.</li>
- </ol>
- <p>To extract a RAR archive using Bandizip, you can follow these steps:</p>
- <ol>
- <li>Launch Bandizip and click on the "Open Archive" button on the toolbar.</li>
- <li>Select the RAR archive that you want to extract and click on the "Open" button.</li>
- <li>You can see the contents of the archive in the main window. You can also preview the files by double-clicking on them.</li>
- <li>Select the files or folders that you want to extract and click on the "Extract" button on the toolbar.</li>
- <li>Choose the destination folder where you want to save your extracted files.</li>
- <li>Optionally, you can change the extraction mode, overwrite mode, password, and other settings.</li>
- <li>Click on the "Extract" button to start extracting your files.</li>
- <li>Wait for the extraction process to finish. You can see the progress and details on the status bar.</li>
- <li>You have successfully extracted a RAR archive using Bandizip. You can find your extracted files in the destination folder that you specified.</li>
- </ol>
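For completeness, the same list-then-extract flow can be scripted in Python rather than clicked through in Bandizip's interface. A sketch using the third-party rarfile package (pip install rarfile; it also relies on an unrar-style backend being installed), with placeholder paths:

```python
import rarfile  # third-party; mirrors the zipfile API for RAR archives

# Placeholder paths -- substitute the archive and destination you actually use.
with rarfile.RarFile("archive.rar") as rf:
    for name in rf.namelist():   # inspect contents, like the preview step
        print(name)
    rf.extractall("extracted/")  # extract everything to the destination folder
```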
- <p>As you can see, Bandizip is a powerful and easy-to-use file compression and archiving software that can help you get WinRAR for free on Mac. Bandizip also has many other features and options that you can explore and customize according to your preferences. Bandizip is a great WinRAR alternative for Mac that you should try today!</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Complete Book Of Olympics Pdf Download.md DELETED
@@ -1,25 +0,0 @@
-
- <h1>How to Download the Complete Book of the Olympics PDF for Free</h1>
- <p>If you are a fan of the Olympic Games and want to learn more about their history, records, and trivia, you might be interested in reading <em>The Complete Book of the Olympics</em> by David Wallechinsky. This book is a comprehensive guide to every edition of the modern Olympics, from Athens 1896 to Tokyo 2020. It covers all the sports, events, athletes, medals, controversies, and stories that have shaped the Olympic movement.</p>
- <h2>Complete Book Of Olympics Pdf Download</h2><br /><p><b><b>Download File</b> &middot; <a href="https://imgfil.com/2uy0Ct">https://imgfil.com/2uy0Ct</a></b></p><br /><br />
- <p>However, this book is not easy to find in print or online. It is out of stock on most bookstores and libraries, and there is no official digital version available. So how can you download the complete book of the Olympics PDF for free?</p>
- <p>The answer is simple: you can use the Internet Archive. The Internet Archive is a non-profit organization that preserves and provides access to millions of books, movies, music, websites, and other digital media. It has a huge collection of public domain and out-of-print books that you can download or read online for free.</p>
- <p>One of these books is <em>The Complete Book of the Olympics</em> by David Wallechinsky. The Internet Archive has a scanned copy of the 1988 edition of this book, which covers the Olympics from 1896 to 1988. You can access this book by visiting this link: <a href="https://archive.org/details/completebookofol00wall">https://archive.org/details/completebookofol00wall</a>. On this page, you can see a preview of the book, read it online, or download it as a PDF file.</p>
- <p></p>
- <p>To download the complete book of the Olympics PDF for free, you need to follow these steps:</p>
- <ol>
- <li>Click on the "PDF" button on the right side of the page.</li>
- <li>A new window will open with a download link. Click on it to start downloading the file.</li>
- <li>Save the file to your device and enjoy reading it.</li>
- </ol>
- <p>That's it! You have successfully downloaded the complete book of the Olympics PDF for free. You can now enjoy reading this amazing book and learn more about the Olympic Games.</p>
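The manual steps above can also be automated with a short script. A sketch using the requests library; the direct file URL below is a hypothetical placeholder following archive.org's usual /download/ pattern — substitute the link the "PDF" button actually resolves to:

```python
import requests

# Hypothetical direct link -- replace with the URL behind the "PDF" button.
url = "https://archive.org/download/completebookofol00wall/completebookofol00wall.pdf"

with requests.get(url, stream=True, timeout=60) as resp:
    resp.raise_for_status()
    with open("complete_book_of_the_olympics.pdf", "wb") as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            f.write(chunk)  # stream to disk so the PDF never sits fully in memory
```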
-
- <p>If you are wondering why you should read <em>The Complete Book of the Olympics</em> by David Wallechinsky, here are some reasons:</p>
- <ul>
- <li>It is a fascinating and entertaining book that will keep you hooked for hours. You will discover many interesting facts and anecdotes about the Olympics that you might not know.</li>
- <li>It is a valuable source of information and inspiration for anyone who loves sports, history, or culture. You will learn about the achievements and challenges of the Olympic athletes, the evolution and diversity of the Olympic sports, and the impact and legacy of the Olympic Games on the world.</li>
- <li>It is a rare and precious book that is hard to find elsewhere. It is not available in most bookstores or libraries, and there is no official digital version. The Internet Archive is the only place where you can download it for free.</li>
- </ul>
- <p>So what are you waiting for? Download the complete book of the Olympics PDF for free today and enjoy reading this masterpiece of Olympic literature.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dinosaur Sim APK and Become a Prehistoric Beast.md DELETED
@@ -1,126 +0,0 @@
- <br />
- <h1>Dinosaur Sim APK: A Fun and Educational Game for Dinosaur Lovers</h1>
- <p>Do you love dinosaurs? Do you want to play as one of them and explore a realistic 3D environment? Do you want to learn more about these amazing creatures and their fossils? If you answered yes to any of these questions, then you should try Dinosaur Sim APK, a game that allows you to play as one of the 25 popular dinosaurs and experience their life in different game modes. In this article, we will tell you what Dinosaur Sim APK is, what features it has, how to download and install it, and what are its pros and cons.</p>
- <h2>dinosaur sim apk</h2><br /><p><b><b>Download</b> &#11088; <a href="https://urlin.us/2uSUQv">https://urlin.us/2uSUQv</a></b></p><br /><br />
- <h2>What is Dinosaur Sim APK?</h2>
- <p>Dinosaur Sim APK is an Android game developed by 3583 Bytes, a studio that specializes in creating simulation games. It is a game that lets you play as one of the 25 realistic dinosaurs, each with its own animations and sounds. You can fight your way to the top of the food chain or play as a peaceful herbivore in a realistic 3D environment. You can also learn about each of the dinosaurs in the game, color your favorite dinosaurs, and learn about fossils and dinosaur bones in different game modes. Dinosaur Sim APK is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages.</p>
- <h3>Features of Dinosaur Sim APK</h3>
- <p>Dinosaur Sim APK has many features that make it an enjoyable and informative game. Here are some of them:</p>
- <h4>- 25 Playable Dinosaurs</h4>
- <p>You can choose from 25 different dinosaurs to play as, each with its own characteristics, abilities, and challenges. You can play as carnivores, herbivores, or omnivores, and experience their life in the wild. Some of the dinosaurs you can play as are Tyrannosaurus Rex, Triceratops, Velociraptor, Stegosaurus, Brachiosaurus, Spinosaurus, and more.</p>
- <h4>- 4 Game Modes</h4>
- <p>You can play Dinosaur Sim APK in four different game modes, each with its own objectives and features. They are:</p>
- <ul>
- <li>Dino Simulator mode: This is the main mode where you can roam freely in a realistic 3D environment and hunt, fight, eat, drink, rest, and grow as a dinosaur. You can also interact with other dinosaurs and form packs or herds.</li>
- <li>Dino Safari mode: This is an educational mode where you can learn about each of the dinosaurs in the game. You can read facts about their appearance, behavior, diet, habitat, and more. You can also see their skeletons and fossils.</li>
- <li>Dino Paint mode: This is a creative mode where you can color your favorite dinosaurs with different colors and patterns. You can also save your creations and share them with your friends.</li>
- <li>Dino Museum mode: This is another educational mode where you can learn about fossils and dinosaur bones. You can see how fossils are formed, how they are excavated, how they are studied, and how they are displayed in museums.</li>
- </ul>
- <h4>- Realistic 3D Graphics and Animations</h4>
- <p>Dinosaur Sim APK has stunning 3D graphics and animations that make the game look realistic and immersive. The dinosaurs are beautifully modeled and textured, and they move and sound like real animals. The environment is also detailed and varied, with different terrains, plants, weather effects, day and night cycles, and more.</p>
- <p>dinosaur sim apk download<br />
- dinosaur sim apk mod<br />
- dinosaur sim apk latest version<br />
- dinosaur sim apk for android<br />
- dinosaur sim apk free<br />
- dinosaur sim apk offline<br />
- dinosaur sim apk unlimited money<br />
- dinosaur sim apk 2023<br />
- dinosaur sim apk hack<br />
- dinosaur sim apk old version<br />
- dinosaur sim game apk<br />
- dinosaur sim 3d apk<br />
- dinosaur sim 2022 apk<br />
- dinosaur sim 2021 apk<br />
- dinosaur sim 2020 apk<br />
- ultimate dinosaur simulator apk<br />
- jurassic dinosaur simulator apk<br />
- wild dinosaur simulator apk<br />
- real dinosaur simulator apk<br />
- flying dinosaur simulator apk<br />
- dino sim apk<br />
- dino sim game apk<br />
- dino sim mod apk<br />
- dino sim download apk<br />
- dino sim 3d apk<br />
- dino world simulator apk<br />
- dino hunter simulator apk<br />
- dino park simulator apk<br />
- dino island simulator apk<br />
- dino rampage simulator apk<br />
- dinosim apk<br />
- dinosim game apk<br />
- dinosim mod apk<br />
- dinosim download apk<br />
- dinosim 3d apk<br />
- dinosim 2023 apk<br />
- dinosim 2022 apk<br />
- dinosim 2021 apk<br />
- dinosim 2020 apk<br />
- dinosim hack apk<br />
- dinosim offline apk<br />
- dinosim free apk<br />
- dinosim latest version apk<br />
- dinosim unlimited money apk<br />
- dinosim old version apk<br />
- best dinosaur simulator games for android 2023 <br />
- top dinosaur simulator games for android 2022 <br />
- new dinosaur simulator games for android 2021 <br />
- popular dinosaur simulator games for android 2020 <br />
- realistic dinosaur simulator games for android</p>
- <h4>- Educational Content</h4>
- <p>Dinosaur Sim APK is not only a fun game but also an educational one. It teaches you about dinosaurs and their history in an engaging way. You can learn about their anatomy, evolution, <p>classification, behavior, diet, habitat, and more. You can also learn about fossils and how they are formed and studied. The game has a lot of educational content that will enrich your knowledge and curiosity about dinosaurs.</p>
- <h3>How to Download and Install Dinosaur Sim APK?</h3>
- <p>If you want to play Dinosaur Sim APK on your Android device, you need to download and install it first. Here are the requirements and steps to do so:</p>
- <h4>- Requirements</h4>
- <p>To play Dinosaur Sim APK, you need to have an Android device that meets the following requirements:</p>
- <ul>
- <li>Android version: 4.1 or higher</li>
- <li>RAM: 2 GB or more</li>
- <li>Storage space: 100 MB or more</li>
- <li>Internet connection: Required for some features</li>
- </ul>
- <h4>- Steps</h4>
- <p>To download and install Dinosaur Sim APK, you need to follow these steps:</p>
- <ol>
- <li>Go to the official website of Dinosaur Sim APK at and click on the download button.</li>
- <li>Wait for the APK file to be downloaded on your device.</li>
- <li>Go to your device settings and enable the installation of apps from unknown sources.</li>
- <li>Locate the APK file on your device and tap on it to start the installation process.</li>
- <li>Follow the instructions on the screen and wait for the installation to be completed.</li>
- <li>Launch the game and enjoy playing as a dinosaur.</li>
- </ol>
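As an alternative to tapping the APK on the device (steps 3-5 above), the file can be sideloaded from a computer over USB with adb, invoked here from Python; the APK file name is a hypothetical placeholder:

```python
import subprocess

# Requires adb on PATH and USB debugging enabled on the device.
# "-r" reinstalls (keeping app data) if the app is already present.
subprocess.run(["adb", "install", "-r", "dinosaur-sim.apk"], check=True)
```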
- <h3>Pros and Cons of Dinosaur Sim APK</h3>
- <p>Dinosaur Sim APK is a game that has many advantages but also some disadvantages. Here are some of them:</p>
- <h4>- Pros</h4>
- <ul>
- <li>It is a fun and educational game that appeals to dinosaur lovers of all ages.</li>
- <li>It has 25 playable dinosaurs with realistic 3D graphics and animations.</li>
- <li>It has four game modes that offer different gameplay experiences and learning opportunities.</li>
- <li>It has a lot of educational content that teaches you about dinosaurs and fossils in an engaging way.</li>
- <li>It is free to download and play, with no in-app purchases or ads.</li>
- </ul>
- <h4>- Cons</h4>
- <ul>
- <li>It requires a lot of storage space and RAM to run smoothly.</li>
- <li>It requires an internet connection for some features, such as saving your progress or sharing your creations.</li>
- <li>It may have some bugs or glitches that affect the gameplay quality.</li>
- <li>It may not be compatible with some devices or Android versions.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Dinosaur Sim APK is a game that lets you play as one of the 25 realistic dinosaurs and experience their life in different game modes. It is a game that is action-packed but also educational, making it a perfect mix for dinosaur lovers of all ages. You can download and install it for free on your Android device and enjoy playing as a dinosaur. However, you should also be aware of its pros and cons before playing it. We hope this article has helped you learn more about Dinosaur Sim APK and how to play it. If you have any questions or feedback, feel free to leave them in the comments section below.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Dinosaur Sim APK:</p>
- <ol>
- <li><b>What is the latest version of Dinosaur Sim APK?</b></li>
- <p>The latest version of Dinosaur Sim APK is 1.5.0, which was released on June 15, 2023. It added new dinosaurs, new features, bug fixes, and performance improvements.</p>
- <li><b>Can I play Dinosaur Sim APK offline?</b></li>
- <p>You can play Dinosaur Sim APK offline in some game modes, such as Dino Simulator mode and Dino Paint mode. However, you need an internet connection for some features, such as saving your progress or sharing your creations.</p>
- <li><b>Can I play Dinosaur Sim APK on PC?</b></li>
- <p>You can play Dinosaur Sim APK on PC by using an Android emulator, such as BlueStacks or NoxPlayer. However, you may experience some compatibility issues or performance issues depending on your PC specifications.</p>
- <li><b>How can I update Dinosaur Sim APK?</b></li>
- <p>You can update Dinosaur Sim APK by downloading the latest version from the official website or by checking for updates in the game settings. You should always update the game to enjoy the latest features and bug fixes.</p>
- <li><b>Is Dinosaur Sim APK safe to download and install?</b></li>
- <p>Dinosaur Sim APK is safe to download and install on your Android device, as long as you download it from the official website or a trusted source. However, you should always be careful when installing apps from unknown sources and check the permissions and reviews before installing them.</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 Mod Apk for Free - No Ads Unlimited Rewards.md DELETED
@@ -1,114 +0,0 @@
- <br />
- <h1>Parking Master Multiplayer 2 Mod APK 2023: The Ultimate Parking Game</h1>
- <h2>Introduction</h2>
- <p>Do you love parking games? Do you want to test your driving skills and challenge your friends in a realistic and fun parking simulator? If yes, then you should try Parking Master Multiplayer 2, the best parking game for Android devices.</p>
- <h2>parking master multiplayer 2 mod apk 2023</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNNll">https://jinyurl.com/2uNNll</a></b></p><br /><br />
- <h3>What is Parking Master Multiplayer 2?</h3>
- <p>Parking Master Multiplayer 2 is a parking game developed by Games Studio, a popular game developer that has created many other successful games such as Racing Fever and Drift Max. In this game, you can choose from a variety of cars, from sports cars to trucks, and park them in different scenarios, such as city streets, parking lots, airports, and more. You can also customize your cars with different colors, stickers, wheels, and accessories.</p>
- <h3>Why do you need Parking Master Multiplayer 2 Mod APK 2023?</h3>
- <p>Parking Master Multiplayer 2 is a free game, but it has some limitations that can affect your gaming experience. For example, you need to watch ads to get more fuel or unlock new cars. You also need to earn coins and gems to upgrade your cars or buy new ones. These things can be frustrating and time-consuming, especially if you want to enjoy the game without any interruptions or restrictions.</p>
- <p>That's why you need Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, and more. With this mod apk, you can play the game as much as you want, without worrying about running out of fuel or watching annoying ads. You can also access all the cars in the game, from the cheapest to the most expensive ones, and customize them to your liking. You can also enjoy the realistic graphics and physics of the game, as well as the multiplayer mode that lets you compete with other players online.</p>
- <h2>Features of Parking Master Multiplayer 2 Mod APK 2023</h2>
- <h3>Unlimited Fuel</h3>
- <p>One of the main features of Parking Master Multiplayer 2 Mod APK 2023 is unlimited fuel. In the original game, you have a limited amount of fuel that decreases as you drive your car. When you run out of fuel, you have to watch an ad or pay with gems to refill it. This can be annoying and interrupt your gameplay.</p>
- <p>With Parking Master Multiplayer 2 Mod APK 2023, you don't have to worry about fuel anymore. You have unlimited fuel that never runs out, no matter how long or how far you drive your car. You can play the game without any interruptions or limitations.</p>
- <h3>No Ads</h3>
- <p>Another feature of Parking Master Multiplayer 2 Mod APK 2023 is no ads. In the original game, you have to watch ads to get more fuel, unlock new cars, or get extra rewards. These ads can be boring and waste your time.</p>
- <p>With Parking Master Multiplayer 2 Mod APK 2023, you don't have to watch any ads at all. You can play the game without any distractions or delays. You can also save your mobile data and battery life by avoiding unnecessary ads.</p>
- <h3>All Cars Unlocked</h3>
- <p>A third feature of Parking Master Multiplayer 2 Mod APK 2023 is all cars unlocked. In the original game, you have to earn coins and gems to unlock new cars or buy them with real money. There are many cars in the game, from sports cars to trucks, but they are not all available at the beginning. You have to complete levels and missions to unlock them or pay for them.</p>
- <p>With Parking Master Multiplayer 2 Mod APK 2023, you don't have to do any of that. You can access all the cars in the game from the start, without spending any coins, gems, or money. You can choose any car you want and enjoy its features and performance.</p>
- <p>parking master multiplayer 2 mod apk 2023 download<br />
- parking master multiplayer 2 mod apk 2023 unlimited money<br />
- parking master multiplayer 2 mod apk 2023 latest version<br />
- parking master multiplayer 2 mod apk 2023 free<br />
- parking master multiplayer 2 mod apk 2023 android<br />
- parking master multiplayer 2 mod apk 2023 online<br />
- parking master multiplayer 2 mod apk 2023 hack<br />
- parking master multiplayer 2 mod apk 2023 cheats<br />
- parking master multiplayer 2 mod apk 2023 gameplay<br />
- parking master multiplayer 2 mod apk 2023 review<br />
- parking master multiplayer 2 mod apk 2023 features<br />
- parking master multiplayer 2 mod apk 2023 update<br />
- parking master multiplayer 2 mod apk 2023 install<br />
- parking master multiplayer 2 mod apk 2023 guide<br />
- parking master multiplayer 2 mod apk 2023 tips<br />
- parking master multiplayer 2 mod apk 2023 tricks<br />
- parking master multiplayer 2 mod apk 2023 best cars<br />
- parking master multiplayer 2 mod apk 2023 customizations<br />
- parking master multiplayer 2 mod apk 2023 maps<br />
40
- parking master multiplayer 2 mod apk 2023 missions<br />
41
- parking master multiplayer 2 mod apk 2023 challenges<br />
42
- parking master multiplayer 2 mod apk 2023 levels<br />
43
- parking master multiplayer 2 mod apk 2023 modes<br />
44
- parking master multiplayer 2 mod apk 2023 graphics<br />
45
- parking master multiplayer 2 mod apk 2023 sound<br />
46
- parking master multiplayer 2 mod apk 2023 controls<br />
47
- parking master multiplayer 2 mod apk 2023 settings<br />
48
- parking master multiplayer 2 mod apk 2023 requirements<br />
49
- parking master multiplayer 2 mod apk 2023 size<br />
50
- parking master multiplayer 2 mod apk 2023 rating<br />
51
- parking master multiplayer 2 mod apk download for pc<br />
52
- how to play parking master multiplayer 2 with friends<br />
53
- how to get parking master multiplayer 2 for free<br />
54
- how to unlock all cars in parking master multiplayer 2<br />
55
- how to park like a pro in parking master multiplayer 2<br />
56
- how to win every race in parking master multiplayer 2<br />
57
- how to earn more money in parking master multiplayer 2<br />
58
- how to upgrade your car in parking master multiplayer 2</p>
59
- <h3>Realistic Graphics and Physics</h3>
60
- <p>A fourth feature of Parking Master Multiplayer 2 Mod APK 2023 is realistic graphics and physics. The game has amazing graphics that make you feel like you are driving a real car in a real environment. The game also has realistic physics that simulate the behavior of the car and the environment, such as gravity, friction, inertia, and collision.</p>
61
- <p>With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the same graphics and physics as the original game, but with better performance and smoother gameplay. You can also adjust the graphics settings to suit your device and preference.</p>
62
- <h3>Multiplayer Mode</h3>
63
- <p>A fifth feature of Parking Master Multiplayer 2 Mod APK 2023 is multiplayer mode. The game has a multiplayer mode that lets you play with other players online. You can join or create a room and invite your friends or random players to join you. You can also chat with them and see their scores and rankings.</p>
64
- <p>With Parking Master Multiplayer 2 Mod APK 2023, you can enjoy the multiplayer mode without any limitations or problems. You can play with anyone you want, without worrying about lag or disconnections. Competing against other players who run the same mod apk also makes the game more fun and more challenging.</p>
65
- <h2>How to download and install Parking Master Multiplayer 2 Mod APK 2023?</h2>
66
- <h3>Step 1: Download the APK file from the link below</h3>
67
- <p>The first step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to download the APK file from the link below. The link will take you to a secure and reliable website where you can download the file safely and quickly.</p>
68
- <p><a href="">Download Parking Master Multiplayer 2 Mod APK 2023 here</a></p>
69
- <h3>Step 2: Enable unknown sources on your device</h3>
70
- <p>The second step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to enable unknown sources on your device. This is necessary because the mod apk is not from the official Google Play Store, so you need to allow your device to install apps from other sources.</p>
71
- <p>To enable unknown sources, go to your device settings, then security, then unknown sources. Turn on the switch or check the box to enable it. You may also see a pop-up message asking for your permission. Tap on OK or Allow to confirm it.</p>
72
- <h3>Step 3: Install the APK file and enjoy the game</h3>
73
- <p>The third step to download and install Parking Master Multiplayer 2 Mod APK 2023 is to install the APK file and enjoy the game. To install the APK file, go to your file manager or downloads folder and find the file you downloaded. Tap on it and follow the instructions on the screen to install it.</p>
74
- <p>Once the installation is done, you can open the game and start playing it. You will see that you have unlimited fuel, no ads, all cars unlocked, realistic graphics and physics, and multiplayer mode. You can also customize your cars and settings as you wish.</p>
75
- <h2>Conclusion</h2>
76
- <p>Parking Master Multiplayer 2 is a parking game that tests your driving skills and lets you challenge your friends in a realistic and fun parking simulator. It has many features that make it one of the best parking games for Android devices.</p>
77
- <p>However, if you want to enjoy the game without any limitations or interruptions, you should download Parking Master Multiplayer 2 Mod APK 2023, a modified version of the game that gives you unlimited fuel, no ads, all cars unlocked, realistic graphics and physics, and multiplayer mode.</p>
78
- <p>To download Parking Master Multiplayer 2 Mod APK 2023, just follow these three simple steps:</p>
79
- <ol>
80
- <li>Download the APK file from the link below</li>
81
- <li>Enable unknown sources on your device</li>
82
- <li>Install the APK file and enjoy the game</li>
83
- </ol>
84
- <p>Parking Master Multiplayer 2 Mod APK 2023 is a great way to have more fun and challenge in parking games. Download it now and see for yourself!</p>
85
- <h3 >FAQs</h3>
86
- <p>Here are some frequently asked questions about Parking Master Multiplayer 2 Mod APK 2023:</p>
87
- <table>
88
- <tr>
89
- <th>Question</th>
90
- <th>Answer</th>
91
- </tr>
92
- <tr>
93
- <td>Is Parking Master Multiplayer 2 Mod APK 2023 safe to use?</td>
94
- <td>Yes, Parking Master Multiplayer 2 Mod APK 2023 is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It also does not require any root or jailbreak to work.</td>
95
- </tr>
96
- <tr>
97
- <td>Is Parking Master Multiplayer 2 Mod APK 2023 compatible with my device?</td>
98
- <td>Parking Master Multiplayer 2 Mod APK 2023 is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not support the game or the mod apk due to different specifications or settings. If you encounter any problems, you can contact the developer or try another device.</td>
99
- </tr>
100
- <tr>
101
- <td>Can I play Parking Master Multiplayer 2 Mod APK 2023 offline?</td>
102
- <td>Parking Master Multiplayer 2 Mod APK 2023 can be played offline, but you will not be able to access the multiplayer mode or some online features. You will also need an internet connection to download and install the mod apk.</td>
103
- </tr>
104
- <tr>
105
- <td>Can I update Parking Master Multiplayer 2 Mod APK 2023?</td>
106
- <td>Parking Master Multiplayer 2 Mod APK 2023 may not be compatible with the latest version of the game, so you should not update the game or the mod apk unless there is a new version of the mod apk available. You can check for updates on the website where you downloaded the mod apk or on this page.</td>
107
- </tr>
108
- <tr>
109
- <td>Can I share Parking Master Multiplayer 2 Mod APK 2023 with my friends?</td>
110
- <td>Yes, you can share Parking Master Multiplayer 2 Mod APK 2023 with your friends, but only for personal and non-commercial use. You should not distribute or sell the mod apk without the permission of the developer or the owner of the game.</td>
111
- </tr>
112
- </table>
 
spaces/2ndelement/voicevox/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md DELETED
@@ -1,3 +0,0 @@
1
- dummy3 policy
2
-
3
- https://voicevox.hiroshiba.jp/
 
 
 
 
spaces/30Kanika/Animal_Image_Classifier/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Animal Image Classifier
3
- emoji: 🌍
4
- colorFrom: pink
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.20.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
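
For orientation, a minimal `app.py` consistent with a frontmatter like this one might look as follows; the classifier stub and its labels are purely hypothetical, not the Space's actual code:

```python
import gradio as gr

def classify(image):
    # hypothetical stub: a real Space would run an animal-image model here
    return {"cat": 0.7, "dog": 0.3}

gr.Interface(fn=classify, inputs=gr.Image(), outputs=gr.Label()).launch()
```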
 
spaces/52Hz/CMFNet_dehazing/model/CMFNet.py DELETED
@@ -1,191 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from model.block import SAB, CAB, PAB, conv, SAM, conv3x3, conv_down
4
-
5
- ##########################################################################
6
- ## U-Net
7
- bn = 2 # block number-1
8
-
9
- class Encoder(nn.Module):
10
- def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block):
11
- super(Encoder, self).__init__()
12
- if block == 'CAB':
13
- self.encoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
14
- self.encoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
15
- self.encoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
16
- elif block == 'PAB':
17
- self.encoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
18
- self.encoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
19
- self.encoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
20
- elif block == 'SAB':
21
- self.encoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
22
- self.encoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
23
- self.encoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
24
- self.encoder_level1 = nn.Sequential(*self.encoder_level1)
25
- self.encoder_level2 = nn.Sequential(*self.encoder_level2)
26
- self.encoder_level3 = nn.Sequential(*self.encoder_level3)
27
- self.down12 = DownSample(n_feat, scale_unetfeats)
28
- self.down23 = DownSample(n_feat + scale_unetfeats, scale_unetfeats)
29
-
30
- def forward(self, x):
31
- enc1 = self.encoder_level1(x)
32
- x = self.down12(enc1)
33
- enc2 = self.encoder_level2(x)
34
- x = self.down23(enc2)
35
- enc3 = self.encoder_level3(x)
36
- return [enc1, enc2, enc3]
37
-
38
- class Decoder(nn.Module):
39
- def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, block):
40
- super(Decoder, self).__init__()
41
- if block == 'CAB':
42
- self.decoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
43
- self.decoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
44
- self.decoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
45
- elif block == 'PAB':
46
- self.decoder_level1 = [PAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
47
- self.decoder_level2 = [PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
48
- self.decoder_level3 = [PAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
49
- elif block == 'SAB':
50
- self.decoder_level1 = [SAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
51
- self.decoder_level2 = [SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
52
- self.decoder_level3 = [SAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in range(bn)]
53
- self.decoder_level1 = nn.Sequential(*self.decoder_level1)
54
- self.decoder_level2 = nn.Sequential(*self.decoder_level2)
55
- self.decoder_level3 = nn.Sequential(*self.decoder_level3)
56
- if block == 'CAB':
57
- self.skip_attn1 = CAB(n_feat, kernel_size, reduction, bias=bias, act=act)
58
- self.skip_attn2 = CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
59
- if block == 'PAB':
60
- self.skip_attn1 = PAB(n_feat, kernel_size, reduction, bias=bias, act=act)
61
- self.skip_attn2 = PAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
62
- if block == 'SAB':
63
- self.skip_attn1 = SAB(n_feat, kernel_size, reduction, bias=bias, act=act)
64
- self.skip_attn2 = SAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act)
65
- self.up21 = SkipUpSample(n_feat, scale_unetfeats)
66
- self.up32 = SkipUpSample(n_feat + scale_unetfeats, scale_unetfeats)
67
-
68
- def forward(self, outs):
69
- enc1, enc2, enc3 = outs
70
- dec3 = self.decoder_level3(enc3)
71
- x = self.up32(dec3, self.skip_attn2(enc2))
72
- dec2 = self.decoder_level2(x)
73
- x = self.up21(dec2, self.skip_attn1(enc1))
74
- dec1 = self.decoder_level1(x)
75
- return [dec1, dec2, dec3]
76
-
77
- ##########################################################################
78
- ##---------- Resizing Modules ----------
79
- class DownSample(nn.Module):
80
- def __init__(self, in_channels, s_factor):
81
- super(DownSample, self).__init__()
82
- self.down = nn.Sequential(nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=False),
83
- nn.Conv2d(in_channels, in_channels + s_factor, 1, stride=1, padding=0, bias=False))
84
-
85
- def forward(self, x):
86
- x = self.down(x)
87
- return x
88
-
89
- class UpSample(nn.Module):
90
- def __init__(self, in_channels, s_factor):
91
- super(UpSample, self).__init__()
92
- self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
93
- nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
94
-
95
- def forward(self, x):
96
- x = self.up(x)
97
- return x
98
-
99
- class SkipUpSample(nn.Module):
100
- def __init__(self, in_channels, s_factor):
101
- super(SkipUpSample, self).__init__()
102
- self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
103
- nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
104
-
105
- def forward(self, x, y):
106
- x = self.up(x)
107
- x = x + y
108
- return x
109
-
110
- ##########################################################################
111
- # Mixed Residual Module
112
- class Mix(nn.Module):
113
- def __init__(self, m=1):
114
- super(Mix, self).__init__()
115
- self.w = nn.Parameter(torch.FloatTensor([m]))  # learnable mixing weight; nn.Parameter sets requires_grad=True by default
118
- self.mix_block = nn.Sigmoid()
119
-
120
- def forward(self, fea1, fea2, fea3):
121
- factor = self.mix_block(self.w)
122
- other = (1 - factor) / 2
123
- output = fea1 * other.expand_as(fea1) + fea2 * factor.expand_as(fea2) + fea3 * other.expand_as(fea3)
124
- return output, factor
125
-
126
- ##########################################################################
127
- # Architecture
128
- class CMFNet(nn.Module):
129
- def __init__(self, in_c=3, out_c=3, n_feat=96, scale_unetfeats=48, kernel_size=3, reduction=4, bias=False):
130
- super(CMFNet, self).__init__()
131
-
132
- p_act = nn.PReLU()
133
- self.shallow_feat1 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
134
- conv(n_feat // 2, n_feat, kernel_size, bias=bias))
135
- self.shallow_feat2 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
136
- conv(n_feat // 2, n_feat, kernel_size, bias=bias))
137
- self.shallow_feat3 = nn.Sequential(conv(in_c, n_feat // 2, kernel_size, bias=bias), p_act,
138
- conv(n_feat // 2, n_feat, kernel_size, bias=bias))
139
-
140
- self.stage1_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB')
141
- self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'CAB')
142
-
143
- self.stage2_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB')
144
- self.stage2_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'PAB')
145
-
146
- self.stage3_encoder = Encoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB')
147
- self.stage3_decoder = Decoder(n_feat, kernel_size, reduction, p_act, bias, scale_unetfeats, 'SAB')
148
-
149
- self.sam1o = SAM(n_feat, kernel_size=3, bias=bias)
150
- self.sam2o = SAM(n_feat, kernel_size=3, bias=bias)
151
- self.sam3o = SAM(n_feat, kernel_size=3, bias=bias)
152
-
153
- self.mix = Mix(1)
154
- self.add123 = conv(out_c, out_c, kernel_size, bias=bias)
155
- self.concat123 = conv(n_feat*3, n_feat, kernel_size, bias=bias)
156
- self.tail = conv(n_feat, out_c, kernel_size, bias=bias)
157
-
158
-
159
- def forward(self, x):
160
- ## Compute Shallow Features
161
- shallow1 = self.shallow_feat1(x)
162
- shallow2 = self.shallow_feat2(x)
163
- shallow3 = self.shallow_feat3(x)
164
-
165
- ## Enter the UNet-CAB
166
- x1 = self.stage1_encoder(shallow1)
167
- x1_D = self.stage1_decoder(x1)
168
- ## Apply SAM
169
- x1_out, x1_img = self.sam1o(x1_D[0], x)
170
-
171
- ## Enter the UNet-PAB
172
- x2 = self.stage2_encoder(shallow2)
173
- x2_D = self.stage2_decoder(x2)
174
- ## Apply SAM
175
- x2_out, x2_img = self.sam2o(x2_D[0], x)
176
-
177
- ## Enter the UNet-SAB
178
- x3 = self.stage3_encoder(shallow3)
179
- x3_D = self.stage3_decoder(x3)
180
- ## Apply SAM
181
- x3_out, x3_img = self.sam3o(x3_D[0], x)
182
-
183
- ## Aggregate SAM features of Stage 1, Stage 2 and Stage 3
184
- mix_r = self.mix(x1_img, x2_img, x3_img)
185
- mixed_img = self.add123(mix_r[0])
186
-
187
- ## Concat SAM features of Stage 1, Stage 2 and Stage 3
188
- concat_feat = self.concat123(torch.cat([x1_out, x2_out, x3_out], 1))
189
- x_final = self.tail(concat_feat)
190
-
191
- return x_final + mixed_img
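
The `Mix` module above is small enough to sanity-check in isolation. Here is a minimal standalone sketch of the de-duplicated module with arbitrary input shapes (the 1x3x8x8 tensors are illustrative assumptions, not CMFNet's real feature sizes):

```python
import torch
import torch.nn as nn

class Mix(nn.Module):
    # Learned soft blend of three feature maps: sigmoid(w) weights the middle
    # input while the two outer inputs share the remaining (1 - sigmoid(w)) mass.
    def __init__(self, m=1):
        super().__init__()
        self.w = nn.Parameter(torch.FloatTensor([m]))  # requires_grad=True by default
        self.mix_block = nn.Sigmoid()

    def forward(self, fea1, fea2, fea3):
        factor = self.mix_block(self.w)
        other = (1 - factor) / 2
        # factor has shape (1,), so it broadcasts over the feature maps
        return fea1 * other + fea2 * factor + fea3 * other, factor

mix = Mix(1)
a, b, c = (torch.randn(1, 3, 8, 8) for _ in range(3))
out, factor = mix(a, b, c)
print(out.shape, round(factor.item(), 3))  # torch.Size([1, 3, 8, 8]) 0.731
```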
 
spaces/AIZ2H/08-Search-Streamlit-Session-State-QueryParameters/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 08 Search Streamlit Session State QueryParameters
3
- emoji: 🔎🧠
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: streamlit
7
- sdk_version: 1.10.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR/app.py DELETED
@@ -1,169 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- import time
4
- import librosa
5
- import soundfile
6
- import nemo.collections.asr as nemo_asr
7
- import tempfile
8
- import os
9
- import uuid
10
-
11
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
12
-
13
- # PersistDataset -----
14
- import csv
18
- from gradio import inputs, outputs
19
- import huggingface_hub
20
- from huggingface_hub import Repository, hf_hub_download, upload_file
21
- from datetime import datetime
22
-
23
- # ---------------------------------------------
24
- # Dataset and Token links - change awacke1 to your own HF id, and add a HF_TOKEN copy to your repo for write permissions
25
- # This should allow you to save your results to your own Dataset hosted on HF. ---
26
- #DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv"
27
- #DATASET_REPO_ID = "awacke1/Carddata.csv"
28
- #DATA_FILENAME = "Carddata.csv"
29
- #DATA_FILE = os.path.join("data", DATA_FILENAME)
30
- #HF_TOKEN = os.environ.get("HF_TOKEN")
31
- #SCRIPT = """
32
-
33
- #<script>
34
- #if (!window.hasBeenRun) {
35
- # window.hasBeenRun = true;
36
- # console.log("should only happen once");
37
- # document.querySelector("button.submit").click();
38
- #}
39
- #</script>
40
- #"""
41
-
42
- #try:
43
- # hf_hub_download(
44
- # repo_id=DATASET_REPO_ID,
45
- # filename=DATA_FILENAME,
46
- # cache_dir=DATA_DIRNAME,
47
- # force_filename=DATA_FILENAME
48
- # )
49
- #except:
50
- # print("file not found")
51
- #repo = Repository(
52
- # local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
53
- #)
54
-
55
- #def store_message(name: str, message: str):
56
- # if name and message:
57
- # with open(DATA_FILE, "a") as csvfile:
58
- # writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
59
- # writer.writerow(
60
- # {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
61
- # )
62
- # # uncomment line below to begin saving -
63
- # commit_url = repo.push_to_hub()
64
- # return ""
65
-
66
- #iface = gr.Interface(
67
- # store_message,
68
- # [
69
- # inputs.Textbox(placeholder="Your name"),
70
- # inputs.Textbox(placeholder="Your message", lines=2),
71
- # ],
72
- # "html",
73
- # css="""
74
- # .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
75
- # """,
76
- # title="Reading/writing to a HuggingFace dataset repo from Spaces",
77
- # description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
78
- # article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
79
- #)
80
-
81
-
82
- # main -------------------------
83
- mname = "facebook/blenderbot-400M-distill"
84
- model = BlenderbotForConditionalGeneration.from_pretrained(mname)
85
- tokenizer = BlenderbotTokenizer.from_pretrained(mname)
86
-
87
- def take_last_tokens(inputs, note_history, history):
88
- """Filter the last 128 tokens"""
89
- if inputs['input_ids'].shape[1] > 128:
90
- inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
91
- inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
92
- note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
93
- history = history[1:]
94
- return inputs, note_history, history
95
-
96
- def add_note_to_history(note, note_history):
97
- """Add a note to the historical information"""
98
- note_history.append(note)
99
- note_history = '</s> <s>'.join(note_history)
100
- return [note_history]
101
-
102
-
103
- def chat(message, history):
104
- history = history or []
105
- if history:
106
- history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])]
107
- else:
108
- history_useful = []
109
- history_useful = add_note_to_history(message, history_useful)
110
- inputs = tokenizer(history_useful, return_tensors="pt")
111
- inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
112
- reply_ids = model.generate(**inputs)
113
- response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
114
- history_useful = add_note_to_history(response, history_useful)
115
- list_history = history_useful[0].split('</s> <s>')
116
- history.append((list_history[-2], list_history[-1]))
117
- # store_message(message, response) # Save to dataset - uncomment if you uncomment above to save inputs and outputs to your dataset
118
- return history, history
119
-
120
-
121
- SAMPLE_RATE = 16000
122
- asr_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge")  # distinct name so the Blenderbot `model` above is not shadowed
122
- asr_model.change_decoding_strategy(None)
123
- asr_model.eval()
125
-
126
- def process_audio_file(file):
127
- data, sr = librosa.load(file)
128
- if sr != SAMPLE_RATE:
129
- data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
130
- # monochannel
131
- data = librosa.to_mono(data)
132
- return data
133
-
134
-
135
- def transcribe(audio, state=""):
136
- if state is None:
137
- state = ""
138
- audio_data = process_audio_file(audio)
139
- with tempfile.TemporaryDirectory() as tmpdir:
140
- audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
141
- soundfile.write(audio_path, audio_data, SAMPLE_RATE)
142
- transcriptions = asr_model.transcribe([audio_path])
143
- if type(transcriptions) == tuple and len(transcriptions) == 2:
144
- transcriptions = transcriptions[0]
145
- transcriptions = transcriptions[0]
146
- # store_message(transcriptions, state) # Save to dataset - uncomment to store into a dataset - hint you will need your HF_TOKEN
147
- state = state + transcriptions + " "
148
- return state, state
149
-
150
- iface = gr.Interface(
151
- fn=transcribe,
152
- inputs=[
153
- gr.Audio(source="microphone", type='filepath', streaming=True),
154
- "state",
155
- ],
156
- outputs=[
157
- "textbox",
158
- "state",
159
- ],
160
- layout="horizontal",
161
- theme="huggingface",
162
- title="🗣️LiveSpeechRecognition🧠Memory💾",
163
- description=f"Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.",
164
- allow_flagging='never',
165
- live=True,
166
- # article=f"Result Output Saved to Memory💾 Dataset: [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
167
- article=f"Important Videos to understanding AI and NLP Clinical Terminology, Assessment, and Value Based Care AI include Huggingfaces Course Series here: https://www.youtube.com/c/HuggingFace , AI NLP Innovations in 2022 for Clinical and Mental Health Care here: https://www.youtube.com/watch?v=r38lXjz3g6M&list=PLHgX2IExbFov_5_4WfkesR7gnWPHHG-a1 and this link to see and manage playlist here: https://www.youtube.com/playlist?list=PLHgX2IExbFov_5_4WfkesR7gnWPHHG-a1 Review at your leisure to understand AI and NLP impact to helping the world develop Clinical systems of the future using AI and NLP for Clinical Terminology and alignment to worldwide Value Based Care objectives to help people be healthy."
168
- )
169
- iface.launch()
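
Setting the models aside, the history bookkeeping in `chat()` above boils down to serializing alternating user/bot turns with Blenderbot's `'</s> <s>'` separator and splitting them back out. A minimal sketch of just that round trip (the sample strings are made up):

```python
SEP = '</s> <s>'  # the turn separator used by chat() above

def add_note_to_history(note, note_history):
    """Append one turn and re-serialize the history into a single string."""
    note_history.append(note)
    return [SEP.join(note_history)]

history = add_note_to_history("Hello!", [])
history = add_note_to_history("Hi, how can I help?", history[0].split(SEP))
turns = history[0].split(SEP)
print(list(zip(turns[::2], turns[1::2])))  # [('Hello!', 'Hi, how can I help?')]
```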
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/server/internal.js DELETED
@@ -1,30 +0,0 @@
1
-
2
- import root from '../root.svelte';
3
- import { set_building } from '__sveltekit/environment';
4
- import { set_assets } from '__sveltekit/paths';
5
- import { set_private_env, set_public_env } from '../../../node_modules/@sveltejs/kit/src/runtime/shared-server.js';
6
-
7
- export const options = {
8
- app_template_contains_nonce: false,
9
- csp: {"mode":"auto","directives":{"upgrade-insecure-requests":false,"block-all-mixed-content":false},"reportOnly":{"upgrade-insecure-requests":false,"block-all-mixed-content":false}},
10
- csrf_check_origin: false,
11
- track_server_fetches: false,
12
- embedded: false,
13
- env_public_prefix: 'PUBLIC_',
14
- env_private_prefix: '',
15
- hooks: null, // added lazily, via `get_hooks`
16
- preload_strategy: "modulepreload",
17
- root,
18
- service_worker: false,
19
- templates: {
20
- app: ({ head, body, assets, nonce, env }) => "<!DOCTYPE html>\r\n<html lang=\"en\" class=\"h-full\">\r\n\t<link rel=\"stylesheet\" href=\"https://www.w3schools.com/w3css/4/w3.css\" />\r\n\t<head>\r\n\t\t<!-- Google Tag Manager -->\r\n\t\t<script>\r\n\t\tvar _paq = window._paq || [];\r\n\t\twindow._paq=_paq;\r\n\t\t(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':\r\n\t\tnew Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],\r\n\t\tj=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=\r\n\t\t'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);\r\n\t\t})(window,document,'script','dataLayer','GTM-TVD93MF');\r\n\t\t</script>\r\n\t\t<!-- End Google Tag Manager -->\r\n\t\t<meta charset=\"utf-8\" />\r\n\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1, user-scalable=no\" />\r\n\t\t<meta property=\"og:image\" content=\"/chatui/thumbnail.jpg\" />\r\n\t\t<script>\r\n\t\t\tif (\r\n\t\t\t\tlocalStorage.theme === \"dark\" ||\r\n\t\t\t\t(!(\"theme\" in localStorage) && window.matchMedia(\"(prefers-color-scheme: dark)\").matches)\r\n\t\t\t) {\r\n\t\t\t\tdocument.documentElement.classList.add(\"dark\");\r\n\t\t\t}\r\n\t\t</script>\r\n\t\t" + head + "\r\n\t</head>\r\n\t<body data-sveltekit-preload-data=\"hover\" class=\"h-full dark:bg-gray-900\">\r\n\t\t<div id=\"app\" class=\"contents h-full\">" + body + "</div>\r\n\t</body>\r\n</html>\r\n",
21
- error: ({ status, message }) => "<!DOCTYPE html>\n<html lang=\"en\">\n\t<head>\n\t\t<meta charset=\"utf-8\" />\n\t\t<title>" + message + "</title>\n\n\t\t<style>\n\t\t\tbody {\n\t\t\t\t--bg: white;\n\t\t\t\t--fg: #222;\n\t\t\t\t--divider: #ccc;\n\t\t\t\tbackground: var(--bg);\n\t\t\t\tcolor: var(--fg);\n\t\t\t\tfont-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen,\n\t\t\t\t\tUbuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;\n\t\t\t\tdisplay: flex;\n\t\t\t\talign-items: center;\n\t\t\t\tjustify-content: center;\n\t\t\t\theight: 100vh;\n\t\t\t\tmargin: 0;\n\t\t\t}\n\n\t\t\t.error {\n\t\t\t\tdisplay: flex;\n\t\t\t\talign-items: center;\n\t\t\t\tmax-width: 32rem;\n\t\t\t\tmargin: 0 1rem;\n\t\t\t}\n\n\t\t\t.status {\n\t\t\t\tfont-weight: 200;\n\t\t\t\tfont-size: 3rem;\n\t\t\t\tline-height: 1;\n\t\t\t\tposition: relative;\n\t\t\t\ttop: -0.05rem;\n\t\t\t}\n\n\t\t\t.message {\n\t\t\t\tborder-left: 1px solid var(--divider);\n\t\t\t\tpadding: 0 0 0 1rem;\n\t\t\t\tmargin: 0 0 0 1rem;\n\t\t\t\tmin-height: 2.5rem;\n\t\t\t\tdisplay: flex;\n\t\t\t\talign-items: center;\n\t\t\t}\n\n\t\t\t.message h1 {\n\t\t\t\tfont-weight: 400;\n\t\t\t\tfont-size: 1em;\n\t\t\t\tmargin: 0;\n\t\t\t}\n\n\t\t\t@media (prefers-color-scheme: dark) {\n\t\t\t\tbody {\n\t\t\t\t\t--bg: #222;\n\t\t\t\t\t--fg: #ddd;\n\t\t\t\t\t--divider: #666;\n\t\t\t\t}\n\t\t\t}\n\t\t</style>\n\t</head>\n\t<body>\n\t\t<div class=\"error\">\n\t\t\t<span class=\"status\">" + status + "</span>\n\t\t\t<div class=\"message\">\n\t\t\t\t<h1>" + message + "</h1>\n\t\t\t</div>\n\t\t</div>\n\t</body>\n</html>\n"
22
- },
23
- version_hash: "r3vpsq"
24
- };
25
-
26
- export function get_hooks() {
27
- return import("../../../src/hooks.server.ts");
28
- }
29
-
30
- export { set_assets, set_building, set_private_env, set_public_env };
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/ColorInput.d.ts DELETED
@@ -1,59 +0,0 @@
1
- import ColorInputBase from '../colorinputbase/ColorInputBase';
2
- import RoundRectangle from '../../roundrectangle/RoundRectangle';
3
- import ColorComponents from '../colorcomponents/ColorComponents';
4
- import CanvasInput from '../../canvasinput/CanvasInput';
5
-
6
- export default ColorInput;
7
-
8
- declare namespace ColorInput {
9
- type TransitCallbackType = (
10
- gameObject: Phaser.GameObjects.GameObject,
11
- duration: number
12
- ) => void;
13
-
14
- interface IConfig extends ColorInputBase.IConfig {
15
- colorPicker?: {
16
- width?: number, height?: number,
17
-
18
- background?: RoundRectangle.IConfig,
19
- createBackgroundCallback: (
20
- scene: Phaser.Scene,
21
- ) => Phaser.GameObjects.GameObject,
22
-
23
- hPalettePosition?: 0 | 1 | 2 | 3 | 'bottom' | 'left' | 'top' | 'right',
24
-
25
- expandDirection?: 0 | 1 | 'down' | 'up',
26
-
27
- easeIn?: number, easeOut?: number,
28
-
29
- transitIn?: TransitCallbackType,
30
- transitOut?: TransitCallbackType,
31
-
32
- bounds?: Phaser.Geom.Rectangle;
33
-
34
- space?: {
35
- left?: number, right?: number, top?: number, bottom?: number,
36
- item?: number,
37
- }
38
- },
39
-
40
- colorComponents?: {
41
- height?: number,
42
-
43
- formatLabel?: ColorComponents.IFormatLabelConfig,
44
-
45
- inputText?: CanvasInput.IConfig,
46
-
47
- space?: {
48
- left?: number, right?: number, top?: number, bottom?: number,
49
- },
50
- }
51
- }
52
- }
53
-
54
- declare class ColorInput extends ColorInputBase {
55
- constructor(
56
- scene: Phaser.Scene,
57
- config?: ColorInput.IConfig
58
- );
59
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import RoundRectangle from './RoundRectangle.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('roundRectangle', function (x, y, width, height, radiusConfig, fillColor, fillAlpha) {
6
- var gameObject = new RoundRectangle(this.scene, x, y, width, height, radiusConfig, fillColor, fillAlpha);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.RoundRectangle', RoundRectangle);
12
-
13
- export default RoundRectangle;
 
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/keypoint_detector.py DELETED
@@ -1,27 +0,0 @@
1
- from torch import nn
2
- import torch
3
- from torchvision import models
4
-
5
- class KPDetector(nn.Module):
6
- """
7
- Predict K*5 keypoints.
8
- """
9
-
10
- def __init__(self, num_tps, **kwargs):
11
- super(KPDetector, self).__init__()
12
- self.num_tps = num_tps
13
-
14
- self.fg_encoder = models.resnet18(pretrained=False)
15
- num_features = self.fg_encoder.fc.in_features
16
- self.fg_encoder.fc = nn.Linear(num_features, num_tps*5*2)
17
-
18
-
19
- def forward(self, image):
20
-
21
- fg_kp = self.fg_encoder(image)
22
- bs, _, = fg_kp.shape
23
- fg_kp = torch.sigmoid(fg_kp)
24
- fg_kp = fg_kp * 2 - 1
25
- out = {'fg_kp': fg_kp.view(bs, self.num_tps*5, -1)}
26
-
27
- return out
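
A quick shape check confirms what the detector returns; the batch size, the 256x256 input resolution, and `num_tps=10` are illustrative assumptions (with `pretrained=False` the ResNet-18 backbone downloads nothing):

```python
import torch

# assumes the KPDetector class defined above is in scope
detector = KPDetector(num_tps=10)
image = torch.randn(2, 3, 256, 256)  # batch of 2 RGB frames
out = detector(image)
print(out['fg_kp'].shape)  # torch.Size([2, 50, 2]): num_tps * 5 keypoints in (-1, 1)
```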
 
spaces/AlexKozachuk/anything-v3.0/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Anything V3.0
3
- emoji: 🏃
4
- colorFrom: gray
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.10.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: yuessiah/anything-v3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Allakhazam/Home/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Home Prompts
3
- emoji: 🏆
4
- colorFrom: indigo
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/clip_loss.py DELETED
@@ -1,17 +0,0 @@
1
-
2
- import torch
3
- import clip
4
-
5
-
6
- class CLIPLoss(torch.nn.Module):
7
-
8
- def __init__(self, opts):
9
- super(CLIPLoss, self).__init__()
10
- self.model, self.preprocess = clip.load("ViT-B/32", device="cuda")
11
- self.upsample = torch.nn.Upsample(scale_factor=7)
12
- self.avg_pool = torch.nn.AvgPool2d(kernel_size=opts.stylegan_size // 32)
13
-
14
- def forward(self, image, text):
15
- image = self.avg_pool(self.upsample(image))
16
- similarity = 1 - self.model(image, text)[0] / 100
17
- return similarity
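
A hedged usage sketch: it requires the OpenAI `clip` package and a CUDA device (the loss hard-codes device="cuda"), and assumes `opts.stylegan_size` is 1024 so the resize chain lands on CLIP's 224x224 input (1024 -> x7 upsample -> 7168 -> 32x32 average pool -> 224). The prompt string is arbitrary:

```python
from types import SimpleNamespace
import torch
import clip

# assumes the CLIPLoss class defined above is in scope
opts = SimpleNamespace(stylegan_size=1024)  # assumed generator resolution
loss_fn = CLIPLoss(opts)
image = torch.randn(1, 3, 1024, 1024, device="cuda")  # stand-in for a StyleGAN output
text = clip.tokenize(["a smiling face"]).to("cuda")
loss = loss_fn(image, text)  # 1 - logits/100: lower means a better text match
print(loss.shape)  # torch.Size([1, 1])
```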
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/experimental/rl/value_guided_sampling.py DELETED
@@ -1,154 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import numpy as np
16
- import torch
17
- import tqdm
18
-
19
- from ...models.unet_1d import UNet1DModel
20
- from ...pipelines import DiffusionPipeline
21
- from ...utils import randn_tensor
22
- from ...utils.dummy_pt_objects import DDPMScheduler
23
-
24
-
25
- class ValueGuidedRLPipeline(DiffusionPipeline):
26
- r"""
27
- Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.
28
-
29
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
30
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
31
-
32
- Parameters:
33
- value_function ([`UNet1DModel`]):
34
- A specialized UNet for fine-tuning trajectories based on reward.
35
- unet ([`UNet1DModel`]):
36
- UNet architecture to denoise the encoded trajectories.
37
- scheduler ([`SchedulerMixin`]):
38
- A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
39
- application is [`DDPMScheduler`].
40
- env:
41
- An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
42
- """
43
-
44
- def __init__(
45
- self,
46
- value_function: UNet1DModel,
47
- unet: UNet1DModel,
48
- scheduler: DDPMScheduler,
49
- env,
50
- ):
51
- super().__init__()
52
- self.value_function = value_function
53
- self.unet = unet
54
- self.scheduler = scheduler
55
- self.env = env
56
- self.data = env.get_dataset()
57
- self.means = {}
58
- for key in self.data.keys():
59
- try:
60
- self.means[key] = self.data[key].mean()
61
- except: # noqa: E722
62
- pass
63
- self.stds = {}
64
- for key in self.data.keys():
65
- try:
66
- self.stds[key] = self.data[key].std()
67
- except: # noqa: E722
68
- pass
69
- self.state_dim = env.observation_space.shape[0]
70
- self.action_dim = env.action_space.shape[0]
71
-
72
- def normalize(self, x_in, key):
73
- return (x_in - self.means[key]) / self.stds[key]
74
-
75
- def de_normalize(self, x_in, key):
76
- return x_in * self.stds[key] + self.means[key]
77
-
78
- def to_torch(self, x_in):
79
- if type(x_in) is dict:
80
- return {k: self.to_torch(v) for k, v in x_in.items()}
81
- elif torch.is_tensor(x_in):
82
- return x_in.to(self.unet.device)
83
- return torch.tensor(x_in, device=self.unet.device)
84
-
85
- def reset_x0(self, x_in, cond, act_dim):
86
- for key, val in cond.items():
87
- x_in[:, key, act_dim:] = val.clone()
88
- return x_in
89
-
90
- def run_diffusion(self, x, conditions, n_guide_steps, scale):
91
- batch_size = x.shape[0]
92
- y = None
93
- for i in tqdm.tqdm(self.scheduler.timesteps):
94
- # create batch of timesteps to pass into model
95
- timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
96
- for _ in range(n_guide_steps):
97
- with torch.enable_grad():
98
- x.requires_grad_()
99
-
100
- # permute to match dimension for pre-trained models
101
- y = self.value_function(x.permute(0, 2, 1), timesteps).sample
102
- grad = torch.autograd.grad([y.sum()], [x])[0]
103
-
104
- posterior_variance = self.scheduler._get_variance(i)
105
- model_std = torch.exp(0.5 * posterior_variance)
106
- grad = model_std * grad
107
-
108
- grad[timesteps < 2] = 0
109
- x = x.detach()
110
- x = x + scale * grad
111
- x = self.reset_x0(x, conditions, self.action_dim)
112
-
113
- prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
114
-
115
- # TODO: verify deprecation of this kwarg
116
- x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
117
-
118
- # apply conditions to the trajectory (set the initial state)
119
- x = self.reset_x0(x, conditions, self.action_dim)
120
- x = self.to_torch(x)
121
- return x, y
122
-
123
- def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
124
- # normalize the observations and create batch dimension
125
- obs = self.normalize(obs, "observations")
126
- obs = obs[None].repeat(batch_size, axis=0)
127
-
128
- conditions = {0: self.to_torch(obs)}
129
- shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
130
-
131
- # generate initial noise and apply our conditions (to make the trajectories start at current state)
132
- x1 = randn_tensor(shape, device=self.unet.device)
133
- x = self.reset_x0(x1, conditions, self.action_dim)
134
- x = self.to_torch(x)
135
-
136
- # run the diffusion process
137
- x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
138
-
139
- # sort output trajectories by value
140
- sorted_idx = y.argsort(0, descending=True).squeeze()
141
- sorted_values = x[sorted_idx]
142
- actions = sorted_values[:, :, : self.action_dim]
143
- actions = actions.detach().cpu().numpy()
144
- denorm_actions = self.de_normalize(actions, key="actions")
145
-
146
- # select the action with the highest value
147
- if y is not None:
148
- selected_index = 0
149
- else:
150
- # if we didn't run value guiding, select a random action
151
- selected_index = np.random.randint(0, batch_size)
152
-
153
- denorm_actions = denorm_actions[selected_index, 0]
154
- return denorm_actions
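
A hypothetical end-to-end sketch of driving an environment with this pipeline. It assumes d4rl's hopper task, the old `gym` step API, and the hopper checkpoint published alongside the diffusers RL example; treat the repo id and import path as assumptions for the diffusers version this file ships with:

```python
import gym
import d4rl  # noqa: F401 -- registers the offline-RL hopper environments

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32",  # assumed checkpoint id
    env=env,
)

obs = env.reset()
for _ in range(10):
    action = pipeline(obs, planning_horizon=32)  # denormalized action vector
    obs, reward, done, info = env.step(action)
    if done:
        break
```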
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py DELETED
@@ -1,57 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import unittest
17
-
18
- import numpy as np
19
- import torch
20
-
21
- from diffusers import VersatileDiffusionImageVariationPipeline
22
- from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
23
-
24
-
25
- torch.backends.cuda.matmul.allow_tf32 = False
26
-
27
-
28
- class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
29
- pass
30
-
31
-
32
- @slow
33
- @require_torch_gpu
34
- class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
35
- def test_inference_image_variations(self):
36
- pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
37
- pipe.to(torch_device)
38
- pipe.set_progress_bar_config(disable=None)
39
-
40
- image_prompt = load_image(
41
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
42
- )
43
- generator = torch.manual_seed(0)
44
- image = pipe(
45
- image=image_prompt,
46
- generator=generator,
47
- guidance_scale=7.5,
48
- num_inference_steps=50,
49
- output_type="numpy",
50
- ).images
51
-
52
- image_slice = image[0, 253:256, 253:256, -1]
53
-
54
- assert image.shape == (1, 512, 512, 3)
55
- expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
56
-
57
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
spaces/Andy1621/uniformer_image_detection/configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py DELETED
@@ -1,140 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/cascade_mask_rcnn_swin_fpn.py',
3
- '../_base_/datasets/coco_instance.py',
4
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
5
- ]
6
-
7
- model = dict(
8
- backbone=dict(
9
- embed_dim=96,
10
- depths=[2, 2, 18, 2],
11
- num_heads=[3, 6, 12, 24],
12
- window_size=7,
13
- ape=False,
14
- drop_path_rate=0.2,
15
- patch_norm=True,
16
- use_checkpoint=False
17
- ),
18
- neck=dict(in_channels=[96, 192, 384, 768]),
19
- roi_head=dict(
20
- bbox_head=[
21
- dict(
22
- type='ConvFCBBoxHead',
23
- num_shared_convs=4,
24
- num_shared_fcs=1,
25
- in_channels=256,
26
- conv_out_channels=256,
27
- fc_out_channels=1024,
28
- roi_feat_size=7,
29
- num_classes=80,
30
- bbox_coder=dict(
31
- type='DeltaXYWHBBoxCoder',
32
- target_means=[0., 0., 0., 0.],
33
- target_stds=[0.1, 0.1, 0.2, 0.2]),
34
- reg_class_agnostic=False,
35
- reg_decoded_bbox=True,
36
- norm_cfg=dict(type='SyncBN', requires_grad=True),
37
- loss_cls=dict(
38
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
39
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
40
- dict(
41
- type='ConvFCBBoxHead',
42
- num_shared_convs=4,
43
- num_shared_fcs=1,
44
- in_channels=256,
45
- conv_out_channels=256,
46
- fc_out_channels=1024,
47
- roi_feat_size=7,
48
- num_classes=80,
49
- bbox_coder=dict(
50
- type='DeltaXYWHBBoxCoder',
51
- target_means=[0., 0., 0., 0.],
52
- target_stds=[0.05, 0.05, 0.1, 0.1]),
53
- reg_class_agnostic=False,
54
- reg_decoded_bbox=True,
55
- norm_cfg=dict(type='SyncBN', requires_grad=True),
56
- loss_cls=dict(
57
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
58
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
59
- dict(
60
- type='ConvFCBBoxHead',
61
- num_shared_convs=4,
62
- num_shared_fcs=1,
63
- in_channels=256,
64
- conv_out_channels=256,
65
- fc_out_channels=1024,
66
- roi_feat_size=7,
67
- num_classes=80,
68
- bbox_coder=dict(
69
- type='DeltaXYWHBBoxCoder',
70
- target_means=[0., 0., 0., 0.],
71
- target_stds=[0.033, 0.033, 0.067, 0.067]),
72
- reg_class_agnostic=False,
73
- reg_decoded_bbox=True,
74
- norm_cfg=dict(type='SyncBN', requires_grad=True),
75
- loss_cls=dict(
76
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
77
- loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
78
- ]))
79
-
80
- img_norm_cfg = dict(
81
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
82
-
83
- # augmentation strategy originates from DETR / Sparse RCNN
84
- train_pipeline = [
85
- dict(type='LoadImageFromFile'),
86
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
87
- dict(type='RandomFlip', flip_ratio=0.5),
88
- dict(type='AutoAugment',
89
- policies=[
90
- [
91
- dict(type='Resize',
92
- img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
93
- (608, 1333), (640, 1333), (672, 1333), (704, 1333),
94
- (736, 1333), (768, 1333), (800, 1333)],
95
- multiscale_mode='value',
96
- keep_ratio=True)
97
- ],
98
- [
99
- dict(type='Resize',
100
- img_scale=[(400, 1333), (500, 1333), (600, 1333)],
101
- multiscale_mode='value',
102
- keep_ratio=True),
103
- dict(type='RandomCrop',
104
- crop_type='absolute_range',
105
- crop_size=(384, 600),
106
- allow_negative_crop=True),
107
- dict(type='Resize',
108
- img_scale=[(480, 1333), (512, 1333), (544, 1333),
109
- (576, 1333), (608, 1333), (640, 1333),
110
- (672, 1333), (704, 1333), (736, 1333),
111
- (768, 1333), (800, 1333)],
112
- multiscale_mode='value',
113
- override=True,
114
- keep_ratio=True)
115
- ]
116
- ]),
117
- dict(type='Normalize', **img_norm_cfg),
118
- dict(type='Pad', size_divisor=32),
119
- dict(type='DefaultFormatBundle'),
120
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
121
- ]
122
- data = dict(train=dict(pipeline=train_pipeline))
123
-
124
- optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
125
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
126
- 'relative_position_bias_table': dict(decay_mult=0.),
127
- 'norm': dict(decay_mult=0.)}))
128
- lr_config = dict(step=[27, 33])
129
- runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
130
-
131
- # do not use mmdet version fp16
132
- fp16 = None
133
- optimizer_config = dict(
134
- type="DistOptimizerHook",
135
- update_interval=1,
136
- grad_clip=None,
137
- coalesce=True,
138
- bucket_size_mb=-1,
139
- use_fp16=True,
140
- )
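
For context, a config like this is normally consumed through the mmdet 2.x Python API that this repo's fork follows; a minimal inference sketch, with the checkpoint path as a placeholder:

```python
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py'
checkpoint_file = 'cascade_mask_rcnn_swin_small.pth'  # placeholder path

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class (bboxes, masks) for one image
```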
 
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py DELETED
@@ -1,12 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/dnl_r50-d8.py',
3
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_80k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(align_corners=True),
8
- auxiliary_head=dict(align_corners=True),
9
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
10
- optimizer = dict(
11
- paramwise_cfg=dict(
12
- custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.))))
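
As with the detection configs, this segmentation config would normally be loaded through the mmseg 0.x API; a hedged sketch with a placeholder checkpoint path:

```python
from mmseg.apis import inference_segmentor, init_segmentor

config_file = 'configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py'
checkpoint_file = 'dnl_r50-d8_769x769_80k_cityscapes.pth'  # placeholder path

model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # per-pixel class ids
```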
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './pspnet_r50-d8_512x512_80k_ade20k.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
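
Since this file only overrides the pretrained weights and the backbone depth, everything else comes from `_base_`. A small sketch with mmcv's `Config` shows the merged result, assuming it is loaded from the repo root:

```python
from mmcv import Config

cfg = Config.fromfile('configs/pspnet/pspnet_r101-d8_512x512_80k_ade20k.py')
print(cfg.model.backbone.depth)  # 101, overridden here
print(cfg.model.pretrained)      # 'open-mmlab://resnet101_v1c', also overridden here
```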
 
 
 
spaces/Ariharasudhan/YoloV5/utils/segment/loss.py DELETED
@@ -1,186 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- from ..general import xywh2xyxy
6
- from ..loss import FocalLoss, smooth_BCE
- from ..metrics import bbox_iou
- from ..torch_utils import de_parallel
- from .general import crop_mask
-
-
- class ComputeLoss:
-     # Compute losses
-     def __init__(self, model, autobalance=False, overlap=False):
-         self.sort_obj_iou = False
-         self.overlap = overlap
-         device = next(model.parameters()).device  # get model device
-         h = model.hyp  # hyperparameters
-         self.device = device
-
-         # Define criteria
-         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
-         BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
-
-         # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
-         self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
-
-         # Focal loss
-         g = h['fl_gamma']  # focal loss gamma
-         if g > 0:
-             BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
-
-         m = de_parallel(model).model[-1]  # Detect() module
-         self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
-         self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index
-         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
-         self.na = m.na  # number of anchors
-         self.nc = m.nc  # number of classes
-         self.nl = m.nl  # number of layers
-         self.nm = m.nm  # number of masks
-         self.anchors = m.anchors
-         self.device = device
-
-     def __call__(self, preds, targets, masks):  # predictions, targets, model
-         p, proto = preds
-         bs, nm, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width
-         lcls = torch.zeros(1, device=self.device)
-         lbox = torch.zeros(1, device=self.device)
-         lobj = torch.zeros(1, device=self.device)
-         lseg = torch.zeros(1, device=self.device)
-         tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets)  # targets
-
-         # Losses
-         for i, pi in enumerate(p):  # layer index, layer predictions
-             b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
-             tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj
-
-             n = b.shape[0]  # number of targets
-             if n:
-                 pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1)  # subset of predictions
-
-                 # Box regression
-                 pxy = pxy.sigmoid() * 2 - 0.5
-                 pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
-                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
-                 iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)
-                 lbox += (1.0 - iou).mean()  # iou loss
-
-                 # Objectness
-                 iou = iou.detach().clamp(0).type(tobj.dtype)
-                 if self.sort_obj_iou:
-                     j = iou.argsort()
-                     b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
-                 if self.gr < 1:
-                     iou = (1.0 - self.gr) + self.gr * iou
-                 tobj[b, a, gj, gi] = iou  # iou ratio
-
-                 # Classification
-                 if self.nc > 1:  # cls loss (only if multiple classes)
-                     t = torch.full_like(pcls, self.cn, device=self.device)  # targets
-                     t[range(n), tcls[i]] = self.cp
-                     lcls += self.BCEcls(pcls, t)  # BCE
-
-                 # Mask regression
-                 if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
-                     masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
-                 marea = xywhn[i][:, 2:].prod(1)  # mask width, height normalized
-                 mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))
-                 for bi in b.unique():
-                     j = b == bi  # matching index
-                     if self.overlap:
-                         mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)
-                     else:
-                         mask_gti = masks[tidxs[i]][j]
-                     lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])
-
-             obji = self.BCEobj(pi[..., 4], tobj)
-             lobj += obji * self.balance[i]  # obj loss
-             if self.autobalance:
-                 self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
-
-         if self.autobalance:
-             self.balance = [x / self.balance[self.ssi] for x in self.balance]
-         lbox *= self.hyp["box"]
-         lobj *= self.hyp["obj"]
-         lcls *= self.hyp["cls"]
-         lseg *= self.hyp["box"] / bs
-
-         loss = lbox + lobj + lcls + lseg
-         return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()
-
-     def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
-         # Mask loss for one image
-         pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:])  # (n,32) @ (32,80,80) -> (n,80,80)
-         loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
-         return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()
-
-     def build_targets(self, p, targets):
-         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
-         na, nt = self.na, targets.shape[0]  # number of anchors, targets
-         tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], []
-         gain = torch.ones(8, device=self.device)  # normalized to gridspace gain
-         ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
-         if self.overlap:
-             batch = p[0].shape[0]
-             ti = []
-             for i in range(batch):
-                 num = (targets[:, 0] == i).sum()  # find number of targets of each image
-                 ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1)  # (na, num)
-             ti = torch.cat(ti, 1)  # (na, nt)
-         else:
-             ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1)
-         targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2)  # append anchor indices
-
-         g = 0.5  # bias
-         off = torch.tensor(
-             [
-                 [0, 0],
-                 [1, 0],
-                 [0, 1],
-                 [-1, 0],
-                 [0, -1],  # j,k,l,m
-                 # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
-             ],
-             device=self.device).float() * g  # offsets
-
-         for i in range(self.nl):
-             anchors, shape = self.anchors[i], p[i].shape
-             gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain
-
-             # Match targets to anchors
-             t = targets * gain  # shape(3,n,7)
-             if nt:
-                 # Matches
-                 r = t[..., 4:6] / anchors[:, None]  # wh ratio
-                 j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
-                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
-                 t = t[j]  # filter
-
-                 # Offsets
-                 gxy = t[:, 2:4]  # grid xy
-                 gxi = gain[[2, 3]] - gxy  # inverse
-                 j, k = ((gxy % 1 < g) & (gxy > 1)).T
-                 l, m = ((gxi % 1 < g) & (gxi > 1)).T
-                 j = torch.stack((torch.ones_like(j), j, k, l, m))
-                 t = t.repeat((5, 1, 1))[j]
-                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
-             else:
-                 t = targets[0]
-                 offsets = 0
-
-             # Define
-             bc, gxy, gwh, at = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors
-             (a, tidx), (b, c) = at.long().T, bc.long().T  # anchors, image, class
-             gij = (gxy - offsets).long()
-             gi, gj = gij.T  # grid indices
-
-             # Append
-             indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid
-             tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
-             anch.append(anchors[a])  # anchors
-             tcls.append(c)  # class
-             tidxs.append(tidx)
-             xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6])  # xywh normalized
-
-         return tcls, tbox, indices, anch, tidxs, xywhn
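
The single_mask_loss step in the deleted file assembles each instance mask as a linear combination of shared prototype masks before computing BCE. Below is a minimal standalone sketch of that assembly; the shapes (5 instances, 32 prototypes, an 80x80 prototype grid) and the random tensors are illustrative stand-ins, not real model outputs:

import torch
import torch.nn.functional as F

n, nm, h, w = 5, 32, 80, 80                   # instances, prototypes, proto grid size
coeffs = torch.randn(n, nm)                   # per-instance mask coefficients (pmask)
proto = torch.randn(nm, h, w)                 # prototype masks shared across one image
gt = torch.randint(0, 2, (n, h, w)).float()   # binary ground-truth masks

# (n,32) @ (32,h*w) -> (n,h,w): each instance mask is a linear mix of the prototypes
pred = (coeffs @ proto.view(nm, -1)).view(-1, h, w)
loss = F.binary_cross_entropy_with_logits(pred, gt, reduction="none")
print(loss.mean(dim=(1, 2)).shape)  # per-instance loss; the real code also crops to the box and divides by box area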
 
spaces/ArkanDash/rvc-models/infer_pack/commons.py DELETED
@@ -1,166 +0,0 @@
- import math
- import numpy as np
- import torch
- from torch import nn
- from torch.nn import functional as F
-
-
- def init_weights(m, mean=0.0, std=0.01):
-     classname = m.__class__.__name__
-     if classname.find("Conv") != -1:
-         m.weight.data.normal_(mean, std)
-
-
- def get_padding(kernel_size, dilation=1):
-     return int((kernel_size * dilation - dilation) / 2)
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def kl_divergence(m_p, logs_p, m_q, logs_q):
-     """KL(P||Q)"""
-     kl = (logs_q - logs_p) - 0.5
-     kl += (
-         0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
-     )
-     return kl
-
-
- def rand_gumbel(shape):
-     """Sample from the Gumbel distribution, protect from overflows."""
-     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-     return -torch.log(-torch.log(uniform_samples))
-
-
- def rand_gumbel_like(x):
-     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-     return g
-
-
- def slice_segments(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, :, idx_str:idx_end]
-     return ret
-
-
- def slice_segments2(x, ids_str, segment_size=4):
-     ret = torch.zeros_like(x[:, :segment_size])
-     for i in range(x.size(0)):
-         idx_str = ids_str[i]
-         idx_end = idx_str + segment_size
-         ret[i] = x[i, idx_str:idx_end]
-     return ret
-
-
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
-     b, d, t = x.size()
-     if x_lengths is None:
-         x_lengths = t
-     ids_str_max = x_lengths - segment_size + 1
-     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-     ret = slice_segments(x, ids_str, segment_size)
-     return ret, ids_str
-
-
- def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
-     position = torch.arange(length, dtype=torch.float)
-     num_timescales = channels // 2
-     log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
-         num_timescales - 1
-     )
-     inv_timescales = min_timescale * torch.exp(
-         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
-     )
-     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-     signal = F.pad(signal, [0, 0, 0, channels % 2])
-     signal = signal.view(1, channels, length)
-     return signal
-
-
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return x + signal.to(dtype=x.dtype, device=x.device)
-
-
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-     b, channels, length = x.size()
-     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
- def subsequent_mask(length):
-     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-     return mask
-
-
- @torch.jit.script
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-     n_channels_int = n_channels[0]
-     in_act = input_a + input_b
-     t_act = torch.tanh(in_act[:, :n_channels_int, :])
-     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-     acts = t_act * s_act
-     return acts
-
-
- def convert_pad_shape(pad_shape):
-     l = pad_shape[::-1]
-     pad_shape = [item for sublist in l for item in sublist]
-     return pad_shape
-
-
- def shift_1d(x):
-     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-     return x
-
-
- def sequence_mask(length, max_length=None):
-     if max_length is None:
-         max_length = length.max()
-     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-     return x.unsqueeze(0) < length.unsqueeze(1)
-
-
- def generate_path(duration, mask):
-     """
-     duration: [b, 1, t_x]
-     mask: [b, 1, t_y, t_x]
-     """
-     device = duration.device
-
-     b, _, t_y, t_x = mask.shape
-     cum_duration = torch.cumsum(duration, -1)
-
-     cum_duration_flat = cum_duration.view(b * t_x)
-     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-     path = path.view(b, t_x, t_y)
-     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-     path = path.unsqueeze(1).transpose(2, 3) * mask
-     return path
-
-
- def clip_grad_value_(parameters, clip_value, norm_type=2):
-     if isinstance(parameters, torch.Tensor):
-         parameters = [parameters]
-     parameters = list(filter(lambda p: p.grad is not None, parameters))
-     norm_type = float(norm_type)
-     if clip_value is not None:
-         clip_value = float(clip_value)
-
-     total_norm = 0
-     for p in parameters:
-         param_norm = p.grad.data.norm(norm_type)
-         total_norm += param_norm.item() ** norm_type
-         if clip_value is not None:
-             p.grad.data.clamp_(min=-clip_value, max=clip_value)
-     total_norm = total_norm ** (1.0 / norm_type)
-     return total_norm
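
A minimal usage sketch for the masking helper above, assuming the package root is on the path so the module is importable as infer_pack.commons; the lengths are arbitrary:

import torch
from infer_pack import commons

lengths = torch.tensor([3, 5])                      # valid timesteps per sequence
mask = commons.sequence_mask(lengths, max_length=6)
print(mask)
# tensor([[ True,  True,  True, False, False, False],
#         [ True,  True,  True,  True,  True, False]])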
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/resnet.py DELETED
@@ -1,208 +0,0 @@
- # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from einops import rearrange
-
-
- class InflatedConv3d(nn.Conv2d):
-     def forward(self, x):
-         video_length = x.shape[2]
-
-         x = rearrange(x, "b c f h w -> (b f) c h w")
-         x = super().forward(x)
-         x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
-
-         return x
-
-
- class Upsample3D(nn.Module):
-     def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
-         super().__init__()
-         self.channels = channels
-         self.out_channels = out_channels or channels
-         self.use_conv = use_conv
-         self.use_conv_transpose = use_conv_transpose
-         self.name = name
-
-         conv = None
-         if use_conv_transpose:
-             raise NotImplementedError
-         elif use_conv:
-             conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
-
-         if name == "conv":
-             self.conv = conv
-         else:
-             self.Conv2d_0 = conv
-
-     def forward(self, hidden_states, output_size=None):
-         assert hidden_states.shape[1] == self.channels
-
-         if self.use_conv_transpose:
-             raise NotImplementedError
-
-         # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
-         dtype = hidden_states.dtype
-         if dtype == torch.bfloat16:
-             hidden_states = hidden_states.to(torch.float32)
-
-         # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
-         if hidden_states.shape[0] >= 64:
-             hidden_states = hidden_states.contiguous()
-
-         # if `output_size` is passed we force the interpolation output
-         # size and do not make use of `scale_factor=2`
-         if output_size is None:
-             hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
-         else:
-             hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
-
-         # If the input is bfloat16, we cast back to bfloat16
-         if dtype == torch.bfloat16:
-             hidden_states = hidden_states.to(dtype)
-
-         if self.use_conv:
-             if self.name == "conv":
-                 hidden_states = self.conv(hidden_states)
-             else:
-                 hidden_states = self.Conv2d_0(hidden_states)
-
-         return hidden_states
-
-
- class Downsample3D(nn.Module):
-     def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
-         super().__init__()
-         self.channels = channels
-         self.out_channels = out_channels or channels
-         self.use_conv = use_conv
-         self.padding = padding
-         stride = 2
-         self.name = name
-
-         if use_conv:
-             conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
-         else:
-             raise NotImplementedError
-
-         if name == "conv":
-             self.Conv2d_0 = conv
-             self.conv = conv
-         elif name == "Conv2d_0":
-             self.conv = conv
-         else:
-             self.conv = conv
-
-     def forward(self, hidden_states):
-         assert hidden_states.shape[1] == self.channels
-         if self.use_conv and self.padding == 0:
-             raise NotImplementedError
-
-         assert hidden_states.shape[1] == self.channels
-         hidden_states = self.conv(hidden_states)
-
-         return hidden_states
-
-
- class ResnetBlock3D(nn.Module):
-     def __init__(
-         self,
-         *,
-         in_channels,
-         out_channels=None,
-         conv_shortcut=False,
-         dropout=0.0,
-         temb_channels=512,
-         groups=32,
-         groups_out=None,
-         pre_norm=True,
-         eps=1e-6,
-         non_linearity="swish",
-         time_embedding_norm="default",
-         output_scale_factor=1.0,
-         use_in_shortcut=None,
-     ):
-         super().__init__()
-         self.pre_norm = pre_norm
-         self.pre_norm = True
-         self.in_channels = in_channels
-         out_channels = in_channels if out_channels is None else out_channels
-         self.out_channels = out_channels
-         self.use_conv_shortcut = conv_shortcut
-         self.time_embedding_norm = time_embedding_norm
-         self.output_scale_factor = output_scale_factor
-
-         if groups_out is None:
-             groups_out = groups
-
-         self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
-
-         self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
-         if temb_channels is not None:
-             if self.time_embedding_norm == "default":
-                 time_emb_proj_out_channels = out_channels
-             elif self.time_embedding_norm == "scale_shift":
-                 time_emb_proj_out_channels = out_channels * 2
-             else:
-                 raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
-
-             self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
-         else:
-             self.time_emb_proj = None
-
-         self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
-         self.dropout = torch.nn.Dropout(dropout)
-         self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
-         if non_linearity == "swish":
-             self.nonlinearity = lambda x: F.silu(x)
-         elif non_linearity == "mish":
-             self.nonlinearity = Mish()
-         elif non_linearity == "silu":
-             self.nonlinearity = nn.SiLU()
-
-         self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
-
-         self.conv_shortcut = None
-         if self.use_in_shortcut:
-             self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
-     def forward(self, input_tensor, temb):
-         hidden_states = input_tensor
-
-         hidden_states = self.norm1(hidden_states)
-         hidden_states = self.nonlinearity(hidden_states)
-
-         hidden_states = self.conv1(hidden_states)
-
-         if temb is not None:
-             temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
-
-         if temb is not None and self.time_embedding_norm == "default":
-             hidden_states = hidden_states + temb
-
-         hidden_states = self.norm2(hidden_states)
-
-         if temb is not None and self.time_embedding_norm == "scale_shift":
-             scale, shift = torch.chunk(temb, 2, dim=1)
-             hidden_states = hidden_states * (1 + scale) + shift
-
-         hidden_states = self.nonlinearity(hidden_states)
-
-         hidden_states = self.dropout(hidden_states)
-         hidden_states = self.conv2(hidden_states)
-
-         if self.conv_shortcut is not None:
-             input_tensor = self.conv_shortcut(input_tensor)
-
-         output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
-
-         return output_tensor
-
-
- class Mish(torch.nn.Module):
-     def forward(self, hidden_states):
-         return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
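
InflatedConv3d is the key trick in this file: it folds the frame axis into the batch axis so a pretrained 2D convolution applies per frame of a video tensor. A small shape check under illustrative dimensions, assuming the class above is in scope:

import torch

conv = InflatedConv3d(4, 8, kernel_size=3, padding=1)  # a 2D conv applied per frame
video = torch.randn(2, 4, 16, 32, 32)                  # (batch, channels, frames, h, w)
print(conv(video).shape)                               # torch.Size([2, 8, 16, 32, 32])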
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/unicode.py DELETED
@@ -1,352 +0,0 @@
- # unicode.py
-
- import sys
- from itertools import filterfalse
- from typing import List, Tuple, Union
-
-
- class _lazyclassproperty:
-     def __init__(self, fn):
-         self.fn = fn
-         self.__doc__ = fn.__doc__
-         self.__name__ = fn.__name__
-
-     def __get__(self, obj, cls):
-         if cls is None:
-             cls = type(obj)
-         if not hasattr(cls, "_intern") or any(
-             cls._intern is getattr(superclass, "_intern", [])
-             for superclass in cls.__mro__[1:]
-         ):
-             cls._intern = {}
-         attrname = self.fn.__name__
-         if attrname not in cls._intern:
-             cls._intern[attrname] = self.fn(cls)
-         return cls._intern[attrname]
-
-
- UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
- class unicode_set:
-     """
-     A set of Unicode characters, for language-specific strings for
-     ``alphas``, ``nums``, ``alphanums``, and ``printables``.
-     A unicode_set is defined by a list of ranges in the Unicode character
-     set, in a class attribute ``_ranges``. Ranges can be specified using
-     2-tuples or a 1-tuple, such as::
-
-         _ranges = [
-             (0x0020, 0x007e),
-             (0x00a0, 0x00ff),
-             (0x0100,),
-         ]
-
-     Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
-     A unicode set can also be defined using multiple inheritance of other unicode sets::
-
-         class CJK(Chinese, Japanese, Korean):
-             pass
-     """
-
-     _ranges: UnicodeRangeList = []
-
-     @_lazyclassproperty
-     def _chars_for_ranges(cls):
-         ret = []
-         for cc in cls.__mro__:
-             if cc is unicode_set:
-                 break
-             for rr in getattr(cc, "_ranges", ()):
-                 ret.extend(range(rr[0], rr[-1] + 1))
-         return [chr(c) for c in sorted(set(ret))]
-
-     @_lazyclassproperty
-     def printables(cls):
-         "all non-whitespace characters in this range"
-         return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def alphas(cls):
-         "all alphabetic characters in this range"
-         return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def nums(cls):
-         "all numeric digit characters in this range"
-         return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def alphanums(cls):
-         "all alphanumeric characters in this range"
-         return cls.alphas + cls.nums
-
-     @_lazyclassproperty
-     def identchars(cls):
-         "all characters in this range that are valid identifier characters, plus underscore '_'"
-         return "".join(
-             sorted(
-                 set(
-                     "".join(filter(str.isidentifier, cls._chars_for_ranges))
-                     + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
-                     + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
-                     + "_"
-                 )
-             )
-         )
-
-     @_lazyclassproperty
-     def identbodychars(cls):
-         """
-         all characters in this range that are valid identifier body characters,
-         plus the digits 0-9
-         """
-         return "".join(
-             sorted(
-                 set(
-                     cls.identchars
-                     + "0123456789"
-                     + "".join(
-                         [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
-                     )
-                 )
-             )
-         )
-
-
- class pyparsing_unicode(unicode_set):
-     """
-     A namespace class for defining common language unicode_sets.
-     """
-
-     # fmt: off
-
-     # define ranges in language character sets
-     _ranges: UnicodeRangeList = [
-         (0x0020, sys.maxunicode),
-     ]
-
-     class BasicMultilingualPlane(unicode_set):
-         "Unicode set for the Basic Multilingual Plane"
-         _ranges: UnicodeRangeList = [
-             (0x0020, 0xFFFF),
-         ]
-
-     class Latin1(unicode_set):
-         "Unicode set for Latin-1 Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0020, 0x007E),
-             (0x00A0, 0x00FF),
-         ]
-
-     class LatinA(unicode_set):
-         "Unicode set for Latin-A Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0100, 0x017F),
-         ]
-
-     class LatinB(unicode_set):
-         "Unicode set for Latin-B Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0180, 0x024F),
-         ]
-
-     class Greek(unicode_set):
-         "Unicode set for Greek Unicode Character Ranges"
-         _ranges: UnicodeRangeList = [
-             (0x0342, 0x0345),
-             (0x0370, 0x0377),
-             (0x037A, 0x037F),
-             (0x0384, 0x038A),
-             (0x038C,),
-             (0x038E, 0x03A1),
-             (0x03A3, 0x03E1),
-             (0x03F0, 0x03FF),
-             (0x1D26, 0x1D2A),
-             (0x1D5E,),
-             (0x1D60,),
-             (0x1D66, 0x1D6A),
-             (0x1F00, 0x1F15),
-             (0x1F18, 0x1F1D),
-             (0x1F20, 0x1F45),
-             (0x1F48, 0x1F4D),
-             (0x1F50, 0x1F57),
-             (0x1F59,),
-             (0x1F5B,),
-             (0x1F5D,),
-             (0x1F5F, 0x1F7D),
-             (0x1F80, 0x1FB4),
-             (0x1FB6, 0x1FC4),
-             (0x1FC6, 0x1FD3),
-             (0x1FD6, 0x1FDB),
-             (0x1FDD, 0x1FEF),
-             (0x1FF2, 0x1FF4),
-             (0x1FF6, 0x1FFE),
-             (0x2129,),
-             (0x2719, 0x271A),
-             (0xAB65,),
-             (0x10140, 0x1018D),
-             (0x101A0,),
-             (0x1D200, 0x1D245),
-             (0x1F7A1, 0x1F7A7),
-         ]
-
-     class Cyrillic(unicode_set):
-         "Unicode set for Cyrillic Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0400, 0x052F),
-             (0x1C80, 0x1C88),
-             (0x1D2B,),
-             (0x1D78,),
-             (0x2DE0, 0x2DFF),
-             (0xA640, 0xA672),
-             (0xA674, 0xA69F),
-             (0xFE2E, 0xFE2F),
-         ]
-
-     class Chinese(unicode_set):
-         "Unicode set for Chinese Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x2E80, 0x2E99),
-             (0x2E9B, 0x2EF3),
-             (0x31C0, 0x31E3),
-             (0x3400, 0x4DB5),
-             (0x4E00, 0x9FEF),
-             (0xA700, 0xA707),
-             (0xF900, 0xFA6D),
-             (0xFA70, 0xFAD9),
-             (0x16FE2, 0x16FE3),
-             (0x1F210, 0x1F212),
-             (0x1F214, 0x1F23B),
-             (0x1F240, 0x1F248),
-             (0x20000, 0x2A6D6),
-             (0x2A700, 0x2B734),
-             (0x2B740, 0x2B81D),
-             (0x2B820, 0x2CEA1),
-             (0x2CEB0, 0x2EBE0),
-             (0x2F800, 0x2FA1D),
-         ]
-
-     class Japanese(unicode_set):
-         "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
-         _ranges: UnicodeRangeList = []
-
-         class Kanji(unicode_set):
-             "Unicode set for Kanji Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x4E00, 0x9FBF),
-                 (0x3000, 0x303F),
-             ]
-
-         class Hiragana(unicode_set):
-             "Unicode set for Hiragana Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x3041, 0x3096),
-                 (0x3099, 0x30A0),
-                 (0x30FC,),
-                 (0xFF70,),
-                 (0x1B001,),
-                 (0x1B150, 0x1B152),
-                 (0x1F200,),
-             ]
-
-         class Katakana(unicode_set):
-             "Unicode set for Katakana Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x3099, 0x309C),
-                 (0x30A0, 0x30FF),
-                 (0x31F0, 0x31FF),
-                 (0x32D0, 0x32FE),
-                 (0xFF65, 0xFF9F),
-                 (0x1B000,),
-                 (0x1B164, 0x1B167),
-                 (0x1F201, 0x1F202),
-                 (0x1F213,),
-             ]
-
-     class Hangul(unicode_set):
-         "Unicode set for Hangul (Korean) Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x1100, 0x11FF),
-             (0x302E, 0x302F),
-             (0x3131, 0x318E),
-             (0x3200, 0x321C),
-             (0x3260, 0x327B),
-             (0x327E,),
-             (0xA960, 0xA97C),
-             (0xAC00, 0xD7A3),
-             (0xD7B0, 0xD7C6),
-             (0xD7CB, 0xD7FB),
-             (0xFFA0, 0xFFBE),
-             (0xFFC2, 0xFFC7),
-             (0xFFCA, 0xFFCF),
-             (0xFFD2, 0xFFD7),
-             (0xFFDA, 0xFFDC),
-         ]
-
-     Korean = Hangul
-
-     class CJK(Chinese, Japanese, Hangul):
-         "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-
-     class Thai(unicode_set):
-         "Unicode set for Thai Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0E01, 0x0E3A),
-             (0x0E3F, 0x0E5B)
-         ]
-
-     class Arabic(unicode_set):
-         "Unicode set for Arabic Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0600, 0x061B),
-             (0x061E, 0x06FF),
-             (0x0700, 0x077F),
-         ]
-
-     class Hebrew(unicode_set):
-         "Unicode set for Hebrew Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0591, 0x05C7),
-             (0x05D0, 0x05EA),
-             (0x05EF, 0x05F4),
-             (0xFB1D, 0xFB36),
-             (0xFB38, 0xFB3C),
-             (0xFB3E,),
-             (0xFB40, 0xFB41),
-             (0xFB43, 0xFB44),
-             (0xFB46, 0xFB4F),
-         ]
-
-     class Devanagari(unicode_set):
-         "Unicode set for Devanagari Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0900, 0x097F),
-             (0xA8E0, 0xA8FF)
-         ]
-
-     # fmt: on
-
-
- pyparsing_unicode.Japanese._ranges = (
-     pyparsing_unicode.Japanese.Kanji._ranges
-     + pyparsing_unicode.Japanese.Hiragana._ranges
-     + pyparsing_unicode.Japanese.Katakana._ranges
- )
-
- pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
-
- # add language identifiers using language Unicode
- pyparsing_unicode.العربية = pyparsing_unicode.Arabic
- pyparsing_unicode.中文 = pyparsing_unicode.Chinese
- pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
- pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
- pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
- pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
- pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
- pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
- pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
- pyparsing_unicode.한국어 = pyparsing_unicode.Korean
- pyparsing_unicode.ไทย = pyparsing_unicode.Thai
- pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
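
These character sets feed directly into pyparsing grammars. A short usage sketch, assuming a standalone pyparsing 3.x install (this vendored copy ships inside pip and is not meant to be imported directly):

import pyparsing as pp

greek_word = pp.Word(pp.pyparsing_unicode.Greek.alphas)
print(greek_word.parse_string("αβγ δεζ")[0])  # -> αβγ (Word stops at the first non-Greek character)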
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py DELETED
@@ -1,104 +0,0 @@
- import types
- import functools
-
-
- # from jaraco.functools 3.3
- def method_cache(method, cache_wrapper=None):
-     """
-     Wrap lru_cache to support storing the cache data in the object instances.
-
-     Abstracts the common paradigm where the method explicitly saves an
-     underscore-prefixed protected property on first call and returns that
-     subsequently.
-
-     >>> class MyClass:
-     ...     calls = 0
-     ...
-     ...     @method_cache
-     ...     def method(self, value):
-     ...         self.calls += 1
-     ...         return value
-
-     >>> a = MyClass()
-     >>> a.method(3)
-     3
-     >>> for x in range(75):
-     ...     res = a.method(x)
-     >>> a.calls
-     75
-
-     Note that the apparent behavior will be exactly like that of lru_cache
-     except that the cache is stored on each instance, so values in one
-     instance will not flush values from another, and when an instance is
-     deleted, so are the cached values for that instance.
-
-     >>> b = MyClass()
-     >>> for x in range(35):
-     ...     res = b.method(x)
-     >>> b.calls
-     35
-     >>> a.method(0)
-     0
-     >>> a.calls
-     75
-
-     Note that if method had been decorated with ``functools.lru_cache()``,
-     a.calls would have been 76 (due to the cached value of 0 having been
-     flushed by the 'b' instance).
-
-     Clear the cache with ``.cache_clear()``
-
-     >>> a.method.cache_clear()
-
-     Same for a method that hasn't yet been called.
-
-     >>> c = MyClass()
-     >>> c.method.cache_clear()
-
-     Another cache wrapper may be supplied:
-
-     >>> cache = functools.lru_cache(maxsize=2)
-     >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
-     >>> a = MyClass()
-     >>> a.method2()
-     3
-
-     Caution - do not subsequently wrap the method with another decorator, such
-     as ``@property``, which changes the semantics of the function.
-
-     See also
-     http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
-     for another implementation and additional justification.
-     """
-     cache_wrapper = cache_wrapper or functools.lru_cache()
-
-     def wrapper(self, *args, **kwargs):
-         # it's the first call, replace the method with a cached, bound method
-         bound_method = types.MethodType(method, self)
-         cached_method = cache_wrapper(bound_method)
-         setattr(self, method.__name__, cached_method)
-         return cached_method(*args, **kwargs)
-
-     # Support cache clear even before cache has been created.
-     wrapper.cache_clear = lambda: None
-
-     return wrapper
-
-
- # From jaraco.functools 3.3
- def pass_none(func):
-     """
-     Wrap func so it's not called if its first param is None
-
-     >>> print_text = pass_none(print)
-     >>> print_text('text')
-     text
-     >>> print_text(None)
-     """
-
-     @functools.wraps(func)
-     def wrapper(param, *args, **kwargs):
-         if param is not None:
-             return func(param, *args, **kwargs)
-
-     return wrapper
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/bdist_egg.py DELETED
@@ -1,457 +0,0 @@
- """setuptools.command.bdist_egg
-
- Build .egg distributions"""
-
- from distutils.dir_util import remove_tree, mkpath
- from distutils import log
- from types import CodeType
- import sys
- import os
- import re
- import textwrap
- import marshal
-
- from pkg_resources import get_build_platform, Distribution
- from setuptools.extension import Library
- from setuptools import Command
- from .._path import ensure_directory
-
- from sysconfig import get_path, get_python_version
-
-
- def _get_purelib():
-     return get_path("purelib")
-
-
- def strip_module(filename):
-     if '.' in filename:
-         filename = os.path.splitext(filename)[0]
-     if filename.endswith('module'):
-         filename = filename[:-6]
-     return filename
-
-
- def sorted_walk(dir):
-     """Do os.walk in a reproducible way,
-     independent of indeterministic filesystem readdir order
-     """
-     for base, dirs, files in os.walk(dir):
-         dirs.sort()
-         files.sort()
-         yield base, dirs, files
-
-
- def write_stub(resource, pyfile):
-     _stub_template = textwrap.dedent("""
-         def __bootstrap__():
-             global __bootstrap__, __loader__, __file__
-             import sys, pkg_resources, importlib.util
-             __file__ = pkg_resources.resource_filename(__name__, %r)
-             __loader__ = None; del __bootstrap__, __loader__
-             spec = importlib.util.spec_from_file_location(__name__,__file__)
-             mod = importlib.util.module_from_spec(spec)
-             spec.loader.exec_module(mod)
-         __bootstrap__()
-         """).lstrip()
-     with open(pyfile, 'w') as f:
-         f.write(_stub_template % resource)
-
-
- class bdist_egg(Command):
-     description = "create an \"egg\" distribution"
-
-     user_options = [
-         ('bdist-dir=', 'b',
-          "temporary directory for creating the distribution"),
-         ('plat-name=', 'p', "platform name to embed in generated filenames "
-                             "(default: %s)" % get_build_platform()),
-         ('exclude-source-files', None,
-          "remove all .py files from the generated egg"),
-         ('keep-temp', 'k',
-          "keep the pseudo-installation tree around after " +
-          "creating the distribution archive"),
-         ('dist-dir=', 'd',
-          "directory to put final built distributions in"),
-         ('skip-build', None,
-          "skip rebuilding everything (for testing/debugging)"),
-     ]
-
-     boolean_options = [
-         'keep-temp', 'skip-build', 'exclude-source-files'
-     ]
-
-     def initialize_options(self):
-         self.bdist_dir = None
-         self.plat_name = None
-         self.keep_temp = 0
-         self.dist_dir = None
-         self.skip_build = 0
-         self.egg_output = None
-         self.exclude_source_files = None
-
-     def finalize_options(self):
-         ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
-         self.egg_info = ei_cmd.egg_info
-
-         if self.bdist_dir is None:
-             bdist_base = self.get_finalized_command('bdist').bdist_base
-             self.bdist_dir = os.path.join(bdist_base, 'egg')
-
-         if self.plat_name is None:
-             self.plat_name = get_build_platform()
-
-         self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
-         if self.egg_output is None:
-
-             # Compute filename of the output egg
-             basename = Distribution(
-                 None, None, ei_cmd.egg_name, ei_cmd.egg_version,
-                 get_python_version(),
-                 self.distribution.has_ext_modules() and self.plat_name
-             ).egg_name()
-
-             self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
-
-     def do_install_data(self):
-         # Hack for packages that install data to install's --install-lib
-         self.get_finalized_command('install').install_lib = self.bdist_dir
-
-         site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
-         old, self.distribution.data_files = self.distribution.data_files, []
-
-         for item in old:
-             if isinstance(item, tuple) and len(item) == 2:
-                 if os.path.isabs(item[0]):
-                     realpath = os.path.realpath(item[0])
-                     normalized = os.path.normcase(realpath)
-                     if normalized == site_packages or normalized.startswith(
-                         site_packages + os.sep
-                     ):
-                         item = realpath[len(site_packages) + 1:], item[1]
-                     # XXX else: raise ???
-             self.distribution.data_files.append(item)
-
-         try:
-             log.info("installing package data to %s", self.bdist_dir)
-             self.call_command('install_data', force=0, root=None)
-         finally:
-             self.distribution.data_files = old
-
-     def get_outputs(self):
-         return [self.egg_output]
-
-     def call_command(self, cmdname, **kw):
-         """Invoke reinitialized command `cmdname` with keyword args"""
-         for dirname in INSTALL_DIRECTORY_ATTRS:
-             kw.setdefault(dirname, self.bdist_dir)
-         kw.setdefault('skip_build', self.skip_build)
-         kw.setdefault('dry_run', self.dry_run)
-         cmd = self.reinitialize_command(cmdname, **kw)
-         self.run_command(cmdname)
-         return cmd
-
-     def run(self):  # noqa: C901  # is too complex (14)  # FIXME
-         # Generate metadata first
-         self.run_command("egg_info")
-         # We run install_lib before install_data, because some data hacks
-         # pull their data path from the install_lib command.
-         log.info("installing library code to %s", self.bdist_dir)
-         instcmd = self.get_finalized_command('install')
-         old_root = instcmd.root
-         instcmd.root = None
-         if self.distribution.has_c_libraries() and not self.skip_build:
-             self.run_command('build_clib')
-         cmd = self.call_command('install_lib', warn_dir=0)
-         instcmd.root = old_root
-
-         all_outputs, ext_outputs = self.get_ext_outputs()
-         self.stubs = []
-         to_compile = []
-         for (p, ext_name) in enumerate(ext_outputs):
-             filename, ext = os.path.splitext(ext_name)
-             pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
-                                   '.py')
-             self.stubs.append(pyfile)
-             log.info("creating stub loader for %s", ext_name)
-             if not self.dry_run:
-                 write_stub(os.path.basename(ext_name), pyfile)
-             to_compile.append(pyfile)
-             ext_outputs[p] = ext_name.replace(os.sep, '/')
-
-         if to_compile:
-             cmd.byte_compile(to_compile)
-         if self.distribution.data_files:
-             self.do_install_data()
-
-         # Make the EGG-INFO directory
-         archive_root = self.bdist_dir
-         egg_info = os.path.join(archive_root, 'EGG-INFO')
-         self.mkpath(egg_info)
-         if self.distribution.scripts:
-             script_dir = os.path.join(egg_info, 'scripts')
-             log.info("installing scripts to %s", script_dir)
-             self.call_command('install_scripts', install_dir=script_dir,
-                               no_ep=1)
-
-         self.copy_metadata_to(egg_info)
-         native_libs = os.path.join(egg_info, "native_libs.txt")
-         if all_outputs:
-             log.info("writing %s", native_libs)
-             if not self.dry_run:
-                 ensure_directory(native_libs)
-                 libs_file = open(native_libs, 'wt')
-                 libs_file.write('\n'.join(all_outputs))
-                 libs_file.write('\n')
-                 libs_file.close()
-         elif os.path.isfile(native_libs):
-             log.info("removing %s", native_libs)
-             if not self.dry_run:
-                 os.unlink(native_libs)
-
-         write_safety_flag(
-             os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
-         )
-
-         if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
-             log.warn(
-                 "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
-                 "Use the install_requires/extras_require setup() args instead."
-             )
-
-         if self.exclude_source_files:
-             self.zap_pyfiles()
-
-         # Make the archive
-         make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
-                      dry_run=self.dry_run, mode=self.gen_header())
-         if not self.keep_temp:
-             remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
-         # Add to 'Distribution.dist_files' so that the "upload" command works
-         getattr(self.distribution, 'dist_files', []).append(
-             ('bdist_egg', get_python_version(), self.egg_output))
-
-     def zap_pyfiles(self):
-         log.info("Removing .py files from temporary directory")
-         for base, dirs, files in walk_egg(self.bdist_dir):
-             for name in files:
-                 path = os.path.join(base, name)
-
-                 if name.endswith('.py'):
-                     log.debug("Deleting %s", path)
-                     os.unlink(path)
-
-                 if base.endswith('__pycache__'):
-                     path_old = path
-
-                     pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
-                     m = re.match(pattern, name)
-                     path_new = os.path.join(
-                         base, os.pardir, m.group('name') + '.pyc')
-                     log.info(
-                         "Renaming file from [%s] to [%s]"
-                         % (path_old, path_new))
-                     try:
-                         os.remove(path_new)
-                     except OSError:
-                         pass
-                     os.rename(path_old, path_new)
-
-     def zip_safe(self):
-         safe = getattr(self.distribution, 'zip_safe', None)
-         if safe is not None:
-             return safe
-         log.warn("zip_safe flag not set; analyzing archive contents...")
-         return analyze_egg(self.bdist_dir, self.stubs)
-
-     def gen_header(self):
-         return 'w'
-
-     def copy_metadata_to(self, target_dir):
-         "Copy metadata (egg info) to the target_dir"
-         # normalize the path (so that a forward-slash in egg_info will
-         # match using startswith below)
-         norm_egg_info = os.path.normpath(self.egg_info)
-         prefix = os.path.join(norm_egg_info, '')
-         for path in self.ei_cmd.filelist.files:
-             if path.startswith(prefix):
-                 target = os.path.join(target_dir, path[len(prefix):])
-                 ensure_directory(target)
-                 self.copy_file(path, target)
-
-     def get_ext_outputs(self):
-         """Get a list of relative paths to C extensions in the output distro"""
-
-         all_outputs = []
-         ext_outputs = []
-
-         paths = {self.bdist_dir: ''}
-         for base, dirs, files in sorted_walk(self.bdist_dir):
-             for filename in files:
-                 if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
-                     all_outputs.append(paths[base] + filename)
-             for filename in dirs:
-                 paths[os.path.join(base, filename)] = (paths[base] +
-                                                        filename + '/')
-
-         if self.distribution.has_ext_modules():
-             build_cmd = self.get_finalized_command('build_ext')
-             for ext in build_cmd.extensions:
-                 if isinstance(ext, Library):
-                     continue
-                 fullname = build_cmd.get_ext_fullname(ext.name)
-                 filename = build_cmd.get_ext_filename(fullname)
-                 if not os.path.basename(filename).startswith('dl-'):
-                     if os.path.exists(os.path.join(self.bdist_dir, filename)):
-                         ext_outputs.append(filename)
-
-         return all_outputs, ext_outputs
-
-
- NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
-
-
- def walk_egg(egg_dir):
-     """Walk an unpacked egg's contents, skipping the metadata directory"""
-     walker = sorted_walk(egg_dir)
-     base, dirs, files = next(walker)
-     if 'EGG-INFO' in dirs:
-         dirs.remove('EGG-INFO')
-     yield base, dirs, files
-     for bdf in walker:
-         yield bdf
-
-
- def analyze_egg(egg_dir, stubs):
-     # check for existing flag in EGG-INFO
-     for flag, fn in safety_flags.items():
-         if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
-             return flag
-     if not can_scan():
-         return False
-     safe = True
-     for base, dirs, files in walk_egg(egg_dir):
-         for name in files:
-             if name.endswith('.py') or name.endswith('.pyw'):
-                 continue
-             elif name.endswith('.pyc') or name.endswith('.pyo'):
-                 # always scan, even if we already know we're not safe
-                 safe = scan_module(egg_dir, base, name, stubs) and safe
-     return safe
-
-
- def write_safety_flag(egg_dir, safe):
-     # Write or remove zip safety flag file(s)
-     for flag, fn in safety_flags.items():
-         fn = os.path.join(egg_dir, fn)
-         if os.path.exists(fn):
-             if safe is None or bool(safe) != flag:
-                 os.unlink(fn)
-         elif safe is not None and bool(safe) == flag:
-             f = open(fn, 'wt')
-             f.write('\n')
-             f.close()
-
-
- safety_flags = {
-     True: 'zip-safe',
-     False: 'not-zip-safe',
- }
-
-
- def scan_module(egg_dir, base, name, stubs):
-     """Check whether module possibly uses unsafe-for-zipfile stuff"""
-
-     filename = os.path.join(base, name)
-     if filename[:-1] in stubs:
-         return True  # Extension module
-     pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
-     module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
-     if sys.version_info < (3, 7):
-         skip = 12  # skip magic & date & file size
-     else:
-         skip = 16  # skip magic & reserved? & date & file size
-     f = open(filename, 'rb')
-     f.read(skip)
-     code = marshal.load(f)
-     f.close()
-     safe = True
-     symbols = dict.fromkeys(iter_symbols(code))
-     for bad in ['__file__', '__path__']:
-         if bad in symbols:
-             log.warn("%s: module references %s", module, bad)
-             safe = False
-     if 'inspect' in symbols:
-         for bad in [
-             'getsource', 'getabsfile', 'getsourcefile', 'getfile',
-             'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
-             'getinnerframes', 'getouterframes', 'stack', 'trace'
-         ]:
-             if bad in symbols:
-                 log.warn("%s: module MAY be using inspect.%s", module, bad)
-                 safe = False
-     return safe
-
-
- def iter_symbols(code):
-     """Yield names and strings used by `code` and its nested code objects"""
-     for name in code.co_names:
-         yield name
-     for const in code.co_consts:
-         if isinstance(const, str):
-             yield const
-         elif isinstance(const, CodeType):
-             for name in iter_symbols(const):
-                 yield name
-
-
- def can_scan():
-     if not sys.platform.startswith('java') and sys.platform != 'cli':
-         # CPython, PyPy, etc.
-         return True
-     log.warn("Unable to analyze compiled code on this platform.")
-     log.warn("Please ask the author to include a 'zip_safe'"
-              " setting (either True or False) in the package's setup.py")
-
-
- # Attribute names of options for commands that might need to be convinced to
- # install to the egg build directory
-
- INSTALL_DIRECTORY_ATTRS = [
-     'install_lib', 'install_dir', 'install_data', 'install_base'
- ]
-
-
- def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
-                  mode='w'):
-     """Create a zip file from all the files under 'base_dir'. The output
-     zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
-     Python module (if available) or the InfoZIP "zip" utility (if installed
-     and found on the default search path). If neither tool is available,
-     raises DistutilsExecError. Returns the name of the output zip file.
-     """
-     import zipfile
-
-     mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
-     log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
-     def visit(z, dirname, names):
-         for name in names:
-             path = os.path.normpath(os.path.join(dirname, name))
-             if os.path.isfile(path):
-                 p = path[len(base_dir) + 1:]
-                 if not dry_run:
-                     z.write(path, p)
-                 log.debug("adding '%s'", p)
-
-     compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
-     if not dry_run:
-         z = zipfile.ZipFile(zip_filename, mode, compression=compression)
-         for dirname, dirs, files in sorted_walk(base_dir):
-             visit(z, dirname, files)
-         z.close()
-     else:
-         for dirname, dirs, files in sorted_walk(base_dir):
-             visit(None, dirname, files)
-     return zip_filename
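
strip_module determines the stub loader's name for a compiled extension: os.path.splitext drops the platform tag and a trailing 'module' suffix is stripped. Two illustrative calls (the filenames are hypothetical):

print(strip_module('_speedups.cpython-311-x86_64-linux-gnu'))  # -> '_speedups'
print(strip_module('foomodule'))                               # -> 'foo'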
 
spaces/Awesimo/jojogan/e4e_projection.py DELETED
@@ -1,38 +0,0 @@
- import os
- import sys
- import numpy as np
- from PIL import Image
- import torch
- import torchvision.transforms as transforms
- from argparse import Namespace
- from e4e.models.psp import pSp
- from util import *
-
-
- @torch.no_grad()
- def projection(img, name, device='cuda'):
-     model_path = 'e4e_ffhq_encode.pt'
-     ckpt = torch.load(model_path, map_location='cpu')
-     opts = ckpt['opts']
-     opts['checkpoint_path'] = model_path
-     opts = Namespace(**opts)
-     net = pSp(opts, device).eval().to(device)
-
-     transform = transforms.Compose(
-         [
-             transforms.Resize(256),
-             transforms.CenterCrop(256),
-             transforms.ToTensor(),
-             transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
-         ]
-     )
-
-     img = transform(img).unsqueeze(0).to(device)
-     images, w_plus = net(img, randomize_noise=False, return_latents=True)
-     result_file = {}
-     result_file['latent'] = w_plus[0]
-     torch.save(result_file, name)
-     return w_plus[0]
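
A usage sketch for the function above, assuming the e4e checkpoint e4e_ffhq_encode.pt sits in the working directory; the input and output paths are hypothetical:

from PIL import Image

face = Image.open('aligned_face.png').convert('RGB')               # hypothetical aligned face image
w_plus = projection(face, 'aligned_face_w_plus.pt', device='cuda')
print(w_plus.shape)  # the W+ latent for the FFHQ e4e encoder, e.g. torch.Size([18, 512])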
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/datasets/objects365.py DELETED
@@ -1,394 +0,0 @@
1
- from detectron2.data.datasets.register_coco import register_coco_instances
2
- import os
3
-
4
- categories_v1 = [
5
- {'id': 164, 'name': 'cutting/chopping board'} ,
6
- {'id': 49, 'name': 'tie'} ,
7
- {'id': 306, 'name': 'crosswalk sign'} ,
8
- {'id': 145, 'name': 'gun'} ,
9
- {'id': 14, 'name': 'street lights'} ,
10
- {'id': 223, 'name': 'bar soap'} ,
11
- {'id': 74, 'name': 'wild bird'} ,
12
- {'id': 219, 'name': 'ice cream'} ,
13
- {'id': 37, 'name': 'stool'} ,
14
- {'id': 25, 'name': 'storage box'} ,
15
- {'id': 153, 'name': 'giraffe'} ,
16
- {'id': 52, 'name': 'pen/pencil'} ,
17
- {'id': 61, 'name': 'high heels'} ,
18
- {'id': 340, 'name': 'mangosteen'} ,
19
- {'id': 22, 'name': 'bracelet'} ,
20
- {'id': 155, 'name': 'piano'} ,
21
- {'id': 162, 'name': 'vent'} ,
22
- {'id': 75, 'name': 'laptop'} ,
23
- {'id': 236, 'name': 'toaster'} ,
24
- {'id': 231, 'name': 'fire truck'} ,
25
- {'id': 42, 'name': 'basket'} ,
26
- {'id': 150, 'name': 'zebra'} ,
27
- {'id': 124, 'name': 'head phone'} ,
28
- {'id': 90, 'name': 'sheep'} ,
29
- {'id': 322, 'name': 'steak'} ,
30
- {'id': 39, 'name': 'couch'} ,
31
- {'id': 209, 'name': 'toothbrush'} ,
32
- {'id': 59, 'name': 'bicycle'} ,
33
- {'id': 336, 'name': 'red cabbage'} ,
34
- {'id': 228, 'name': 'golf ball'} ,
35
- {'id': 120, 'name': 'tomato'} ,
36
- {'id': 132, 'name': 'computer box'} ,
37
- {'id': 8, 'name': 'cup'} ,
38
- {'id': 183, 'name': 'basketball'} ,
39
- {'id': 298, 'name': 'butterfly'} ,
40
- {'id': 250, 'name': 'garlic'} ,
41
- {'id': 12, 'name': 'desk'} ,
42
- {'id': 141, 'name': 'microwave'} ,
43
- {'id': 171, 'name': 'strawberry'} ,
44
- {'id': 200, 'name': 'kettle'} ,
45
- {'id': 63, 'name': 'van'} ,
46
- {'id': 300, 'name': 'cheese'} ,
47
- {'id': 215, 'name': 'marker'} ,
48
- {'id': 100, 'name': 'blackboard/whiteboard'} ,
49
- {'id': 186, 'name': 'printer'} ,
50
- {'id': 333, 'name': 'bread/bun'} ,
51
- {'id': 243, 'name': 'penguin'} ,
52
- {'id': 364, 'name': 'iron'} ,
53
- {'id': 180, 'name': 'ladder'} ,
54
- {'id': 34, 'name': 'flag'} ,
55
- {'id': 78, 'name': 'cell phone'} ,
56
- {'id': 97, 'name': 'fan'} ,
57
- {'id': 224, 'name': 'scale'} ,
58
- {'id': 151, 'name': 'duck'} ,
59
- {'id': 319, 'name': 'flute'} ,
60
- {'id': 156, 'name': 'stop sign'} ,
61
- {'id': 290, 'name': 'rickshaw'} ,
62
- {'id': 128, 'name': 'sailboat'} ,
63
- {'id': 165, 'name': 'tennis racket'} ,
64
- {'id': 241, 'name': 'cigar'} ,
65
- {'id': 101, 'name': 'balloon'} ,
66
- {'id': 308, 'name': 'hair drier'} ,
67
- {'id': 167, 'name': 'skating and skiing shoes'} ,
68
- {'id': 237, 'name': 'helicopter'} ,
69
- {'id': 65, 'name': 'sink'} ,
70
- {'id': 129, 'name': 'tangerine'} ,
71
- {'id': 330, 'name': 'crab'} ,
72
- {'id': 320, 'name': 'measuring cup'} ,
73
- {'id': 260, 'name': 'fishing rod'} ,
74
- {'id': 346, 'name': 'saw'} ,
75
- {'id': 216, 'name': 'ship'} ,
76
- {'id': 46, 'name': 'coffee table'} ,
77
- {'id': 194, 'name': 'facial mask'} ,
78
- {'id': 281, 'name': 'stapler'} ,
79
- {'id': 118, 'name': 'refrigerator'} ,
80
- {'id': 40, 'name': 'belt'} ,
81
- {'id': 349, 'name': 'starfish'} ,
82
- {'id': 87, 'name': 'hanger'} ,
83
- {'id': 116, 'name': 'baseball glove'} ,
84
- {'id': 261, 'name': 'cherry'} ,
85
- {'id': 334, 'name': 'baozi'} ,
86
- {'id': 267, 'name': 'screwdriver'} ,
87
- {'id': 158, 'name': 'converter'} ,
88
- {'id': 335, 'name': 'lion'} ,
89
- {'id': 170, 'name': 'baseball'} ,
90
- {'id': 111, 'name': 'skis'} ,
91
- {'id': 136, 'name': 'broccoli'} ,
92
- {'id': 342, 'name': 'eraser'} ,
93
- {'id': 337, 'name': 'polar bear'} ,
94
- {'id': 139, 'name': 'shovel'} ,
95
- {'id': 193, 'name': 'extension cord'} ,
96
- {'id': 284, 'name': 'goldfish'} ,
97
- {'id': 174, 'name': 'pepper'} ,
98
- {'id': 138, 'name': 'stroller'} ,
99
- {'id': 328, 'name': 'yak'} ,
100
- {'id': 83, 'name': 'clock'} ,
101
- {'id': 235, 'name': 'tricycle'} ,
102
- {'id': 248, 'name': 'parking meter'} ,
103
- {'id': 274, 'name': 'trophy'} ,
104
- {'id': 324, 'name': 'binoculars'} ,
105
- {'id': 51, 'name': 'traffic light'} ,
106
- {'id': 314, 'name': 'donkey'} ,
107
- {'id': 45, 'name': 'barrel/bucket'} ,
108
- {'id': 292, 'name': 'pomegranate'} ,
109
- {'id': 13, 'name': 'handbag'} ,
110
- {'id': 262, 'name': 'tablet'} ,
111
- {'id': 68, 'name': 'apple'} ,
112
- {'id': 226, 'name': 'cabbage'} ,
113
- {'id': 23, 'name': 'flower'} ,
114
- {'id': 58, 'name': 'faucet'} ,
115
- {'id': 206, 'name': 'tong'} ,
116
- {'id': 291, 'name': 'trombone'} ,
117
- {'id': 160, 'name': 'carrot'} ,
118
- {'id': 172, 'name': 'bow tie'} ,
119
- {'id': 122, 'name': 'tent'} ,
120
- {'id': 163, 'name': 'cookies'} ,
121
- {'id': 115, 'name': 'remote'} ,
122
- {'id': 175, 'name': 'coffee machine'} ,
123
- {'id': 238, 'name': 'green beans'} ,
124
- {'id': 233, 'name': 'cello'} ,
125
- {'id': 28, 'name': 'wine glass'} ,
126
- {'id': 295, 'name': 'mushroom'} ,
127
- {'id': 344, 'name': 'scallop'} ,
128
- {'id': 125, 'name': 'lantern'} ,
129
- {'id': 123, 'name': 'shampoo/shower gel'} ,
130
- {'id': 285, 'name': 'meat balls'} ,
131
- {'id': 266, 'name': 'key'} ,
132
- {'id': 296, 'name': 'calculator'} ,
133
- {'id': 168, 'name': 'scissors'} ,
134
- {'id': 103, 'name': 'cymbal'} ,
135
- {'id': 6, 'name': 'bottle'} ,
136
- {'id': 264, 'name': 'nuts'} ,
137
- {'id': 234, 'name': 'notepaper'} ,
138
- {'id': 211, 'name': 'mango'} ,
139
- {'id': 287, 'name': 'toothpaste'} ,
140
- {'id': 196, 'name': 'chopsticks'} ,
141
- {'id': 140, 'name': 'baseball bat'} ,
142
- {'id': 244, 'name': 'hurdle'} ,
143
- {'id': 195, 'name': 'tennis ball'} ,
144
- {'id': 144, 'name': 'surveillance camera'} ,
145
- {'id': 271, 'name': 'volleyball'} ,
146
- {'id': 94, 'name': 'keyboard'} ,
147
- {'id': 339, 'name': 'seal'} ,
148
- {'id': 11, 'name': 'picture/frame'} ,
149
- {'id': 348, 'name': 'okra'} ,
150
- {'id': 191, 'name': 'sausage'} ,
151
- {'id': 166, 'name': 'candy'} ,
152
- {'id': 62, 'name': 'ring'} ,
153
- {'id': 311, 'name': 'dolphin'} ,
154
- {'id': 273, 'name': 'eggplant'} ,
155
- {'id': 84, 'name': 'drum'} ,
156
- {'id': 143, 'name': 'surfboard'} ,
157
- {'id': 288, 'name': 'antelope'} ,
158
- {'id': 204, 'name': 'clutch'} ,
159
- {'id': 207, 'name': 'slide'} ,
160
- {'id': 43, 'name': 'towel/napkin'} ,
161
- {'id': 352, 'name': 'durian'} ,
162
- {'id': 276, 'name': 'board eraser'} ,
163
- {'id': 315, 'name': 'electric drill'} ,
164
- {'id': 312, 'name': 'sushi'} ,
165
- {'id': 198, 'name': 'pie'} ,
166
- {'id': 106, 'name': 'pickup truck'} ,
167
- {'id': 176, 'name': 'bathtub'} ,
168
- {'id': 26, 'name': 'vase'} ,
169
- {'id': 133, 'name': 'elephant'} ,
170
- {'id': 256, 'name': 'sandwich'} ,
171
- {'id': 327, 'name': 'noodles'} ,
172
- {'id': 10, 'name': 'glasses'} ,
173
- {'id': 109, 'name': 'airplane'} ,
174
- {'id': 95, 'name': 'tripod'} ,
175
- {'id': 247, 'name': 'CD'} ,
176
- {'id': 121, 'name': 'machinery vehicle'} ,
177
- {'id': 365, 'name': 'flashlight'} ,
178
- {'id': 53, 'name': 'microphone'} ,
179
- {'id': 270, 'name': 'pliers'} ,
180
- {'id': 362, 'name': 'chainsaw'} ,
181
- {'id': 259, 'name': 'bear'} ,
182
- {'id': 197, 'name': 'electronic stove and gas stove'} ,
183
- {'id': 89, 'name': 'pot/pan'} ,
184
- {'id': 220, 'name': 'tape'} ,
185
- {'id': 338, 'name': 'lighter'} ,
186
- {'id': 177, 'name': 'snowboard'} ,
187
- {'id': 214, 'name': 'violin'} ,
188
- {'id': 217, 'name': 'chicken'} ,
189
- {'id': 2, 'name': 'sneakers'} ,
190
- {'id': 161, 'name': 'washing machine'} ,
191
- {'id': 131, 'name': 'kite'} ,
192
- {'id': 354, 'name': 'rabbit'} ,
193
- {'id': 86, 'name': 'bus'} ,
194
- {'id': 275, 'name': 'dates'} ,
195
- {'id': 282, 'name': 'camel'} ,
196
- {'id': 88, 'name': 'nightstand'} ,
197
- {'id': 179, 'name': 'grapes'} ,
198
- {'id': 229, 'name': 'pine apple'} ,
199
- {'id': 56, 'name': 'necklace'} ,
200
- {'id': 18, 'name': 'leather shoes'} ,
201
- {'id': 358, 'name': 'hoverboard'} ,
202
- {'id': 345, 'name': 'pencil case'} ,
203
- {'id': 359, 'name': 'pasta'} ,
204
- {'id': 157, 'name': 'radiator'} ,
205
- {'id': 201, 'name': 'hamburger'} ,
206
- {'id': 268, 'name': 'globe'} ,
207
- {'id': 332, 'name': 'barbell'} ,
208
- {'id': 329, 'name': 'mop'} ,
209
- {'id': 252, 'name': 'horn'} ,
210
- {'id': 350, 'name': 'eagle'} ,
211
- {'id': 169, 'name': 'folder'} ,
212
- {'id': 137, 'name': 'toilet'} ,
213
- {'id': 5, 'name': 'lamp'} ,
214
- {'id': 27, 'name': 'bench'} ,
215
- {'id': 249, 'name': 'swan'} ,
216
- {'id': 76, 'name': 'knife'} ,
217
- {'id': 341, 'name': 'comb'} ,
218
- {'id': 64, 'name': 'watch'} ,
219
- {'id': 105, 'name': 'telephone'} ,
220
- {'id': 3, 'name': 'chair'} ,
221
- {'id': 33, 'name': 'boat'} ,
222
- {'id': 107, 'name': 'orange'} ,
223
- {'id': 60, 'name': 'bread'} ,
224
- {'id': 147, 'name': 'cat'} ,
225
- {'id': 135, 'name': 'gas stove'} ,
226
- {'id': 307, 'name': 'papaya'} ,
227
- {'id': 227, 'name': 'router/modem'} ,
228
- {'id': 357, 'name': 'asparagus'} ,
229
- {'id': 73, 'name': 'motorcycle'} ,
230
- {'id': 77, 'name': 'traffic sign'} ,
231
- {'id': 67, 'name': 'fish'} ,
232
- {'id': 326, 'name': 'radish'} ,
233
- {'id': 213, 'name': 'egg'} ,
234
- {'id': 203, 'name': 'cucumber'} ,
235
- {'id': 17, 'name': 'helmet'} ,
236
- {'id': 110, 'name': 'luggage'} ,
237
- {'id': 80, 'name': 'truck'} ,
238
- {'id': 199, 'name': 'frisbee'} ,
239
- {'id': 232, 'name': 'peach'} ,
240
- {'id': 1, 'name': 'person'} ,
241
- {'id': 29, 'name': 'boots'} ,
242
- {'id': 310, 'name': 'chips'} ,
243
- {'id': 142, 'name': 'skateboard'} ,
244
- {'id': 44, 'name': 'slippers'} ,
245
- {'id': 4, 'name': 'hat'} ,
246
- {'id': 178, 'name': 'suitcase'} ,
247
- {'id': 24, 'name': 'tv'} ,
248
- {'id': 119, 'name': 'train'} ,
249
- {'id': 82, 'name': 'power outlet'} ,
250
- {'id': 245, 'name': 'swing'} ,
251
- {'id': 15, 'name': 'book'} ,
252
- {'id': 294, 'name': 'jellyfish'} ,
253
- {'id': 192, 'name': 'fire extinguisher'} ,
254
- {'id': 212, 'name': 'deer'} ,
255
- {'id': 181, 'name': 'pear'} ,
256
- {'id': 347, 'name': 'table tennis paddle'} ,
257
- {'id': 113, 'name': 'trolley'} ,
258
- {'id': 91, 'name': 'guitar'} ,
259
- {'id': 202, 'name': 'golf club'} ,
260
- {'id': 221, 'name': 'wheelchair'} ,
261
- {'id': 254, 'name': 'saxophone'} ,
262
- {'id': 117, 'name': 'paper towel'} ,
263
- {'id': 303, 'name': 'race car'} ,
264
- {'id': 240, 'name': 'carriage'} ,
265
- {'id': 246, 'name': 'radio'} ,
266
- {'id': 318, 'name': 'parrot'} ,
267
- {'id': 251, 'name': 'french fries'} ,
268
- {'id': 98, 'name': 'dog'} ,
269
- {'id': 112, 'name': 'soccer'} ,
270
- {'id': 355, 'name': 'french horn'} ,
271
- {'id': 79, 'name': 'paddle'} ,
272
- {'id': 283, 'name': 'lettuce'} ,
273
- {'id': 9, 'name': 'car'} ,
274
- {'id': 258, 'name': 'kiwi fruit'} ,
275
- {'id': 325, 'name': 'llama'} ,
276
- {'id': 187, 'name': 'billiards'} ,
277
- {'id': 210, 'name': 'facial cleanser'} ,
278
- {'id': 81, 'name': 'cow'} ,
279
- {'id': 331, 'name': 'microscope'} ,
280
- {'id': 148, 'name': 'lemon'} ,
281
- {'id': 302, 'name': 'pomelo'} ,
282
- {'id': 85, 'name': 'fork'} ,
283
- {'id': 154, 'name': 'pumpkin'} ,
284
- {'id': 289, 'name': 'shrimp'} ,
285
- {'id': 71, 'name': 'teddy bear'} ,
286
- {'id': 184, 'name': 'potato'} ,
287
- {'id': 102, 'name': 'air conditioner'} ,
288
- {'id': 208, 'name': 'hot dog'} ,
289
- {'id': 222, 'name': 'plum'} ,
290
- {'id': 316, 'name': 'spring rolls'} ,
291
- {'id': 230, 'name': 'crane'} ,
292
- {'id': 149, 'name': 'liquid soap'} ,
293
- {'id': 55, 'name': 'canned'} ,
294
- {'id': 35, 'name': 'speaker'} ,
295
- {'id': 108, 'name': 'banana'} ,
296
- {'id': 297, 'name': 'treadmill'} ,
297
- {'id': 99, 'name': 'spoon'} ,
298
- {'id': 104, 'name': 'mouse'} ,
299
- {'id': 182, 'name': 'american football'} ,
300
- {'id': 299, 'name': 'egg tart'} ,
301
- {'id': 127, 'name': 'cleaning products'} ,
302
- {'id': 313, 'name': 'urinal'} ,
303
- {'id': 286, 'name': 'medal'} ,
304
- {'id': 239, 'name': 'brush'} ,
305
- {'id': 96, 'name': 'hockey'} ,
306
- {'id': 279, 'name': 'dumbbell'} ,
307
- {'id': 32, 'name': 'umbrella'} ,
308
- {'id': 272, 'name': 'hammer'} ,
309
- {'id': 16, 'name': 'plate'} ,
310
- {'id': 21, 'name': 'potted plant'} ,
311
- {'id': 242, 'name': 'earphone'} ,
312
- {'id': 70, 'name': 'candle'} ,
313
- {'id': 185, 'name': 'paint brush'} ,
314
- {'id': 48, 'name': 'toy'} ,
315
- {'id': 130, 'name': 'pizza'} ,
316
- {'id': 255, 'name': 'trumpet'} ,
317
- {'id': 361, 'name': 'hotair balloon'} ,
318
- {'id': 188, 'name': 'fire hydrant'} ,
319
- {'id': 50, 'name': 'bed'} ,
320
- {'id': 253, 'name': 'avocado'} ,
321
- {'id': 293, 'name': 'coconut'} ,
322
- {'id': 257, 'name': 'cue'} ,
323
- {'id': 280, 'name': 'hamimelon'} ,
324
- {'id': 66, 'name': 'horse'} ,
325
- {'id': 173, 'name': 'pigeon'} ,
326
- {'id': 190, 'name': 'projector'} ,
327
- {'id': 69, 'name': 'camera'} ,
328
- {'id': 30, 'name': 'bowl'} ,
329
- {'id': 269, 'name': 'broom'} ,
330
- {'id': 343, 'name': 'pitaya'} ,
331
- {'id': 305, 'name': 'tuba'} ,
332
- {'id': 309, 'name': 'green onion'} ,
333
- {'id': 363, 'name': 'lobster'} ,
334
- {'id': 225, 'name': 'watermelon'} ,
335
- {'id': 47, 'name': 'suv'} ,
336
- {'id': 31, 'name': 'dining table'} ,
337
- {'id': 54, 'name': 'sandals'} ,
338
- {'id': 351, 'name': 'monkey'} ,
339
- {'id': 218, 'name': 'onion'} ,
340
- {'id': 36, 'name': 'trash bin/can'} ,
341
- {'id': 20, 'name': 'glove'} ,
342
- {'id': 277, 'name': 'rice'} ,
343
- {'id': 152, 'name': 'sports car'} ,
344
- {'id': 360, 'name': 'target'} ,
345
- {'id': 205, 'name': 'blender'} ,
346
- {'id': 19, 'name': 'pillow'} ,
347
- {'id': 72, 'name': 'cake'} ,
348
- {'id': 93, 'name': 'tea pot'} ,
349
- {'id': 353, 'name': 'game board'} ,
350
- {'id': 38, 'name': 'backpack'} ,
351
- {'id': 356, 'name': 'ambulance'} ,
352
- {'id': 146, 'name': 'life saver'} ,
353
- {'id': 189, 'name': 'goose'} ,
354
- {'id': 278, 'name': 'tape measure/ruler'} ,
355
- {'id': 92, 'name': 'traffic cone'} ,
356
- {'id': 134, 'name': 'toiletries'} ,
357
- {'id': 114, 'name': 'oven'} ,
358
- {'id': 317, 'name': 'tortoise/turtle'} ,
359
- {'id': 265, 'name': 'corn'} ,
360
- {'id': 126, 'name': 'donut'} ,
361
- {'id': 57, 'name': 'mirror'} ,
362
- {'id': 7, 'name': 'cabinet/shelf'} ,
363
- {'id': 263, 'name': 'green vegetables'} ,
364
- {'id': 159, 'name': 'tissue '} ,
365
- {'id': 321, 'name': 'shark'} ,
366
- {'id': 301, 'name': 'pig'} ,
367
- {'id': 41, 'name': 'carpet'} ,
368
- {'id': 304, 'name': 'rice cooker'} ,
369
- {'id': 323, 'name': 'poker card'} ,
370
- ]
371
-
372
- def _get_builtin_metadata(version):
373
- if version == 'v1':
374
- id_to_name = {x['id']: x['name'] for x in categories_v1}
375
- else:
376
- assert 0, version
377
- thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(365)}
378
- thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
379
- return {
380
- "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
381
- "thing_classes": thing_classes}
382
-
383
- _PREDEFINED_SPLITS_OBJECTS365 = {
384
- "objects365_train": ("objects365/train", "objects365/annotations/objects365_train.json"),
385
- "objects365_val": ("objects365/val", "objects365/annotations/objects365_val.json"),
386
- }
387
-
388
- for key, (image_root, json_file) in _PREDEFINED_SPLITS_OBJECTS365.items():
389
- register_coco_instances(
390
- key,
391
- _get_builtin_metadata('v1'),
392
- os.path.join("datasets", json_file) if "://" not in json_file else json_file,
393
- os.path.join("datasets", image_root),
394
- )
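For context, a hedged sketch of how the splits registered above could then be consumed; DatasetCatalog and MetadataCatalog are standard detectron2 APIs, but the snippet assumes the Objects365 images and annotations actually exist under datasets/ and is illustrative, not part of the original file:

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

# the registration loop above must have run first (e.g. by importing this module)
dataset_dicts = DatasetCatalog.get("objects365_val")  # lazily loads the COCO-style json
metadata = MetadataCatalog.get("objects365_val")

print(len(metadata.thing_classes))    # 365 contiguous class names
print(dataset_dicts[0]["file_name"])  # path of the first image record
```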
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/benchmark.py DELETED
@@ -1,197 +0,0 @@
1
- #!/usr/bin/env python
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
- """
4
- A script to benchmark builtin models.
5
-
6
- Note: this script has an extra dependency of psutil.
7
- """
8
-
9
- import itertools
10
- import logging
11
- import psutil
12
- import torch
13
- import tqdm
14
- from fvcore.common.timer import Timer
15
- from torch.nn.parallel import DistributedDataParallel
16
-
17
- from detectron2.checkpoint import DetectionCheckpointer
18
- from detectron2.config import LazyConfig, get_cfg, instantiate
19
- from detectron2.data import (
20
- DatasetFromList,
21
- build_detection_test_loader,
22
- build_detection_train_loader,
23
- )
24
- from detectron2.data.benchmark import DataLoaderBenchmark
25
- from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
26
- from detectron2.modeling import build_model
27
- from detectron2.solver import build_optimizer
28
- from detectron2.utils import comm
29
- from detectron2.utils.collect_env import collect_env_info
30
- from detectron2.utils.events import CommonMetricPrinter
31
- from detectron2.utils.logger import setup_logger
32
-
33
- logger = logging.getLogger("detectron2")
34
-
35
-
36
- def setup(args):
37
- if args.config_file.endswith(".yaml"):
38
- cfg = get_cfg()
39
- cfg.merge_from_file(args.config_file)
40
- cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway.
41
- cfg.merge_from_list(args.opts)
42
- cfg.freeze()
43
- else:
44
- cfg = LazyConfig.load(args.config_file)
45
- cfg = LazyConfig.apply_overrides(cfg, args.opts)
46
- setup_logger(distributed_rank=comm.get_rank())
47
- return cfg
48
-
49
-
50
- def create_data_benchmark(cfg, args):
51
- if args.config_file.endswith(".py"):
52
- dl_cfg = cfg.dataloader.train
53
- dl_cfg._target_ = DataLoaderBenchmark
54
- return instantiate(dl_cfg)
55
- else:
56
- kwargs = build_detection_train_loader.from_config(cfg)
57
- kwargs.pop("aspect_ratio_grouping", None)
58
- kwargs["_target_"] = DataLoaderBenchmark
59
- return instantiate(kwargs)
60
-
61
-
62
- def RAM_msg():
63
- vram = psutil.virtual_memory()
64
- return "RAM Usage: {:.2f}/{:.2f} GB".format(
65
- (vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3
66
- )
67
-
68
-
69
- def benchmark_data(args):
70
- cfg = setup(args)
71
- logger.info("After spawning " + RAM_msg())
72
-
73
- benchmark = create_data_benchmark(cfg, args)
74
- benchmark.benchmark_distributed(250, 10)
75
- # test for a few more rounds
76
- for k in range(10):
77
- logger.info(f"Iteration {k} " + RAM_msg())
78
- benchmark.benchmark_distributed(250, 1)
79
-
80
-
81
- def benchmark_data_advanced(args):
82
- # benchmark dataloader with more details to help analyze performance bottleneck
83
- cfg = setup(args)
84
- benchmark = create_data_benchmark(cfg, args)
85
-
86
- if comm.get_rank() == 0:
87
- benchmark.benchmark_dataset(100)
88
- benchmark.benchmark_mapper(100)
89
- benchmark.benchmark_workers(100, warmup=10)
90
- benchmark.benchmark_IPC(100, warmup=10)
91
- if comm.get_world_size() > 1:
92
- benchmark.benchmark_distributed(100)
93
- logger.info("Rerun ...")
94
- benchmark.benchmark_distributed(100)
95
-
96
-
97
- def benchmark_train(args):
98
- cfg = setup(args)
99
- model = build_model(cfg)
100
- logger.info("Model:\n{}".format(model))
101
- if comm.get_world_size() > 1:
102
- model = DistributedDataParallel(
103
- model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
104
- )
105
- optimizer = build_optimizer(cfg, model)
106
- checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
107
- checkpointer.load(cfg.MODEL.WEIGHTS)
108
-
109
- cfg.defrost()
110
- cfg.DATALOADER.NUM_WORKERS = 2
111
- data_loader = build_detection_train_loader(cfg)
112
- dummy_data = list(itertools.islice(data_loader, 100))
113
-
114
- def f():
115
- data = DatasetFromList(dummy_data, copy=False, serialize=False)
116
- while True:
117
- yield from data
118
-
119
- max_iter = 400
120
- trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer)
121
- trainer.register_hooks(
122
- [
123
- hooks.IterationTimer(),
124
- hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
125
- hooks.TorchProfiler(
126
- lambda trainer: trainer.iter == max_iter - 1, cfg.OUTPUT_DIR, save_tensorboard=True
127
- ),
128
- ]
129
- )
130
- trainer.train(1, max_iter)
131
-
132
-
133
- @torch.no_grad()
134
- def benchmark_eval(args):
135
- cfg = setup(args)
136
- if args.config_file.endswith(".yaml"):
137
- model = build_model(cfg)
138
- DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
139
-
140
- cfg.defrost()
141
- cfg.DATALOADER.NUM_WORKERS = 0
142
- data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
143
- else:
144
- model = instantiate(cfg.model)
145
- model.to(cfg.train.device)
146
- DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
147
-
148
- cfg.dataloader.num_workers = 0
149
- data_loader = instantiate(cfg.dataloader.test)
150
-
151
- model.eval()
152
- logger.info("Model:\n{}".format(model))
153
- dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)
154
-
155
- def f():
156
- while True:
157
- yield from dummy_data
158
-
159
- for k in range(5): # warmup
160
- model(dummy_data[k])
161
-
162
- max_iter = 300
163
- timer = Timer()
164
- with tqdm.tqdm(total=max_iter) as pbar:
165
- for idx, d in enumerate(f()):
166
- if idx == max_iter:
167
- break
168
- model(d)
169
- pbar.update()
170
- logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
171
-
172
-
173
- if __name__ == "__main__":
174
- parser = default_argument_parser()
175
- parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True)
176
- args = parser.parse_args()
177
- assert not args.eval_only
178
-
179
- logger.info("Environment info:\n" + collect_env_info())
180
- if "data" in args.task:
181
- print("Initial " + RAM_msg())
182
- if args.task == "data":
183
- f = benchmark_data
184
- if args.task == "data_advanced":
185
- f = benchmark_data_advanced
186
- elif args.task == "train":
187
- """
188
- Note: training speed may not be representative.
189
- The training cost of a R-CNN model varies with the content of the data
190
- and the quality of the model.
191
- """
192
- f = benchmark_train
193
- elif args.task == "eval":
194
- f = benchmark_eval
195
- # only benchmark single-GPU inference.
196
- assert args.num_gpus == 1 and args.num_machines == 1
197
- launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,))
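The train and eval benchmarks above deliberately take the dataloader out of the measurement: they cache roughly 100 batches in memory and replay them forever. A minimal standalone sketch of that replay idiom (the names here are illustrative, not from the script):

```python
import itertools
import time

cached = list(range(100))  # stand-in for ~100 pre-fetched batches

def replay(data):
    # endless generator over a fixed in-memory list, like f() in benchmark_train
    while True:
        yield from data

start = time.perf_counter()
for step, batch in enumerate(itertools.islice(replay(cached), 400)):
    pass  # model(batch) would go here
print(f"400 iters in {time.perf_counter() - start:.3f}s")
```

A typical invocation might look like `python benchmark.py --task data --config-file <config> --num-gpus 2`; `--task` is defined above, while `--config-file` and `--num-gpus` come from detectron2's default_argument_parser.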
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/spec_utils.py DELETED
@@ -1,672 +0,0 @@
1
- import hashlib
2
- import json
3
- import math
4
- import os
5
-
6
- import librosa
7
- import numpy as np
8
- import soundfile as sf
9
- from tqdm import tqdm
10
-
11
-
12
- def crop_center(h1, h2):
13
- h1_shape = h1.size()
14
- h2_shape = h2.size()
15
-
16
- if h1_shape[3] == h2_shape[3]:
17
- return h1
18
- elif h1_shape[3] < h2_shape[3]:
19
- raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
20
-
21
- # s_freq = (h2_shape[2] - h1_shape[2]) // 2
22
- # e_freq = s_freq + h1_shape[2]
23
- s_time = (h1_shape[3] - h2_shape[3]) // 2
24
- e_time = s_time + h2_shape[3]
25
- h1 = h1[:, :, :, s_time:e_time]
26
-
27
- return h1
28
-
29
-
30
- def wave_to_spectrogram(
31
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
32
- ):
33
- if reverse:
34
- wave_left = np.flip(np.asfortranarray(wave[0]))
35
- wave_right = np.flip(np.asfortranarray(wave[1]))
36
- elif mid_side:
37
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
38
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
39
- elif mid_side_b2:
40
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
41
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
42
- else:
43
- wave_left = np.asfortranarray(wave[0])
44
- wave_right = np.asfortranarray(wave[1])
45
-
46
- spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
47
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
48
-
49
- spec = np.asfortranarray([spec_left, spec_right])
50
-
51
- return spec
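A small numeric check of the mid/side encoding used in the mid_side branch above; this is a restatement for clarity, not part of the original module:

```python
import numpy as np

left = np.array([1.0, 0.5])
right = np.array([0.2, 0.4])

mid = (left + right) / 2  # stored in the "left" slot of the channel pair
side = left - right       # stored in the "right" slot

# decoding, as in spectrogram_to_wave's mid_side branch:
print(mid + side / 2)     # -> [1.  0.5]  (recovers left)
print(mid - side / 2)     # -> [0.2 0.4]  (recovers right)
```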
52
-
53
-
54
- def wave_to_spectrogram_mt(
55
- wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
56
- ):
57
- import threading
58
-
59
- if reverse:
60
- wave_left = np.flip(np.asfortranarray(wave[0]))
61
- wave_right = np.flip(np.asfortranarray(wave[1]))
62
- elif mid_side:
63
- wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
64
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
65
- elif mid_side_b2:
66
- wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
67
- wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
68
- else:
69
- wave_left = np.asfortranarray(wave[0])
70
- wave_right = np.asfortranarray(wave[1])
71
-
72
- def run_thread(**kwargs):
73
- global spec_left
74
- spec_left = librosa.stft(**kwargs)
75
-
76
- thread = threading.Thread(
77
- target=run_thread,
78
- kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
79
- )
80
- thread.start()
81
- spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
82
- thread.join()
83
-
84
- spec = np.asfortranarray([spec_left, spec_right])
85
-
86
- return spec
87
-
88
-
89
- def combine_spectrograms(specs, mp):
90
- l = min([specs[i].shape[2] for i in specs])
91
- spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
92
- offset = 0
93
- bands_n = len(mp.param["band"])
94
-
95
- for d in range(1, bands_n + 1):
96
- h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
97
- spec_c[:, offset : offset + h, :l] = specs[d][
98
- :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
99
- ]
100
- offset += h
101
-
102
- if offset > mp.param["bins"]:
103
- raise ValueError("Too much bins")
104
-
105
- # lowpass fiter
106
- if (
107
- mp.param["pre_filter_start"] > 0
108
- ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
109
- if bands_n == 1:
110
- spec_c = fft_lp_filter(
111
- spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
112
- )
113
- else:
114
- gp = 1
115
- for b in range(
116
- mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
117
- ):
118
- g = math.pow(
119
- 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
120
- )
121
- gp = g
122
- spec_c[:, b, :] *= g
123
-
124
- return np.asfortranarray(spec_c)
125
-
126
-
127
- def spectrogram_to_image(spec, mode="magnitude"):
128
- if mode == "magnitude":
129
- if np.iscomplexobj(spec):
130
- y = np.abs(spec)
131
- else:
132
- y = spec
133
- y = np.log10(y**2 + 1e-8)
134
- elif mode == "phase":
135
- if np.iscomplexobj(spec):
136
- y = np.angle(spec)
137
- else:
138
- y = spec
139
-
140
- y -= y.min()
141
- y *= 255 / y.max()
142
- img = np.uint8(y)
143
-
144
- if y.ndim == 3:
145
- img = img.transpose(1, 2, 0)
146
- img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
147
-
148
- return img
149
-
150
-
151
- def reduce_vocal_aggressively(X, y, softmask):
152
- v = X - y
153
- y_mag_tmp = np.abs(y)
154
- v_mag_tmp = np.abs(v)
155
-
156
- v_mask = v_mag_tmp > y_mag_tmp
157
- y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
158
-
159
- return y_mag * np.exp(1.0j * np.angle(y))
160
-
161
-
162
- def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
163
- if min_range < fade_size * 2:
164
- raise ValueError("min_range must be >= fade_area * 2")
165
-
166
- mag = mag.copy()
167
-
168
- idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
169
- starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
170
- ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
171
- uninformative = np.where(ends - starts > min_range)[0]
172
- if len(uninformative) > 0:
173
- starts = starts[uninformative]
174
- ends = ends[uninformative]
175
- old_e = None
176
- for s, e in zip(starts, ends):
177
- if old_e is not None and s - old_e < fade_size:
178
- s = old_e - fade_size * 2
179
-
180
- if s != 0:
181
- weight = np.linspace(0, 1, fade_size)
182
- mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
183
- else:
184
- s -= fade_size
185
-
186
- if e != mag.shape[2]:
187
- weight = np.linspace(1, 0, fade_size)
188
- mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
189
- else:
190
- e += fade_size
191
-
192
- mag[:, :, s + fade_size : e - fade_size] += ref[
193
- :, :, s + fade_size : e - fade_size
194
- ]
195
- old_e = e
196
-
197
- return mag
198
-
199
-
200
- def align_wave_head_and_tail(a, b):
201
- l = min([a[0].size, b[0].size])
202
-
203
- return a[:, :l], b[:, :l]  # trim both stereo channels to the common length (the original's a[:l, :l] only worked by accident on (2, n) arrays)
204
-
205
-
206
- def cache_or_load(mix_path, inst_path, mp):
207
- mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
208
- inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
209
-
210
- cache_dir = "mph{}".format(
211
- hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
212
- )
213
- mix_cache_dir = os.path.join("cache", cache_dir)
214
- inst_cache_dir = os.path.join("cache", cache_dir)
215
-
216
- os.makedirs(mix_cache_dir, exist_ok=True)
217
- os.makedirs(inst_cache_dir, exist_ok=True)
218
-
219
- mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
220
- inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
221
-
222
- if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
223
- X_spec_m = np.load(mix_cache_path)
224
- y_spec_m = np.load(inst_cache_path)
225
- else:
226
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
227
-
228
- for d in range(len(mp.param["band"]), 0, -1):
229
- bp = mp.param["band"][d]
230
-
231
- if d == len(mp.param["band"]): # high-end band
232
- X_wave[d], _ = librosa.load(
233
- mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
234
- )
235
- y_wave[d], _ = librosa.load(
236
- inst_path,
237
- bp["sr"],
238
- False,
239
- dtype=np.float32,
240
- res_type=bp["res_type"],
241
- )
242
- else: # lower bands
243
- X_wave[d] = librosa.resample(
244
- X_wave[d + 1],
245
- mp.param["band"][d + 1]["sr"],
246
- bp["sr"],
247
- res_type=bp["res_type"],
248
- )
249
- y_wave[d] = librosa.resample(
250
- y_wave[d + 1],
251
- mp.param["band"][d + 1]["sr"],
252
- bp["sr"],
253
- res_type=bp["res_type"],
254
- )
255
-
256
- X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
257
-
258
- X_spec_s[d] = wave_to_spectrogram(
259
- X_wave[d],
260
- bp["hl"],
261
- bp["n_fft"],
262
- mp.param["mid_side"],
263
- mp.param["mid_side_b2"],
264
- mp.param["reverse"],
265
- )
266
- y_spec_s[d] = wave_to_spectrogram(
267
- y_wave[d],
268
- bp["hl"],
269
- bp["n_fft"],
270
- mp.param["mid_side"],
271
- mp.param["mid_side_b2"],
272
- mp.param["reverse"],
273
- )
274
-
275
- del X_wave, y_wave
276
-
277
- X_spec_m = combine_spectrograms(X_spec_s, mp)
278
- y_spec_m = combine_spectrograms(y_spec_s, mp)
279
-
280
- if X_spec_m.shape != y_spec_m.shape:
281
- raise ValueError("The combined spectrograms are different: " + mix_path)
282
-
283
- _, ext = os.path.splitext(mix_path)
284
-
285
- np.save(mix_cache_path, X_spec_m)
286
- np.save(inst_cache_path, y_spec_m)
287
-
288
- return X_spec_m, y_spec_m
289
-
290
-
291
- def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
292
- spec_left = np.asfortranarray(spec[0])
293
- spec_right = np.asfortranarray(spec[1])
294
-
295
- wave_left = librosa.istft(spec_left, hop_length=hop_length)
296
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
297
-
298
- if reverse:
299
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
300
- elif mid_side:
301
- return np.asfortranarray(
302
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
303
- )
304
- elif mid_side_b2:
305
- return np.asfortranarray(
306
- [
307
- np.add(wave_right / 1.25, 0.4 * wave_left),
308
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
309
- ]
310
- )
311
- else:
312
- return np.asfortranarray([wave_left, wave_right])
313
-
314
-
315
- def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
316
- import threading
317
-
318
- spec_left = np.asfortranarray(spec[0])
319
- spec_right = np.asfortranarray(spec[1])
320
-
321
- def run_thread(**kwargs):
322
- global wave_left
323
- wave_left = librosa.istft(**kwargs)
324
-
325
- thread = threading.Thread(
326
- target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
327
- )
328
- thread.start()
329
- wave_right = librosa.istft(spec_right, hop_length=hop_length)
330
- thread.join()
331
-
332
- if reverse:
333
- return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
334
- elif mid_side:
335
- return np.asfortranarray(
336
- [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
337
- )
338
- elif mid_side_b2:
339
- return np.asfortranarray(
340
- [
341
- np.add(wave_right / 1.25, 0.4 * wave_left),
342
- np.subtract(wave_left / 1.25, 0.4 * wave_right),
343
- ]
344
- )
345
- else:
346
- return np.asfortranarray([wave_left, wave_right])
347
-
348
-
349
- def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
350
- wave_band = {}
351
- bands_n = len(mp.param["band"])
352
- offset = 0
353
-
354
- for d in range(1, bands_n + 1):
355
- bp = mp.param["band"][d]
356
- spec_s = np.ndarray(
357
- shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
358
- )
359
- h = bp["crop_stop"] - bp["crop_start"]
360
- spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
361
- :, offset : offset + h, :
362
- ]
363
-
364
- offset += h
365
- if d == bands_n: # higher
366
- if extra_bins_h: # if --high_end_process bypass
367
- max_bin = bp["n_fft"] // 2
368
- spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
369
- :, :extra_bins_h, :
370
- ]
371
- if bp["hpf_start"] > 0:
372
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
373
- if bands_n == 1:
374
- wave = spectrogram_to_wave(
375
- spec_s,
376
- bp["hl"],
377
- mp.param["mid_side"],
378
- mp.param["mid_side_b2"],
379
- mp.param["reverse"],
380
- )
381
- else:
382
- wave = np.add(
383
- wave,
384
- spectrogram_to_wave(
385
- spec_s,
386
- bp["hl"],
387
- mp.param["mid_side"],
388
- mp.param["mid_side_b2"],
389
- mp.param["reverse"],
390
- ),
391
- )
392
- else:
393
- sr = mp.param["band"][d + 1]["sr"]
394
- if d == 1: # lower
395
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
396
- wave = librosa.resample(
397
- spectrogram_to_wave(
398
- spec_s,
399
- bp["hl"],
400
- mp.param["mid_side"],
401
- mp.param["mid_side_b2"],
402
- mp.param["reverse"],
403
- ),
404
- bp["sr"],
405
- sr,
406
- res_type="sinc_fastest",
407
- )
408
- else: # mid
409
- spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
410
- spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
411
- wave2 = np.add(
412
- wave,
413
- spectrogram_to_wave(
414
- spec_s,
415
- bp["hl"],
416
- mp.param["mid_side"],
417
- mp.param["mid_side_b2"],
418
- mp.param["reverse"],
419
- ),
420
- )
421
- # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
422
- wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
423
-
424
- return wave.T
425
-
426
-
427
- def fft_lp_filter(spec, bin_start, bin_stop):
428
- g = 1.0
429
- for b in range(bin_start, bin_stop):
430
- g -= 1 / (bin_stop - bin_start)
431
- spec[:, b, :] = g * spec[:, b, :]
432
-
433
- spec[:, bin_stop:, :] *= 0
434
-
435
- return spec
436
-
437
-
438
- def fft_hp_filter(spec, bin_start, bin_stop):
439
- g = 1.0
440
- for b in range(bin_start, bin_stop, -1):
441
- g -= 1 / (bin_start - bin_stop)
442
- spec[:, b, :] = g * spec[:, b, :]
443
-
444
- spec[:, 0 : bin_stop + 1, :] *= 0
445
-
446
- return spec
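To make the two filters above concrete, here is a tiny standalone check of the linear gain ramp that fft_lp_filter applies between bin_start and bin_stop (synthetic shapes, not the module's real band configuration):

```python
import numpy as np

spec = np.ones((2, 12, 3), dtype=np.complex64)  # (channels, freq bins, frames)
bin_start, bin_stop = 4, 8

g = 1.0
for b in range(bin_start, bin_stop):  # gain fades linearly from 1 toward 0
    g -= 1 / (bin_stop - bin_start)
    spec[:, b, :] *= g
spec[:, bin_stop:, :] = 0             # everything above the stop bin is muted

print(np.abs(spec[0, :, 0]).round(2))
# -> [1. 1. 1. 1. 0.75 0.5 0.25 0. 0. 0. 0. 0.]
```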
447
-
448
-
449
- def mirroring(a, spec_m, input_high_end, mp):
450
- if "mirroring" == a:
451
- mirror = np.flip(
452
- np.abs(
453
- spec_m[
454
- :,
455
- mp.param["pre_filter_start"]
456
- - 10
457
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
458
- - 10,
459
- :,
460
- ]
461
- ),
462
- 1,
463
- )
464
- mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
465
-
466
- return np.where(
467
- np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
468
- )
469
-
470
- if "mirroring2" == a:
471
- mirror = np.flip(
472
- np.abs(
473
- spec_m[
474
- :,
475
- mp.param["pre_filter_start"]
476
- - 10
477
- - input_high_end.shape[1] : mp.param["pre_filter_start"]
478
- - 10,
479
- :,
480
- ]
481
- ),
482
- 1,
483
- )
484
- mi = np.multiply(mirror, input_high_end * 1.7)
485
-
486
- return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
487
-
488
-
489
- def ensembling(a, specs):
490
- for i in range(1, len(specs)):
491
- if i == 1:
492
- spec = specs[0]
493
-
494
- ln = min([spec.shape[2], specs[i].shape[2]])
495
- spec = spec[:, :, :ln]
496
- specs[i] = specs[i][:, :, :ln]
497
-
498
- if "min_mag" == a:
499
- spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
500
- if "max_mag" == a:
501
- spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
502
-
503
- return spec
504
-
505
-
506
- def stft(wave, nfft, hl):
507
- wave_left = np.asfortranarray(wave[0])
508
- wave_right = np.asfortranarray(wave[1])
509
- spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
510
- spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
511
- spec = np.asfortranarray([spec_left, spec_right])
512
-
513
- return spec
514
-
515
-
516
- def istft(spec, hl):
517
- spec_left = np.asfortranarray(spec[0])
518
- spec_right = np.asfortranarray(spec[1])
519
-
520
- wave_left = librosa.istft(spec_left, hop_length=hl)
521
- wave_right = librosa.istft(spec_right, hop_length=hl)
522
- wave = np.asfortranarray([wave_left, wave_right])
523
- return wave  # bug fix: the original computed the waveform but never returned it
524
-
525
- if __name__ == "__main__":
526
- import argparse
527
- import sys
528
- import time
529
-
530
- import cv2
531
- from model_param_init import ModelParameters
532
-
533
- p = argparse.ArgumentParser()
534
- p.add_argument(
535
- "--algorithm",
536
- "-a",
537
- type=str,
538
- choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
539
- default="min_mag",
540
- )
541
- p.add_argument(
542
- "--model_params",
543
- "-m",
544
- type=str,
545
- default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
546
- )
547
- p.add_argument("--output_name", "-o", type=str, default="output")
548
- p.add_argument("--vocals_only", "-v", action="store_true")
549
- p.add_argument("input", nargs="+")
550
- args = p.parse_args()
551
-
552
- start_time = time.time()
553
-
554
- if args.algorithm.startswith("invert") and len(args.input) != 2:
555
- raise ValueError("There should be two input files.")
556
-
557
- if not args.algorithm.startswith("invert") and len(args.input) < 2:
558
- raise ValueError("There must be at least two input files.")
559
-
560
- wave, specs = {}, {}
561
- mp = ModelParameters(args.model_params)
562
-
563
- for i in range(len(args.input)):
564
- spec = {}
565
-
566
- for d in range(len(mp.param["band"]), 0, -1):
567
- bp = mp.param["band"][d]
568
-
569
- if d == len(mp.param["band"]): # high-end band
570
- wave[d], _ = librosa.load(
571
- args.input[i],
572
- bp["sr"],
573
- False,
574
- dtype=np.float32,
575
- res_type=bp["res_type"],
576
- )
577
-
578
- if len(wave[d].shape) == 1: # mono to stereo
579
- wave[d] = np.array([wave[d], wave[d]])
580
- else: # lower bands
581
- wave[d] = librosa.resample(
582
- wave[d + 1],
583
- mp.param["band"][d + 1]["sr"],
584
- bp["sr"],
585
- res_type=bp["res_type"],
586
- )
587
-
588
- spec[d] = wave_to_spectrogram(
589
- wave[d],
590
- bp["hl"],
591
- bp["n_fft"],
592
- mp.param["mid_side"],
593
- mp.param["mid_side_b2"],
594
- mp.param["reverse"],
595
- )
596
-
597
- specs[i] = combine_spectrograms(spec, mp)
598
-
599
- del wave
600
-
601
- if args.algorithm == "deep":
602
- d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])  # bug fix: was spec[1] (the per-band dict), not the combined spectrogram specs[1]
603
- v_spec = d_spec - specs[1]
604
- sf.write(
605
- os.path.join("{}.wav".format(args.output_name)),
606
- cmb_spectrogram_to_wave(v_spec, mp),
607
- mp.param["sr"],
608
- )
609
-
610
- if args.algorithm.startswith("invert"):
611
- ln = min([specs[0].shape[2], specs[1].shape[2]])
612
- specs[0] = specs[0][:, :, :ln]
613
- specs[1] = specs[1][:, :, :ln]
614
-
615
- if "invert_p" == args.algorithm:
616
- X_mag = np.abs(specs[0])
617
- y_mag = np.abs(specs[1])
618
- max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
619
- v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
620
- else:
621
- specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
622
- v_spec = specs[0] - specs[1]
623
-
624
- if not args.vocals_only:
625
- X_mag = np.abs(specs[0])
626
- y_mag = np.abs(specs[1])
627
- v_mag = np.abs(v_spec)
628
-
629
- X_image = spectrogram_to_image(X_mag)
630
- y_image = spectrogram_to_image(y_mag)
631
- v_image = spectrogram_to_image(v_mag)
632
-
633
- cv2.imwrite("{}_X.png".format(args.output_name), X_image)
634
- cv2.imwrite("{}_y.png".format(args.output_name), y_image)
635
- cv2.imwrite("{}_v.png".format(args.output_name), v_image)
636
-
637
- sf.write(
638
- "{}_X.wav".format(args.output_name),
639
- cmb_spectrogram_to_wave(specs[0], mp),
640
- mp.param["sr"],
641
- )
642
- sf.write(
643
- "{}_y.wav".format(args.output_name),
644
- cmb_spectrogram_to_wave(specs[1], mp),
645
- mp.param["sr"],
646
- )
647
-
648
- sf.write(
649
- "{}_v.wav".format(args.output_name),
650
- cmb_spectrogram_to_wave(v_spec, mp),
651
- mp.param["sr"],
652
- )
653
- else:
654
- if not args.algorithm == "deep":
655
- sf.write(
656
- os.path.join("ensembled", "{}.wav".format(args.output_name)),
657
- cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
658
- mp.param["sr"],
659
- )
660
-
661
- if args.algorithm == "align":
662
- trackalignment = [
663
- {
664
- "file1": '"{}"'.format(args.input[0]),
665
- "file2": '"{}"'.format(args.input[1]),
666
- }
667
- ]
668
-
669
- for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
670
- os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
671
-
672
- # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
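As a sanity check of the per-channel layout the module uses throughout, a (2, freq, frames) array with left and right processed independently, here is a minimal stereo STFT round trip. Note it uses keyword arguments (n_fft=...), since the positional librosa.stft(wave, n_fft, ...) calls above rely on an older librosa API:

```python
import numpy as np
import librosa

sr = 44100
t = np.arange(sr) / sr
wave = np.asfortranarray([np.sin(2 * np.pi * 440 * t),   # left channel
                          np.sin(2 * np.pi * 660 * t)])  # right channel

n_fft, hl = 2048, 512
spec = np.asfortranarray(
    [librosa.stft(np.asfortranarray(wave[0]), n_fft=n_fft, hop_length=hl),
     librosa.stft(np.asfortranarray(wave[1]), n_fft=n_fft, hop_length=hl)])
recon = np.asfortranarray(
    [librosa.istft(spec[0], hop_length=hl),
     librosa.istft(spec[1], hop_length=hl)])

print(spec.shape)   # (2, n_fft // 2 + 1, frames)
print(recon.shape)  # (2, ~sr): a close reconstruction of the input
```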
spaces/Benson/text-generation/Examples/Agar.io Indir Apk.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>Agar.io Indir Apk: How to Download and Play the Popular Online Game</h1>
3
- <p>Are you looking for a fun, addictive online game you can play on your Android device? If so, you may want to try Agar.io, a massively multiplayer online action game with millions of fans around the world. In this article, we will tell you what Agar.io is, why you should download its apk file, how to download and install it, how to play it online with friends, and what the game's reviews say. Let's get started!</p>
4
- <h2>What is Agar.io?</h2>
5
- <p>Agar.io is a game created by the Brazilian developer Matheus Valadares in 2015. It is based on the concept of eating agar, a substance used to culture bacteria in a Petri dish. In the game, you control a circular cell that can eat smaller cells and agar pellets to grow larger, while avoiding bigger cells that can eat you. The game has simple but addictive gameplay that appeals to players of all ages and backgrounds.</p>
6
- <h2>agar.io indir apk</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://bltlly.com/2v6K4H">https://bltlly.com/2v6K4H</a></b></p><br /><br />
7
- <h3>Agar.io gameplay</h3>
8
- <p>Agar.io's gameplay is easy to learn but hard to master. You start with a small cell that you can move around the map with your finger or mouse. You can eat agar pellets scattered randomly around the map to increase your mass slightly, or eat other cells smaller than you to increase your mass significantly. However, you also have to watch out for cells bigger than you, since they can eat you and end your game.</p>
9
- <p>You can also use two buttons to improve your play. The split button divides your cell into two smaller cells that move faster and eat smaller cells more easily. However, splitting also makes you more vulnerable to larger cells that can eat your smaller pieces. The eject button expels some mass from your cell in the direction you are pointing. You can use this to feed other cells, shoot viruses at them, or escape from them.</p>
10
- <h3>Agar.io features</h3>
11
-
12
- <ul>
13
- <li>Multiple game modes: You can play in different modes such as FFA (Free-For-All), Battle Royale, Teams, Experimental, and Party. Each mode has its own rules and challenges.</li>
14
- <li>Special skins: You can customize the appearance of your cell with words, phrases, symbols, or predefined skins. Some skins are secret and require specific usernames to unlock.</li>
15
- <li>Leaderboards and stats: You can see your rank and score on the leaderboard and compare them with other players. You can also view stats such as your highest mass, longest survival time, number of cells eaten, and so on.</li>
16
- <li>Social features: You can chat with other players in the game or invite them to join your party. You can also share your gameplay on social media platforms such as Facebook or Twitter.</li>
17
- </ul>
18
- <h2>Why download the Agar.io apk?</h2>
19
- <p>If you want to play Agar.io on your Android device, you may wonder why you should download its apk file instead of installing the game from the Google Play Store. Well, there are several reasons why downloading the Agar.io apk is the better option for you. Here are some of them:</p>
20
- <h3>The benefits of downloading the Agar.io apk</h3>
21
- <p>Downloading the Agar.io apk has many benefits, such as:</p>
22
- <ul>
23
- <li>It is free: You do not have to pay anything to download and play the Agar.io apk. You can enjoy the game without ads or in-app purchases.</li>
24
- <li>It is fast: You do not have to wait for the game to download and install from the Google Play Store. You can download the Agar.io apk directly from a trusted source and install it in a few minutes.</li>
25
- <li>It is up to date: You do not have to worry about missing new features or bug fixes released by the game's developers. You can always download the latest version of the Agar.io apk and enjoy the game at its best performance and quality.</li>
26
-
27
- </ul>
28
- <h3>The requirements for downloading the Agar.io apk</h3>
29
- <p>Before downloading the Agar.io apk, you need to make sure your device meets the following requirements:</p>
30
- <p></p>
31
- <ul>
32
- <li>Android version: You need Android 4.4 or higher on your device.</li>
33
- <li>Storage space: You need at least 50 MB of free storage space on your device.</li>
34
- <li>Internet connection: You need a stable, fast internet connection to play Agar.io online.</li>
35
- <li>Permission settings: You need to enable unknown sources in your device's security settings to install the Agar.io apk.</li>
36
- </ul>
37
- <h2>How to download and install the Agar.io apk?</h2>
38
- <p>Now that you know why to download the Agar.io apk, you may wonder how to do it. Don't worry, it is quick and simple. Just follow these steps:</p>
39
- <h3>The steps to download and install the Agar.io apk</h3>
40
- <ol>
41
- <li>Go to a trusted website that offers the Agar.io apk file, such as [ApkPure] or [ApkMirror].</li>
42
- <li>Find and click the download button for the Agar.io apk file. The file is about 37 MB.</li>
43
- <li>Wait for the file to download to your device. You can check the progress in the notification bar.</li>
44
- <li>Once the file has downloaded, tap it to open it. You may see a warning that says "This type of file can harm your device". Ignore it and tap "OK".</li>
45
- <li>You will see a screen asking you to install the Agar.io apk. Tap "Install" and wait for the installation to finish.</li>
46
- <li>You will see a screen that says "App installed". Tap "Open" to launch the Agar.io apk and start playing.</li>
47
- </ol>
48
- <h3>Tips and tricks for playing the Agar.io apk</h3>
49
- <p>If you want to sharpen your skills and have more fun playing the Agar.io apk, you may want to learn a few tips and tricks. Here are some of them:</p>
50
- <ul>
51
-
52
- <li>Eject mass strategically: Ejecting mass can help you feed other cells, shoot viruses at them, or escape from them, but it also reduces your mass and slows you down. Only eject mass when you have a clear purpose or plan.</li>
53
- <li>Avoid viruses: Viruses are the spiky green cells that split you into many smaller cells if you touch them. Avoid them unless you want to use them as a weapon or a shield against other cells.</li>
54
- <li>Use corners and edges: The corners and edges of the map can help you trap smaller cells or hide from bigger ones. Use them when you need an advantage or want to avoid a disadvantage.</li>
55
- <li>Team up with others: Partnering with other cells can help you survive longer and dominate the map. You can team up by feeding other players, splitting toward them, or chatting with them.</li>
56
- </ul>
57
- <h2>How to play Agar.io online with friends?</h2>
58
- <p>If you want to play Agar.io online with your friends, you may wonder how to do it. Don't worry, it is quick and simple. Just follow these steps:</p>
59
- <h3>Agar.io online modes</h3>
60
- <p>Agar.io online has different modes you can choose from, depending on your preference and mood. Some of these modes are:</p>
61
- <ul>
62
- <li>FFA (Free-For-All): This is the default mode, where you can play alone or with random players. You can join any server and try to become the biggest cell on the map.</li>
63
- <li>Battle Royale: This is a mode where you have to survive and eliminate other players in an ever-shrinking arena. You can join any server and try to be the last cell standing.</li>
64
- <li>Teams: This is a mode where you play with other players on a team. You can join any server and try to help your team dominate the map.</li>
65
- <li>Experimental: This is a mode where you can try new features and mechanics the game's developers are working on. You can join any server and try to discover new things.</li>
66
-
67
- </ul>
68
- <h3>Agar.io online strategies</h3>
69
- <p>If you want to sharpen your skills and have more fun playing Agar.io online, you may want to learn a few strategies. Here are some of them:</p>
70
- <ul>
71
- <li>Use the map: The map shows you the location of other cells, agar pellets, viruses, and borders. Use it to plan your moves and avoid danger.</li>
72
- <li>Use the chat: The chat lets you communicate with other players in the game. Use it to make friends, enemies, alliances, or jokes.</li>
73
- <li>Use the skins: Skins let you customize the appearance of your cell. Use them to express yourself, impress others, or confuse them.</li>
74
- <li>Use the leaderboard: The leaderboard shows the rank and score of the top 10 cells on the map. Use it to gauge your progress, challenge others, or avoid them.</li>
75
- <li>Use the settings: The settings let you adjust the game's graphics, sound, controls, and other options. Use them to optimize your gaming experience and performance.</li>
76
- </ul>
77
- <h2>What are the reviews of the Agar.io apk?</h2>
78
- <p>If you want to know what other players think of the Agar.io apk, you may want to read some reviews of the game. Here are some examples of positive and negative reviews from real users:</p>
79
- <h3>Positive reviews of the Agar.io apk</h3>
80
- <table>
81
- <tr><th>Name</th><th>Rating</th><th>Review</th></tr>
82
- <tr><td>Alice</td><td>5 stars</td><td>I love this game! It is so much fun and very addictive. I play it every day with my friends and we have a great time. The graphics are simple but cute, the gameplay is smooth and fast, and the modes are varied and exciting. I recommend this game to anyone who likes online games.</td></tr>
83
-
84
- <tr><td>Charlie</td><td>5 stars</td><td>This game is amazing! It is very simple yet addictive. I like how you can customize your cell with different skins and names and chat with other players in the game. The game is very social and friendly. The best part is that it is free and easy to download and install.</td></tr>
85
- </table>
86
- <h3>Negative reviews of the Agar.io apk</h3>
87
- <table>
88
- <tr><th>Name</th><th>Rating</th><th>Review</th></tr>
89
- <tr><td>Dave</td><td>2 stars</td><td>This game is boring! It is very repetitive and frustrating. I don't like how you can be eaten by bigger cells or viruses in a second and lose all your progress. The game is very unfair and random. The worst part is that it has too many ads and in-app purchases.</td></tr>
90
- <p>This game is terrible! It is very glitchy and slow. I don't like how the game freezes or crashes all the time and makes me lose my connection or my progress. The game is very buggy and unstable. The worst part is that it has too many hackers and cheaters who ruin the game for everyone else.</td></tr>
91
- <tr><td>Frank</td><td>3 stars</td><td>This game is okay. It is very simple and easy to play. I like how you can play with other players online, but also offline if you want. The game is very casual and relaxing. The only thing I don't like is that it is too basic and lacks depth. The game could use more features and modes to make it more interesting and fun.</td></tr>
92
- </table>
93
- <h2>Conclusion</h2>
94
-
95
- <h2>Frequently asked questions</h2>
96
- <p>Here are some frequently asked questions about the Agar.io apk:</p>
97
- <ul>
98
- <li>Q: Is the Agar.io apk safe to download and install?</li>
99
- <li>A: Yes, the Agar.io apk is safe to download and install as long as you get it from a trusted website that offers the original, unmodified file. You should also scan the file with antivirus software before opening it.</li>
100
- <li>Q: Is the Agar.io apk legal to download and install?</li>
101
- <li>A: Yes, the Agar.io apk is legal to download and install as long as you do not violate the terms of service or the intellectual property rights of the game's developers or publishers. You should also respect the rules and regulations of your country or region regarding online games.</li>
102
- <li>Q: Is the Agar.io apk compatible with other devices or platforms?</li>
103
- <li>A: No, the Agar.io apk is only compatible with Android devices that meet the minimum requirements. However, you can also play Agar.io on other devices or platforms such as iOS, Windows, Mac, Linux, or web browsers by visiting the game's official website.</li>
104
- <li>Q: How can I update the Agar.io apk to the latest version?</li>
105
- <li>A: You can update the Agar.io apk by downloading and installing the new file from a trusted website that offers the updated version. You should also delete the old file from your device before installing the new one.</li>
106
- <li>Q: How can I contact the Agar.io apk support team if I have a problem or question?</li>
107
- <li>A: You can contact the Agar.io apk support team by sending an email to [[email protected]] or visiting their [Facebook page] or [Twitter account]. You can also visit their [forum] or [wiki] for more information and help.</li>
108
- </ul></p> 64aa2da5cf<br />
109
- <br />
110
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/vendored/__init__.py DELETED
File without changes
spaces/Billyosoro/ESRGAN/realesrgan/models/realesrgan_model.py DELETED
@@ -1,258 +0,0 @@
1
- import numpy as np
2
- import random
3
- import torch
4
- from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
5
- from basicsr.data.transforms import paired_random_crop
6
- from basicsr.models.srgan_model import SRGANModel
7
- from basicsr.utils import DiffJPEG, USMSharp
8
- from basicsr.utils.img_process_util import filter2D
9
- from basicsr.utils.registry import MODEL_REGISTRY
10
- from collections import OrderedDict
11
- from torch.nn import functional as F
12
-
13
-
14
- @MODEL_REGISTRY.register()
15
- class RealESRGANModel(SRGANModel):
16
- """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
17
-
18
- It mainly performs:
19
- 1. randomly synthesize LQ images in GPU tensors
20
- 2. optimize the networks with GAN training.
21
- """
22
-
23
- def __init__(self, opt):
24
- super(RealESRGANModel, self).__init__(opt)
25
- self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
26
- self.usm_sharpener = USMSharp().cuda() # do usm sharpening
27
- self.queue_size = opt.get('queue_size', 180)
28
-
29
- @torch.no_grad()
30
- def _dequeue_and_enqueue(self):
31
- """It is the training pair pool for increasing the diversity in a batch.
32
-
33
- Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
34
- batch could not have different resize scaling factors. Therefore, we employ this training pair pool
35
- to increase the degradation diversity in a batch.
36
- """
37
- # initialize
38
- b, c, h, w = self.lq.size()
39
- if not hasattr(self, 'queue_lr'):
40
- assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
41
- self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
42
- _, c, h, w = self.gt.size()
43
- self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
44
- self.queue_ptr = 0
45
- if self.queue_ptr == self.queue_size: # the pool is full
46
- # do dequeue and enqueue
47
- # shuffle
48
- idx = torch.randperm(self.queue_size)
49
- self.queue_lr = self.queue_lr[idx]
50
- self.queue_gt = self.queue_gt[idx]
51
- # get first b samples
52
- lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
53
- gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
54
- # update the queue
55
- self.queue_lr[0:b, :, :, :] = self.lq.clone()
56
- self.queue_gt[0:b, :, :, :] = self.gt.clone()
57
-
58
- self.lq = lq_dequeue
59
- self.gt = gt_dequeue
60
- else:
61
- # only do enqueue
62
- self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
63
- self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
64
- self.queue_ptr = self.queue_ptr + b
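A toy single-tensor version of the same dequeue/enqueue idea, with hypothetical sizes and no model state, may make the pool logic above easier to follow:

```python
import torch

queue_size, b = 8, 2  # pool length must be divisible by the batch size
queue = torch.zeros(queue_size, 3)
ptr = 0

def dequeue_and_enqueue(batch):
    global queue, ptr
    if ptr == queue_size:  # pool full: shuffle, pop b old samples, push the new batch
        queue = queue[torch.randperm(queue_size)]
        out = queue[:b].clone()
        queue[:b] = batch
        return out
    queue[ptr:ptr + b] = batch  # pool still filling: enqueue only
    ptr += b
    return batch

for step in range(6):
    batch = torch.full((b, 3), float(step))
    print(step, dequeue_and_enqueue(batch)[:, 0].tolist())
```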
65
-
66
- @torch.no_grad()
67
- def feed_data(self, data):
68
- """Accept data from dataloader, and then add two-order degradations to obtain LQ images.
69
- """
70
- if self.is_train and self.opt.get('high_order_degradation', True):
71
- # training data synthesis
72
- self.gt = data['gt'].to(self.device)
73
- self.gt_usm = self.usm_sharpener(self.gt)
74
-
75
- self.kernel1 = data['kernel1'].to(self.device)
76
- self.kernel2 = data['kernel2'].to(self.device)
77
- self.sinc_kernel = data['sinc_kernel'].to(self.device)
78
-
79
- ori_h, ori_w = self.gt.size()[2:4]
80
-
81
- # ----------------------- The first degradation process ----------------------- #
82
- # blur
83
- out = filter2D(self.gt_usm, self.kernel1)
84
- # random resize
85
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
86
- if updown_type == 'up':
87
- scale = np.random.uniform(1, self.opt['resize_range'][1])
88
- elif updown_type == 'down':
89
- scale = np.random.uniform(self.opt['resize_range'][0], 1)
90
- else:
91
- scale = 1
92
- mode = random.choice(['area', 'bilinear', 'bicubic'])
93
- out = F.interpolate(out, scale_factor=scale, mode=mode)
94
- # add noise
95
- gray_noise_prob = self.opt['gray_noise_prob']
96
- if np.random.uniform() < self.opt['gaussian_noise_prob']:
97
- out = random_add_gaussian_noise_pt(
98
- out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
99
- else:
100
- out = random_add_poisson_noise_pt(
101
- out,
102
- scale_range=self.opt['poisson_scale_range'],
103
- gray_prob=gray_noise_prob,
104
- clip=True,
105
- rounds=False)
106
- # JPEG compression
107
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
108
- out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
109
- out = self.jpeger(out, quality=jpeg_p)
110
-
111
- # ----------------------- The second degradation process ----------------------- #
112
- # blur
113
- if np.random.uniform() < self.opt['second_blur_prob']:
114
- out = filter2D(out, self.kernel2)
115
- # random resize
116
- updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
117
- if updown_type == 'up':
118
- scale = np.random.uniform(1, self.opt['resize_range2'][1])
119
- elif updown_type == 'down':
120
- scale = np.random.uniform(self.opt['resize_range2'][0], 1)
121
- else:
122
- scale = 1
123
- mode = random.choice(['area', 'bilinear', 'bicubic'])
124
- out = F.interpolate(
125
- out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
126
- # add noise
127
- gray_noise_prob = self.opt['gray_noise_prob2']
128
- if np.random.uniform() < self.opt['gaussian_noise_prob2']:
129
- out = random_add_gaussian_noise_pt(
130
- out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
131
- else:
132
- out = random_add_poisson_noise_pt(
133
- out,
134
- scale_range=self.opt['poisson_scale_range2'],
135
- gray_prob=gray_noise_prob,
136
- clip=True,
137
- rounds=False)
138
-
139
- # JPEG compression + the final sinc filter
140
- # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
141
- # as one operation.
142
- # We consider two orders:
143
- # 1. [resize back + sinc filter] + JPEG compression
144
- # 2. JPEG compression + [resize back + sinc filter]
145
- # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
146
- if np.random.uniform() < 0.5:
147
- # resize back + the final sinc filter
148
- mode = random.choice(['area', 'bilinear', 'bicubic'])
149
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
150
- out = filter2D(out, self.sinc_kernel)
151
- # JPEG compression
152
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
153
- out = torch.clamp(out, 0, 1)
154
- out = self.jpeger(out, quality=jpeg_p)
155
- else:
156
- # JPEG compression
157
- jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
158
- out = torch.clamp(out, 0, 1)
159
- out = self.jpeger(out, quality=jpeg_p)
160
- # resize back + the final sinc filter
161
- mode = random.choice(['area', 'bilinear', 'bicubic'])
162
- out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
163
- out = filter2D(out, self.sinc_kernel)
164
-
165
- # clamp and round
166
- self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.
167
-
168
- # random crop
169
- gt_size = self.opt['gt_size']
170
- (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size,
171
- self.opt['scale'])
172
-
173
- # training pair pool
174
- self._dequeue_and_enqueue()
175
- # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue
176
- self.gt_usm = self.usm_sharpener(self.gt)
177
- self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract
178
- else:
179
- # for paired training or validation
180
- self.lq = data['lq'].to(self.device)
181
- if 'gt' in data:
182
- self.gt = data['gt'].to(self.device)
183
- self.gt_usm = self.usm_sharpener(self.gt)
184
-
185
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
186
- # do not use the synthetic process during validation
187
- self.is_train = False
188
- super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
189
- self.is_train = True
190
-
191
- def optimize_parameters(self, current_iter):
192
- # usm sharpening
193
- l1_gt = self.gt_usm
194
- percep_gt = self.gt_usm
195
- gan_gt = self.gt_usm
196
- if self.opt['l1_gt_usm'] is False:
197
- l1_gt = self.gt
198
- if self.opt['percep_gt_usm'] is False:
199
- percep_gt = self.gt
200
- if self.opt['gan_gt_usm'] is False:
201
- gan_gt = self.gt
202
-
203
- # optimize net_g
204
- for p in self.net_d.parameters():
205
- p.requires_grad = False
206
-
207
- self.optimizer_g.zero_grad()
208
- self.output = self.net_g(self.lq)
209
-
210
- l_g_total = 0
211
- loss_dict = OrderedDict()
212
- if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
213
- # pixel loss
214
- if self.cri_pix:
215
- l_g_pix = self.cri_pix(self.output, l1_gt)
216
- l_g_total += l_g_pix
217
- loss_dict['l_g_pix'] = l_g_pix
218
- # perceptual loss
219
- if self.cri_perceptual:
220
- l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt)
221
- if l_g_percep is not None:
222
- l_g_total += l_g_percep
223
- loss_dict['l_g_percep'] = l_g_percep
224
- if l_g_style is not None:
225
- l_g_total += l_g_style
226
- loss_dict['l_g_style'] = l_g_style
227
- # gan loss
228
- fake_g_pred = self.net_d(self.output)
229
- l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
230
- l_g_total += l_g_gan
231
- loss_dict['l_g_gan'] = l_g_gan
232
-
233
- l_g_total.backward()
234
- self.optimizer_g.step()
235
-
236
- # optimize net_d
237
- for p in self.net_d.parameters():
238
- p.requires_grad = True
239
-
240
- self.optimizer_d.zero_grad()
241
- # real
242
- real_d_pred = self.net_d(gan_gt)
243
- l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
244
- loss_dict['l_d_real'] = l_d_real
245
- loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
246
- l_d_real.backward()
247
- # fake
248
- fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9
249
- l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
250
- loss_dict['l_d_fake'] = l_d_fake
251
- loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
252
- l_d_fake.backward()
253
- self.optimizer_d.step()
254
-
255
- if self.ema_decay > 0:
256
- self.model_ema(decay=self.ema_decay)
257
-
258
- self.log_dict = self.reduce_loss_dict(loss_dict)
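The `_dequeue_and_enqueue` pool above is easier to follow in isolation. Below is a minimal, hypothetical re-statement of the same idea (invented class name `PairPool`, one CPU queue instead of the paired `.cuda()` buffers); it is a sketch of the mechanism, not the project's API:

import torch

class PairPool:
    """Hold a pool of past samples and trade a fresh batch against a
    shuffled slice of it, so one training batch ends up mixing
    degradations synthesized across several iterations."""

    def __init__(self, queue_size, c, h, w):
        self.queue = torch.zeros(queue_size, c, h, w)
        self.size = queue_size
        self.ptr = 0

    def exchange(self, batch):
        b = batch.size(0)
        assert self.size % b == 0, 'queue size should be divisible by batch size'
        if self.ptr == self.size:  # pool full: shuffle, then swap out b samples
            idx = torch.randperm(self.size)
            self.queue = self.queue[idx]
            out = self.queue[:b].clone()
            self.queue[:b] = batch.clone()
            return out
        self.queue[self.ptr:self.ptr + b] = batch.clone()  # warm-up: enqueue only
        self.ptr += b
        return batch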
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp DELETED
@@ -1,73 +0,0 @@
- // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- #include "../box_iou_rotated/box_iou_rotated_utils.h"
- #include "nms_rotated.h"
-
- namespace detectron2 {
-
- template <typename scalar_t>
- at::Tensor nms_rotated_cpu_kernel(
-     const at::Tensor& dets,
-     const at::Tensor& scores,
-     const float iou_threshold) {
-   // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel,
-   // however, the code in this function is much shorter because
-   // we delegate the IoU computation for rotated boxes to
-   // the single_box_iou_rotated function in box_iou_rotated_utils.h
-   AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
-   AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
-   AT_ASSERTM(
-       dets.type() == scores.type(), "dets should have the same type as scores");
-
-   if (dets.numel() == 0) {
-     return at::empty({0}, dets.options().dtype(at::kLong));
-   }
-
-   auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
-
-   auto ndets = dets.size(0);
-   at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte));
-   at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong));
-
-   auto suppressed = suppressed_t.data_ptr<uint8_t>();
-   auto keep = keep_t.data_ptr<int64_t>();
-   auto order = order_t.data_ptr<int64_t>();
-
-   int64_t num_to_keep = 0;
-
-   for (int64_t _i = 0; _i < ndets; _i++) {
-     auto i = order[_i];
-     if (suppressed[i] == 1) {
-       continue;
-     }
-
-     keep[num_to_keep++] = i;
-
-     for (int64_t _j = _i + 1; _j < ndets; _j++) {
-       auto j = order[_j];
-       if (suppressed[j] == 1) {
-         continue;
-       }
-
-       auto ovr = single_box_iou_rotated<scalar_t>(
-           dets[i].data_ptr<scalar_t>(), dets[j].data_ptr<scalar_t>());
-       if (ovr >= iou_threshold) {
-         suppressed[j] = 1;
-       }
-     }
-   }
-   return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep);
- }
-
- at::Tensor nms_rotated_cpu(
-     const at::Tensor& dets,
-     const at::Tensor& scores,
-     const float iou_threshold) {
-   auto result = at::empty({0}, dets.options());
-
-   AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] {
-     result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
-   });
-   return result;
- }
-
- } // namespace detectron2
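The C++ kernel above is a plain greedy suppression loop; only the IoU call is specific to rotated boxes. A hedged Python sketch of the same loop follows, with `iou_fn` standing in for `single_box_iou_rotated` (the stand-in is an assumption, not part of the original file):

import numpy as np

def greedy_nms(dets, scores, iou_threshold, iou_fn):
    # Visit boxes in descending score order; keep a box unless an
    # already-kept box overlaps it by at least iou_threshold.
    order = np.argsort(-scores)
    suppressed = np.zeros(len(dets), dtype=bool)
    keep = []
    for pos, i in enumerate(order):
        if suppressed[i]:
            continue
        keep.append(i)
        for j in order[pos + 1:]:
            if not suppressed[j] and iou_fn(dets[i], dets[j]) >= iou_threshold:
                suppressed[j] = True
    return np.array(keep, dtype=np.int64)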
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/mask_ops.py DELETED
@@ -1,247 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import numpy as np
- import torch
- from PIL import Image
- from torch.nn import functional as F
-
- __all__ = ["paste_masks_in_image"]
-
-
- BYTES_PER_FLOAT = 4
- # TODO: This memory limit may be too much or too little. It would be better to
- # determine it based on available resources.
- GPU_MEM_LIMIT = 1024 ** 3  # 1 GB memory limit
-
-
- def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
-     """
-     Args:
-         masks: N, 1, H, W
-         boxes: N, 4
-         img_h, img_w (int):
-         skip_empty (bool): only paste masks within the region that
-             tightly bounds all boxes, and return the results for this region only.
-             An important optimization for CPU.
-
-     Returns:
-         if skip_empty == False, a mask of shape (N, img_h, img_w)
-         if skip_empty == True, a mask of shape (N, h', w'), and the slice
-             object for the corresponding region.
-     """
-     # On GPU, paste all masks together (up to chunk size)
-     # by using the entire image to sample the masks.
-     # Compared to pasting them one by one,
-     # this has more operations but is faster on COCO-scale datasets.
-     device = masks.device
-     if skip_empty:
-         x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(
-             dtype=torch.int32
-         )
-         x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
-         y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
-     else:
-         x0_int, y0_int = 0, 0
-         x1_int, y1_int = img_w, img_h
-     x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1
-
-     N = masks.shape[0]
-
-     img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
-     img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
-     img_y = (img_y - y0) / (y1 - y0) * 2 - 1
-     img_x = (img_x - x0) / (x1 - x0) * 2 - 1
-     # img_x, img_y have shapes (N, w), (N, h)
-
-     gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
-     gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
-     grid = torch.stack([gx, gy], dim=3)
-
-     img_masks = F.grid_sample(masks.to(dtype=torch.float32), grid, align_corners=False)
-
-     if skip_empty:
-         return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
-     else:
-         return img_masks[:, 0], ()
-
-
- def paste_masks_in_image(masks, boxes, image_shape, threshold=0.5):
-     """
-     Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image.
-     The location, height, and width for pasting each mask is determined by their
-     corresponding bounding boxes in boxes.
-
-     Note:
-         This is a complicated but more accurate implementation. In actual deployment, it is
-         often enough to use a faster but less accurate implementation.
-         See :func:`paste_mask_in_image_old` in this file for an alternative implementation.
-
-     Args:
-         masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of
-             detected object instances in the image and Hmask, Wmask are the mask height and
-             mask width of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1].
-         boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4).
-             boxes[i] and masks[i] correspond to the same object instance.
-         image_shape (tuple): height, width
-         threshold (float): A threshold in [0, 1] for converting the (soft) masks to
-             binary masks.
-
-     Returns:
-         img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
-             number of detected object instances and Himage, Wimage are the image height
-             and width. img_masks[i] is a binary mask for object instance i.
-     """
-
-     assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported"
-     N = len(masks)
-     if N == 0:
-         return masks.new_empty((0,) + image_shape, dtype=torch.uint8)
-     if not isinstance(boxes, torch.Tensor):
-         boxes = boxes.tensor
-     device = boxes.device
-     assert len(boxes) == N, boxes.shape
-
-     img_h, img_w = image_shape
-
-     # The actual implementation splits the input into chunks,
-     # and pastes them chunk by chunk.
-     if device.type == "cpu":
-         # CPU is most efficient when masks are pasted one by one with skip_empty=True,
-         # so that it performs a minimal number of operations.
-         num_chunks = N
-     else:
-         # GPU benefits from parallelism for larger chunks, but may have memory issues
-         num_chunks = int(np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
-         assert (
-             num_chunks <= N
-         ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it"
-     chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
-
-     img_masks = torch.zeros(
-         N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8
-     )
-     for inds in chunks:
-         masks_chunk, spatial_inds = _do_paste_mask(
-             masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu"
-         )
-
-         if threshold >= 0:
-             masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
-         else:
-             # for visualization and debugging
-             masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
-
-         img_masks[(inds,) + spatial_inds] = masks_chunk
-     return img_masks
-
-
- # Below is the original paste function (from Detectron1), which has
- # larger quantization error.
- # It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample.
-
-
- def paste_mask_in_image_old(mask, box, img_h, img_w, threshold):
-     """
-     Paste a single mask in an image.
-     This is a per-box implementation of :func:`paste_masks_in_image`.
-     This function has larger quantization error due to incorrect pixel
-     modeling and is not used any more.
-
-     Args:
-         mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single
-             object instance. Values are in [0, 1].
-         box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners
-             of the object instance.
-         img_h, img_w (int): Image height and width.
-         threshold (float): Mask binarization threshold in [0, 1].
-
-     Returns:
-         im_mask (Tensor):
-             The resized and binarized object mask pasted into the original
-             image plane (a tensor of shape (img_h, img_w)).
-     """
-     # Conversion from continuous box coordinates to discrete pixel coordinates
-     # via truncation (cast to int32). This determines which pixels to paste the
-     # mask onto.
-     box = box.to(dtype=torch.int32)  # Continuous to discrete coordinate conversion
-     # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to
-     # discrete coordinates (x0=0, x1=4). Note that the box is mapped to 5 = x1 - x0 + 1
-     # pixels (not x1 - x0 pixels).
-     samples_w = box[2] - box[0] + 1  # Number of pixel samples, *not* geometric width
-     samples_h = box[3] - box[1] + 1  # Number of pixel samples, *not* geometric height
-
-     # Resample the mask from its original grid to the new samples_w x samples_h grid
-     mask = Image.fromarray(mask.cpu().numpy())
-     mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR)
-     mask = np.array(mask, copy=False)
-
-     if threshold >= 0:
-         mask = np.array(mask > threshold, dtype=np.uint8)
-         mask = torch.from_numpy(mask)
-     else:
-         # for visualization and debugging, we also
-         # allow it to return an unmodified mask
-         mask = torch.from_numpy(mask * 255).to(torch.uint8)
-
-     im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8)
-     x_0 = max(box[0], 0)
-     x_1 = min(box[2] + 1, img_w)
-     y_0 = max(box[1], 0)
-     y_1 = min(box[3] + 1, img_h)
-
-     im_mask[y_0:y_1, x_0:x_1] = mask[
-         (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
-     ]
-     return im_mask
-
-
- # Our pixel modeling requires extrapolation for any continuous
- # coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks,
- # we would like this extrapolation to be an interpolation between boundary values and zero,
- # instead of using absolute zero or boundary values.
- # Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this:
- # masks, scale = pad_masks(masks[:, 0, :, :], 1)
- # boxes = scale_boxes(boxes.tensor, scale)
-
-
- def pad_masks(masks, padding):
-     """
-     Args:
-         masks (tensor): A tensor of shape (B, M, M) representing B masks.
-         padding (int): Number of cells to pad on all sides.
-
-     Returns:
-         The padded masks and the scale factor of the padding size / original size.
-     """
-     B = masks.shape[0]
-     M = masks.shape[-1]
-     pad2 = 2 * padding
-     scale = float(M + pad2) / M
-     padded_masks = masks.new_zeros((B, M + pad2, M + pad2))
-     padded_masks[:, padding:-padding, padding:-padding] = masks
-     return padded_masks, scale
-
-
- def scale_boxes(boxes, scale):
-     """
-     Args:
-         boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4
-             coords representing the corners x0, y0, x1, y1.
-         scale (float): The box scaling factor.
-
-     Returns:
-         Scaled boxes.
-     """
-     w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5
-     h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5
-     x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5
-     y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5
-
-     w_half *= scale
-     h_half *= scale
-
-     scaled_boxes = torch.zeros_like(boxes)
-     scaled_boxes[:, 0] = x_c - w_half
-     scaled_boxes[:, 2] = x_c + w_half
-     scaled_boxes[:, 1] = y_c - h_half
-     scaled_boxes[:, 3] = y_c + h_half
-     return scaled_boxes
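A short usage sketch for `paste_masks_in_image`, with dummy shapes taken from its docstring (28x28 soft masks, xyxy boxes); the tensor values are illustrative only:

import torch
# paste_masks_in_image is defined in the mask_ops.py shown above.

masks = torch.rand(3, 28, 28)  # 3 soft masks with values in [0, 1]
boxes = torch.tensor([[10., 10., 50., 60.],
                      [ 0., 20., 30., 45.],
                      [40.,  5., 90., 70.]])
img_masks = paste_masks_in_image(masks, boxes, image_shape=(100, 120), threshold=0.5)
print(img_masks.shape, img_masks.dtype)  # torch.Size([3, 100, 120]) torch.bool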
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/convert-torchvision-to-d2.py DELETED
@@ -1,56 +0,0 @@
- #!/usr/bin/env python
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
- import pickle as pkl
- import sys
- import torch
-
- """
- Usage:
-   # download one of the ResNet{18,34,50,101,152} models from torchvision:
-   wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
-   # run the conversion
-   ./convert-torchvision-to-d2.py r50.pth r50.pkl
-
-   # Then, use r50.pkl with the following changes in config:
-
- MODEL:
-   WEIGHTS: "/path/to/r50.pkl"
-   PIXEL_MEAN: [123.675, 116.280, 103.530]
-   PIXEL_STD: [58.395, 57.120, 57.375]
-   RESNETS:
-     DEPTH: 50
-     STRIDE_IN_1X1: False
- INPUT:
-   FORMAT: "RGB"
-
- These models typically produce slightly worse results than the
- pre-trained ResNets we use in official configs, which are the
- original ResNet models released by MSRA.
- """
-
- if __name__ == "__main__":
-     input = sys.argv[1]
-
-     obj = torch.load(input, map_location="cpu")
-
-     newmodel = {}
-     for k in list(obj.keys()):
-         old_k = k
-         if "layer" not in k:
-             k = "stem." + k
-         for t in [1, 2, 3, 4]:
-             k = k.replace("layer{}".format(t), "res{}".format(t + 1))
-         for t in [1, 2, 3]:
-             k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
-         k = k.replace("downsample.0", "shortcut")
-         k = k.replace("downsample.1", "shortcut.norm")
-         print(old_k, "->", k)
-         newmodel[k] = obj.pop(old_k).detach().numpy()
-
-     res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
-
-     with open(sys.argv[2], "wb") as f:
-         pkl.dump(res, f)
-     if obj:
-         print("Unconverted keys:", obj.keys())
spaces/CVPR/LIVE/thrust/thrust/random/detail/linear_feedback_shift_engine_wordmask.h DELETED
@@ -1,47 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- namespace thrust
- {
-
- namespace random
- {
-
- namespace detail
- {
-
- template<typename T, int w, int i = w-1>
-   struct linear_feedback_shift_engine_wordmask
- {
-   static const T value =
-     (T(1u) << i) |
-     linear_feedback_shift_engine_wordmask<T, w, i-1>::value;
- }; // end linear_feedback_shift_engine_wordmask
-
- template<typename T, int w>
-   struct linear_feedback_shift_engine_wordmask<T, w, 0>
- {
-   static const T value = 0;
- }; // end linear_feedback_shift_engine_wordmask
-
- } // end detail
-
- } // end random
-
- } // end thrust
-
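The template recursion is straightforward to evaluate at runtime. Note that, as written, the `i == 0` specialization returns 0, so bit 0 is never OR'd in: for a w-bit engine the computed mask has bits 1..w-1 set, i.e. (1 << w) - 2. A Python sketch mirroring the recursion shown above:

def wordmask(w, i=None):
    # Mirrors linear_feedback_shift_engine_wordmask<T, w, i> as written,
    # including the i == 0 base case that returns 0.
    i = w - 1 if i is None else i
    return 0 if i == 0 else (1 << i) | wordmask(w, i - 1)

for w in (3, 8, 16):
    print(w, bin(wordmask(w)), wordmask(w) == (1 << w) - 2)  # True each time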
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/reduce.h DELETED
@@ -1,23 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system inherits reduce
- #include <thrust/system/detail/sequential/reduce.h>
-
spaces/CVPR/WALT/mmdet/core/bbox/samplers/base_sampler.py DELETED
@@ -1,101 +0,0 @@
- from abc import ABCMeta, abstractmethod
-
- import torch
-
- from .sampling_result import SamplingResult
-
-
- class BaseSampler(metaclass=ABCMeta):
-     """Base class of samplers."""
-
-     def __init__(self,
-                  num,
-                  pos_fraction,
-                  neg_pos_ub=-1,
-                  add_gt_as_proposals=True,
-                  **kwargs):
-         self.num = num
-         self.pos_fraction = pos_fraction
-         self.neg_pos_ub = neg_pos_ub
-         self.add_gt_as_proposals = add_gt_as_proposals
-         self.pos_sampler = self
-         self.neg_sampler = self
-
-     @abstractmethod
-     def _sample_pos(self, assign_result, num_expected, **kwargs):
-         """Sample positive samples."""
-         pass
-
-     @abstractmethod
-     def _sample_neg(self, assign_result, num_expected, **kwargs):
-         """Sample negative samples."""
-         pass
-
-     def sample(self,
-                assign_result,
-                bboxes,
-                gt_bboxes,
-                gt_labels=None,
-                **kwargs):
-         """Sample positive and negative bboxes.
-
-         This is a simple implementation of bbox sampling given candidates,
-         assigning results and ground truth bboxes.
-
-         Args:
-             assign_result (:obj:`AssignResult`): Bbox assigning results.
-             bboxes (Tensor): Boxes to be sampled from.
-             gt_bboxes (Tensor): Ground truth bboxes.
-             gt_labels (Tensor, optional): Class labels of ground truth bboxes.
-
-         Returns:
-             :obj:`SamplingResult`: Sampling result.
-
-         Example:
-             >>> from mmdet.core.bbox import RandomSampler
-             >>> from mmdet.core.bbox import AssignResult
-             >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
-             >>> rng = ensure_rng(None)
-             >>> assign_result = AssignResult.random(rng=rng)
-             >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
-             >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
-             >>> gt_labels = None
-             >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
-             >>>                      add_gt_as_proposals=False)
-             >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
-         """
-         if len(bboxes.shape) < 2:
-             bboxes = bboxes[None, :]
-
-         bboxes = bboxes[:, :4]
-
-         gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
-         if self.add_gt_as_proposals and len(gt_bboxes) > 0:
-             if gt_labels is None:
-                 raise ValueError(
-                     'gt_labels must be given when add_gt_as_proposals is True')
-             bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
-             assign_result.add_gt_(gt_labels)
-             gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
-             gt_flags = torch.cat([gt_ones, gt_flags])
-
-         num_expected_pos = int(self.num * self.pos_fraction)
-         pos_inds = self.pos_sampler._sample_pos(
-             assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
-         # We found that sampled indices have duplicated items occasionally.
-         # (may be a bug of PyTorch)
-         pos_inds = pos_inds.unique()
-         num_sampled_pos = pos_inds.numel()
-         num_expected_neg = self.num - num_sampled_pos
-         if self.neg_pos_ub >= 0:
-             _pos = max(1, num_sampled_pos)
-             neg_upper_bound = int(self.neg_pos_ub * _pos)
-             if num_expected_neg > neg_upper_bound:
-                 num_expected_neg = neg_upper_bound
-         neg_inds = self.neg_sampler._sample_neg(
-             assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
-         neg_inds = neg_inds.unique()
-
-         sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
-                                          assign_result, gt_flags)
-         return sampling_result
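A hedged sketch of a minimal concrete subclass of `BaseSampler`, to show what the two abstract hooks are expected to return. It assumes mmdet's `AssignResult.gt_inds` convention (0 = negative, > 0 = assigned ground truth), which is not stated in this file:

import torch

class ToyRandomSampler(BaseSampler):
    def _sample_pos(self, assign_result, num_expected, **kwargs):
        # Indices of candidates assigned to some ground truth (assumed convention).
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False).flatten()
        if pos_inds.numel() <= num_expected:
            return pos_inds
        return pos_inds[torch.randperm(pos_inds.numel())[:num_expected]]

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        # Indices of candidates assigned to background (assumed convention).
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False).flatten()
        return neg_inds[torch.randperm(neg_inds.numel())[:num_expected]]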
spaces/CVPR/WALT/mmdet/utils/logger.py DELETED
@@ -1,19 +0,0 @@
- import logging
-
- from mmcv.utils import get_logger
-
-
- def get_root_logger(log_file=None, log_level=logging.INFO):
-     """Get root logger.
-
-     Args:
-         log_file (str, optional): File path of log. Defaults to None.
-         log_level (int, optional): The level of logger.
-             Defaults to logging.INFO.
-
-     Returns:
-         :obj:`logging.Logger`: The obtained logger
-     """
-     logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
-
-     return logger
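Usage sketch for the helper above (the log path is illustrative):

import logging

logger = get_root_logger(log_file='work_dirs/train.log', log_level=logging.INFO)
logger.info('mmdet root logger initialized')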
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/modeling/mask_decoder.py DELETED
@@ -1,176 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- from typing import List, Tuple, Type
-
- from .common import LayerNorm2d
-
-
- class MaskDecoder(nn.Module):
-     def __init__(
-         self,
-         *,
-         transformer_dim: int,
-         transformer: nn.Module,
-         num_multimask_outputs: int = 3,
-         activation: Type[nn.Module] = nn.GELU,
-         iou_head_depth: int = 3,
-         iou_head_hidden_dim: int = 256,
-     ) -> None:
-         """
-         Predicts masks given an image and prompt embeddings, using a
-         transformer architecture.
-
-         Arguments:
-           transformer_dim (int): the channel dimension of the transformer
-           transformer (nn.Module): the transformer used to predict masks
-           num_multimask_outputs (int): the number of masks to predict
-             when disambiguating masks
-           activation (nn.Module): the type of activation to use when
-             upscaling masks
-           iou_head_depth (int): the depth of the MLP used to predict
-             mask quality
-           iou_head_hidden_dim (int): the hidden dimension of the MLP
-             used to predict mask quality
-         """
-         super().__init__()
-         self.transformer_dim = transformer_dim
-         self.transformer = transformer
-
-         self.num_multimask_outputs = num_multimask_outputs
-
-         self.iou_token = nn.Embedding(1, transformer_dim)
-         self.num_mask_tokens = num_multimask_outputs + 1
-         self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
-
-         self.output_upscaling = nn.Sequential(
-             nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
-             LayerNorm2d(transformer_dim // 4),
-             activation(),
-             nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
-             activation(),
-         )
-         self.output_hypernetworks_mlps = nn.ModuleList(
-             [
-                 MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
-                 for i in range(self.num_mask_tokens)
-             ]
-         )
-
-         self.iou_prediction_head = MLP(
-             transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
-         )
-
-     def forward(
-         self,
-         image_embeddings: torch.Tensor,
-         image_pe: torch.Tensor,
-         sparse_prompt_embeddings: torch.Tensor,
-         dense_prompt_embeddings: torch.Tensor,
-         multimask_output: bool,
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """
-         Predict masks given image and prompt embeddings.
-
-         Arguments:
-           image_embeddings (torch.Tensor): the embeddings from the image encoder
-           image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
-           sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
-           dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
-           multimask_output (bool): Whether to return multiple masks or a single
-             mask.
-
-         Returns:
-           torch.Tensor: batched predicted masks
-           torch.Tensor: batched predictions of mask quality
-         """
-         masks, iou_pred = self.predict_masks(
-             image_embeddings=image_embeddings,
-             image_pe=image_pe,
-             sparse_prompt_embeddings=sparse_prompt_embeddings,
-             dense_prompt_embeddings=dense_prompt_embeddings,
-         )
-
-         # Select the correct mask or masks for output
-         if multimask_output:
-             mask_slice = slice(1, None)
-         else:
-             mask_slice = slice(0, 1)
-         masks = masks[:, mask_slice, :, :]
-         iou_pred = iou_pred[:, mask_slice]
-
-         # Prepare output
-         return masks, iou_pred
-
-     def predict_masks(
-         self,
-         image_embeddings: torch.Tensor,
-         image_pe: torch.Tensor,
-         sparse_prompt_embeddings: torch.Tensor,
-         dense_prompt_embeddings: torch.Tensor,
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """Predicts masks. See 'forward' for more details."""
-         # Concatenate output tokens
-         output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
-         output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
-         tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
-
-         # Expand per-image data in batch direction to be per-mask
-         src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
-         src = src + dense_prompt_embeddings
-         pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
-         b, c, h, w = src.shape
-
-         # Run the transformer
-         hs, src = self.transformer(src, pos_src, tokens)
-         iou_token_out = hs[:, 0, :]
-         mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
-
-         # Upscale mask embeddings and predict masks using the mask tokens
-         src = src.transpose(1, 2).view(b, c, h, w)
-         upscaled_embedding = self.output_upscaling(src)
-         hyper_in_list: List[torch.Tensor] = []
-         for i in range(self.num_mask_tokens):
-             hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
-         hyper_in = torch.stack(hyper_in_list, dim=1)
-         b, c, h, w = upscaled_embedding.shape
-         masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
-
-         # Generate mask quality predictions
-         iou_pred = self.iou_prediction_head(iou_token_out)
-
-         return masks, iou_pred
-
-
- # Lightly adapted from
- # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
- class MLP(nn.Module):
-     def __init__(
-         self,
-         input_dim: int,
-         hidden_dim: int,
-         output_dim: int,
-         num_layers: int,
-         sigmoid_output: bool = False,
-     ) -> None:
-         super().__init__()
-         self.num_layers = num_layers
-         h = [hidden_dim] * (num_layers - 1)
-         self.layers = nn.ModuleList(
-             nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
-         )
-         self.sigmoid_output = sigmoid_output
-
-     def forward(self, x):
-         for i, layer in enumerate(self.layers):
-             x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-         if self.sigmoid_output:
-             x = F.sigmoid(x)
-         return x
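A small sketch exercising the `MLP` head defined above with dummy shapes; with `transformer_dim = 256`, each hypernetwork MLP maps a 256-d mask token to a `transformer_dim // 8 = 32`-d weight vector:

import torch

mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
tokens = torch.randn(4, 256)  # e.g. 4 mask tokens
print(mlp(tokens).shape)      # torch.Size([4, 32])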
spaces/ChengZ/DeepDanbooru_string0/README.md DELETED
@@ -1,39 +0,0 @@
- ---
- title: DeepDanbooru String
- emoji: 💬
- colorFrom: blue
- colorTo: red
- sdk: gradio
- sdk_version: 3.6
- app_file: app.py
- pinned: false
- duplicated_from: NoCrypt/DeepDanbooru_string
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio`, `streamlit`, or `static`
-
- `sdk_version` : _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
spaces/CodingBillionaire/bark-voice-cloning/app.py DELETED
@@ -1,98 +0,0 @@
- import math
- import os.path
- import uuid
-
- import gradio
- import numpy
- import torch
-
- from hubert.hubert_manager import HuBERTManager
- from hubert.pre_kmeans_hubert import CustomHubert
- from hubert.customtokenizer import CustomTokenizer
- from encodec import EncodecModel
- from encodec.utils import convert_audio
-
-
- hubert_model = CustomHubert(HuBERTManager.make_sure_hubert_installed())
- tokenizer_model = CustomTokenizer.load_from_checkpoint(
-     HuBERTManager.make_sure_tokenizer_installed(model='quantifier_V1_hubert_base_ls960_23.pth'),
-     map_location=torch.device('cpu')
- )
- encodec_model = EncodecModel.encodec_model_24khz()
-
-
- def clone(audio, *args):
-     sr, wav = audio
-
-     wav = torch.tensor(wav)
-
-     if wav.dtype == torch.int16:
-         wav = wav.float() / 32767.0
-
-     if len(wav.shape) == 2:
-         if wav.shape[0] == 2:  # Stereo to mono if needed
-             wav = wav.mean(0, keepdim=True)
-         if wav.shape[1] == 2:
-             wav = wav.mean(1, keepdim=False).unsqueeze(-1)
-
-     wav = wav[-int(sr * 20):]  # Take only the last 20 seconds
-
-     wav = wav.reshape(1, -1)  # Reshape from gradio style to HuBERT shape: (N, 1) to (1, N)
-
-     semantic_vectors = hubert_model.forward(wav, input_sample_hz=sr)
-     semantic_tokens = tokenizer_model.get_token(semantic_vectors)
-
-     encodec_model.set_target_bandwidth(6.0)
-     wav = convert_audio(wav, sr, encodec_model.sample_rate, 1)
-     wav = wav.unsqueeze(0)
-
-     with torch.no_grad():
-         encoded_frames = encodec_model.encode(wav)
-
-     codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze()  # [B, n_q, T]
-
-     if not os.path.isdir('data/speakers'):
-         os.makedirs('data/speakers')
-
-     file_path = f'data/speakers/{uuid.uuid4().hex}.npz'
-
-     numpy.savez(
-         file_path,
-         semantic_prompt=semantic_tokens,
-         fine_prompt=codes,
-         coarse_prompt=codes[:2, :]
-     )
-
-     return file_path
-
-
- iface = gradio.interface.Interface(fn=clone, inputs=[
-     'audio',
-     gradio.Markdown(
-         '''
-         # Bark text to speech voice cloning
-         [Model](https://huggingface.co/GitMylo/bark-voice-cloning/), [Model GitHub](https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer), [Webui GitHub](https://github.com/gitmylo/audio-webui)
-
-         For faster creation of voice clones, [duplicate this space](https://huggingface.co/spaces/GitMylo/bark-voice-cloning?duplicate=true)
-
-         Uploaded audio files get cut to 20 seconds in order to keep it fast for everyone. Only the last 20 seconds will be used. (Bark only uses the last 14 seconds anyway)
-
-         ## Tips for better cloning
-         ### Make sure these things are **NOT** in your voice input: (in no particular order)
-         * Noise (You can use a noise remover before)
-         * Music (There are also music remover tools) (Unless you want music in the background)
-         * A cut-off at the end (This will cause it to try and continue the generation)
-         * Under 1 second of training data (I personally suggest around 10 seconds for good potential, but I've had great results with 5 seconds as well.)
-
-         ### What makes for good prompt audio? (in no particular order)
-         * Clearly spoken
-         * No weird background noises
-         * Only one speaker
-         * Audio which ends after a sentence ends
-         * Regular/common voice (They usually have more success; it's still capable of cloning complex voices, but not as good at it)
-         * Around 10 seconds of data
-         ''')
- ], outputs='file')
- iface.launch()
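A hedged sketch of reading a speaker file written by `clone()`. The three array names come straight from the `numpy.savez` call above; the file path is illustrative, and how Bark consumes these arrays (e.g., as a history prompt) is outside this file:

import numpy

speaker = numpy.load('data/speakers/example.npz')  # hypothetical output of clone()
print(speaker['semantic_prompt'].shape)
print(speaker['fine_prompt'].shape)    # all EnCodec codebooks
print(speaker['coarse_prompt'].shape)  # first two codebooks only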
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/rpn/__init__.py DELETED
@@ -1,2 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- # from .rpn import build_rpn
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/instancer/solver.py DELETED
@@ -1,305 +0,0 @@
- from fontTools.varLib.models import supportScalar
- from fontTools.misc.fixedTools import MAX_F2DOT14
- from functools import lru_cache
-
- __all__ = ["rebaseTent"]
-
- EPSILON = 1 / (1 << 14)
-
-
- def _reverse_negate(v):
-     return (-v[2], -v[1], -v[0])
-
-
- def _solve(tent, axisLimit, negative=False):
-     axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
-     lower, peak, upper = tent
-
-     # Mirror the problem such that axisDef <= peak
-     if axisDef > peak:
-         return [
-             (scalar, _reverse_negate(t) if t is not None else None)
-             for scalar, t in _solve(
-                 _reverse_negate(tent),
-                 axisLimit.reverse_negate(),
-                 not negative,
-             )
-         ]
-     # axisDef <= peak
-
-     # case 1: The whole deltaset falls outside the new limit; we can drop it
-     #
-     #                                          peak
-     #  1.........................................o..........
-     #                                           / \
-     #                                          /   \
-     #                                         /     \
-     #                                        /       \
-     #  0---|-----------|----------|-------- o         o----1
-     #    axisMin     axisDef    axisMax   lower     upper
-     #
-     if axisMax <= lower and axisMax < peak:
-         return []  # No overlap
-
-     # case 2: Only the peak and outermost bound fall outside the new limit;
-     # we keep the deltaset, update peak and outermost bound, and scale deltas
-     # by the scalar value for the restricted axis at the new limit, and solve
-     # recursively.
-     #
-     #                                  |peak
-     #  1...............................|.o..........
-     #                                  |/ \
-     #                                  /   \
-     #                                 /|    \
-     #                                / |     \
-     #  0--------------------------- o |      o----1
-     #                          lower   |      upper
-     #                                  |
-     #                               axisMax
-     #
-     # Convert to:
-     #
-     #  1............................................
-     #                                  |
-     #                                  o peak
-     #                                 /|
-     #                                /x|
-     #  0--------------------------- o o upper ----1
-     #                          lower   |
-     #                                  |
-     #                               axisMax
-     if axisMax < peak:
-         mult = supportScalar({"tag": axisMax}, {"tag": tent})
-         tent = (lower, axisMax, axisMax)
-         return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)]
-
-     # lower <= axisDef <= peak <= axisMax
-
-     gain = supportScalar({"tag": axisDef}, {"tag": tent})
-     out = [(gain, None)]
-
-     # First, the positive side
-
-     # outGain is the scalar of axisMax at the tent.
-     outGain = supportScalar({"tag": axisMax}, {"tag": tent})
-
-     # Case 3a: Gain is more than outGain. The tent down-slope crosses
-     # the axis into negative. We have to split it into multiples.
-     #
-     #                      | peak  |
-     #  1...................|.o.....|..............
-     #                      |/x\_   |
-     #  gain................+....+_.|..............
-     #                     /|    |y\|
-     #  ................../.|....|..+_......outGain
-     #                   /  |    |  | \
-     #  0---|-----------o   |    |  |  o----------1
-     #    axisMin   lower   |    |  |   upper
-     #                      |    |  |
-     #                axisDef    |  axisMax
-     #                           |
-     #                       crossing
-     if gain > outGain:
-         # Crossing point on the axis.
-         crossing = peak + (1 - gain) * (upper - peak)
-
-         loc = (axisDef, peak, crossing)
-         scalar = 1
-
-         # The part before the crossing point.
-         out.append((scalar - gain, loc))
-
-         # The part after the crossing point may use one or two tents,
-         # depending on whether upper is before axisMax or not; in one
-         # case we need to keep it down to eternity.
-
-         # Case 3a1, similar to case 1neg; just one tent needed, as in
-         # the drawing above.
-         if upper >= axisMax:
-             loc = (crossing, axisMax, axisMax)
-             scalar = outGain
-
-             out.append((scalar - gain, loc))
-
-         # Case 3a2: Similar to case 2neg; two tents needed, to keep
-         # down to eternity.
-         #
-         #                      | peak             |
-         #  1...................|.o................|...
-         #                      |/ \_              |
-         #  gain................+....+_............|...
-         #                     /|    | \xxxxxxxxxxy|
-         #                    / |    |  \_xxxxxyyyy|
-         #                   /  |    |    \xxyyyyyy|
-         #  0---|-----------o   |    |     o-------|--1
-         #    axisMin   lower   |    |      upper  |
-         #                      |    |             |
-         #                axisDef    |          axisMax
-         #                           |
-         #                       crossing
-         else:
-             # A tent's peak cannot fall on axis default. Nudge it.
-             if upper == axisDef:
-                 upper += EPSILON
-
-             # Downslope.
-             loc1 = (crossing, upper, axisMax)
-             scalar1 = 0
-
-             # Eternity justify.
-             loc2 = (upper, axisMax, axisMax)
-             scalar2 = 0
-
-             out.append((scalar1 - gain, loc1))
-             out.append((scalar2 - gain, loc2))
-
-     else:
-         # Special-case if peak is at axisMax.
-         if axisMax == peak:
-             upper = peak
-
-         # Case 3:
-         # We keep delta as is and only scale the axis upper to achieve
-         # the desired new tent if feasible.
-         #
-         #                        peak
-         #  1.....................o....................
-         #                       / \_|
-         #  ..................../....+_.........outGain
-         #                     /     |  \
-         #  gain..............+......|..+_.............
-         #                   /|      |  |  \
-         #  0---|-----------o |      |  |   o----------1
-         #    axisMin  lower  |      |  |     upper
-         #                    |      |  newUpper
-         #              axisDef      axisMax
-         #
-         newUpper = peak + (1 - gain) * (upper - peak)
-         assert axisMax <= newUpper  # Because outGain >= gain
-         if newUpper <= axisDef + (axisMax - axisDef) * 2:
-             upper = newUpper
-             if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper:
-                 # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience
-                 upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14
-                 assert peak < upper
-
-             loc = (max(axisDef, lower), peak, upper)
-             scalar = 1
-
-             out.append((scalar - gain, loc))
-
-         # Case 4: New limit doesn't fit; we need to chop into two tents,
-         # because the shape of a triangle with part of one side cut off
-         # cannot be represented as a triangle itself.
-         #
-         #            | peak |
-         #  1.........|......o.|....................
-         #  ..........|...../x\|.............outGain
-         #            |    |xxy|\_
-         #            |   /xxxy|  \_
-         #            |  |xxxxy|    \_
-         #            |  /xxxxy|      \_
-         #  0---|-----|-oxxxxxx|        o----------1
-         #    axisMin | lower  |        upper
-         #            |        |
-         #      axisDef      axisMax
-         #
-         else:
-             loc1 = (max(axisDef, lower), peak, axisMax)
-             scalar1 = 1
-
-             loc2 = (peak, axisMax, axisMax)
-             scalar2 = outGain
-
-             out.append((scalar1 - gain, loc1))
-             # Don't add a dirac delta!
-             if peak < axisMax:
-                 out.append((scalar2 - gain, loc2))
-
-     # Now, the negative side
-
-     # Case 1neg: Lower extends beyond axisMin: we chop. Simple.
-     #
-     #                     |   |peak
-     #  1..................|...|.o.................
-     #                     |   |/ \
-     #  gain...............|...+...\...............
-     #                     |x_/|    \
-     #                     |/  |     \
-     #                   _/|   |      \
-     #  0---------------o  |   |       o----------1
-     #              lower  |   |         upper
-     #                     |   |
-     #               axisMin   axisDef
-     #
-     if lower <= axisMin:
-         loc = (axisMin, axisMin, axisDef)
-         scalar = supportScalar({"tag": axisMin}, {"tag": tent})
-
-         out.append((scalar - gain, loc))
-
-     # Case 2neg: Lower is between axisMin and axisDef: we add two
-     # tents to keep it down all the way to eternity.
-     #
-     #      |              |peak
-     #  1...|...............|.o.................
-     #      |               |/ \
-     #  gain|...............+...\...............
-     #      |yxxxxxxxxxxxxx/|    \
-     #      |yyyyyyxxxxxxx/ |     \
-     #      |yyyyyyyyyyyx/  |      \
-     #  0---|-----------o   |       o----------1
-     #    axisMin   lower   |         upper
-     #                      |
-     #                axisDef
-     #
-     else:
-         # A tent's peak cannot fall on axis default. Nudge it.
-         if lower == axisDef:
-             lower -= EPSILON
-
-         # Downslope.
-         loc1 = (axisMin, lower, axisDef)
-         scalar1 = 0
-
-         # Eternity justify.
-         loc2 = (axisMin, axisMin, lower)
-         scalar2 = 0
-
-         out.append((scalar1 - gain, loc1))
-         out.append((scalar2 - gain, loc2))
-
-     return out
-
-
- @lru_cache(128)
- def rebaseTent(tent, axisLimit):
-     """Given a tuple (lower,peak,upper) "tent" and new axis limits
-     (axisMin,axisDefault,axisMax), solves how to represent the tent
-     under the new axis configuration. All values are in the normalized
-     -1,0,+1 coordinate system. Tent values can be outside this range.
-
-     Return value is a list of tuples. Each tuple is of the form
-     (scalar,tent), where scalar is a multiplier to multiply any
-     delta-sets by, and tent is a new tent for that output delta-set.
-     If tent value is None, that is a special deltaset that should
-     be always-enabled (called "gain")."""
-
-     axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit
-     assert -1 <= axisMin <= axisDef <= axisMax <= +1
-
-     lower, peak, upper = tent
-     assert -2 <= lower <= peak <= upper <= +2
-
-     assert peak != 0
-
-     sols = _solve(tent, axisLimit)
-
-     n = lambda v: axisLimit.renormalizeValue(v)
-     sols = [
-         (scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None)
-         for scalar, v in sols
-         if scalar
-     ]
-
-     return sols
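All of the case analysis in `_solve` reasons about the piecewise-linear scalar a tent contributes at a location. As a standalone reference, here is a hedged sketch of what `supportScalar({"tag": v}, {"tag": (lower, peak, upper)})` computes for a single axis (a re-statement for illustration, not fontTools' implementation, and ignoring degenerate tents):

def tent_scalar(v, lower, peak, upper):
    # 1.0 at the peak, falling linearly to 0.0 at lower and upper.
    if v == peak:
        return 1.0
    if v <= lower or v >= upper:
        return 0.0
    if v < peak:
        return (v - lower) / (peak - lower)
    return (upper - v) / (upper - peak)

print(tent_scalar(0.25, 0.0, 0.5, 1.0))  # 0.5, halfway up the rising slope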